diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/discard_block_engine.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/discard_block_engine.inl new file mode 100644 index 0000000000000000000000000000000000000000..488aeb91a31f9cc5d4e46b56c1a328eca1edb00f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/discard_block_engine.inl @@ -0,0 +1,184 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE discard_block_engine::discard_block_engine() + : m_e() + , m_n(0) +{} + +template +_CCCL_HOST_DEVICE discard_block_engine::discard_block_engine(result_type s) + : m_e(s) + , m_n(0) +{} + +template +_CCCL_HOST_DEVICE discard_block_engine::discard_block_engine(const base_type& urng) + : m_e(urng) + , m_n(0) +{} + +template +_CCCL_HOST_DEVICE void discard_block_engine::seed() +{ + m_e.seed(); + m_n = 0; +} + +template +_CCCL_HOST_DEVICE void discard_block_engine::seed(result_type s) +{ + m_e.seed(s); + m_n = 0; +} + +template +_CCCL_HOST_DEVICE typename discard_block_engine::result_type +discard_block_engine::operator()(void) +{ + if (m_n >= used_block) + { + m_e.discard(block_size - m_n); + // for(; m_n < block_size; ++m_n) + // m_e(); + m_n = 0; + } + + ++m_n; + + return m_e(); +} + +template +_CCCL_HOST_DEVICE void discard_block_engine::discard(unsigned long long z) +{ + // XXX this should be accelerated + for (; z > 0; --z) + { + this->operator()(); + } // end for +} + +template +_CCCL_HOST_DEVICE const typename discard_block_engine::base_type& +discard_block_engine::base() const +{ + return m_e; +} + +template +template +std::basic_ostream& +discard_block_engine::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags & fill character + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + const CharT space = os.widen(' '); + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(space); + + // output the base engine followed by n + 
os << m_e << space << m_n; + + // restore flags & fill character + os.flags(flags); + os.fill(fill); + + return os; +} + +template +template +std::basic_istream& discard_block_engine::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::skipws); + + // input the base engine and then n + is >> m_e >> m_n; + + // restore old flags + is.flags(flags); + return is; +} + +template +_CCCL_HOST_DEVICE bool discard_block_engine::equal(const discard_block_engine& rhs) const +{ + return (m_e == rhs.m_e) && (m_n == rhs.m_n); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const discard_block_engine& e) +{ + return thrust::random::detail::random_core_access::stream_out(os, e); +} + +template +std::basic_istream& +operator>>(std::basic_istream& is, discard_block_engine& e) +{ + return thrust::random::detail::random_core_access::stream_in(is, e); +} + +template +_CCCL_HOST_DEVICE bool +operator==(const discard_block_engine& lhs, const discard_block_engine& rhs) +{ + return thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool +operator!=(const discard_block_engine& lhs, const discard_block_engine& rhs) +{ + return !(lhs == rhs); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_congruential_engine.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_congruential_engine.inl new file mode 100644 index 0000000000000000000000000000000000000000..dae38e9bd1389cf30b131cd149a54ac6d08855b6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_congruential_engine.inl @@ -0,0 +1,155 @@ +/* + * Copyright 2008-2021 
NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE linear_congruential_engine::linear_congruential_engine(result_type s) +{ + seed(s); +} // end linear_congruential_engine::linear_congruential_engine() + +template +_CCCL_HOST_DEVICE void linear_congruential_engine::seed(result_type s) +{ + if ((detail::mod(c) == 0) && (detail::mod(s) == 0)) + { + m_x = detail::mod(1); + } + else + { + m_x = detail::mod(s); + } +} // end linear_congruential_engine::seed() + +template +_CCCL_HOST_DEVICE typename linear_congruential_engine::result_type +linear_congruential_engine::operator()(void) +{ + m_x = detail::mod(m_x); + return m_x; +} // end linear_congruential_engine::operator()() + +template +_CCCL_HOST_DEVICE void linear_congruential_engine::discard(unsigned long long z) +{ + thrust::random::detail::linear_congruential_engine_discard::discard(*this, z); +} // end linear_congruential_engine::discard() + +template +template +std::basic_ostream& +linear_congruential_engine::stream_out(std::basic_ostream& os) const +{ + using ostream_type = 
std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags & fill character + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(os.widen(' ')); + + // output one word of state + os << m_x; + + // restore flags & fill character + os.flags(flags); + os.fill(fill); + + return os; +} + +template +template +std::basic_istream& +linear_congruential_engine::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::dec); + + // input one word of state + is >> m_x; + + // restore flags + is.flags(flags); + + return is; +} + +template +_CCCL_HOST_DEVICE bool +linear_congruential_engine::equal(const linear_congruential_engine& rhs) const +{ + return m_x == rhs.m_x; +} + +template +_CCCL_HOST_DEVICE bool operator==(const linear_congruential_engine& lhs, + const linear_congruential_engine& rhs) +{ + return detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool operator!=(const linear_congruential_engine& lhs, + const linear_congruential_engine& rhs) +{ + return !(lhs == rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const linear_congruential_engine& e) +{ + return detail::random_core_access::stream_out(os, e); +} + +template +std::basic_istream& +operator>>(std::basic_istream& is, linear_congruential_engine& e) +{ + return detail::random_core_access::stream_in(is, e); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_congruential_engine_discard.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_congruential_engine_discard.h new file mode 
100644 index 0000000000000000000000000000000000000000..9dbe1d03e832057c6d02cb032e3241716aebf5b3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_congruential_engine_discard.h @@ -0,0 +1,109 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +namespace detail +{ + +template +struct linear_congruential_engine_discard_implementation +{ + _CCCL_HOST_DEVICE static void discard(UIntType& state, unsigned long long z) + { + for (; z > 0; --z) + { + state = detail::mod(state); + } + } +}; // end linear_congruential_engine_discard + +// specialize for small integers and c == 0 +// XXX figure out a robust implementation of this for any unsigned integer type later +template +struct linear_congruential_engine_discard_implementation +{ + _CCCL_HOST_DEVICE static void discard(std::uint32_t& state, unsigned long long z) + { + const std::uint32_t modulus = m; + + // XXX we need to use unsigned long long here or we will encounter overflow in the + // multiplies below + // figure out a robust 
implementation of this later + unsigned long long multiplier = a; + unsigned long long multiplier_to_z = 1; + + // see http://en.wikipedia.org/wiki/Modular_exponentiation + while (z > 0) + { + if (z & 1) + { + // multiply in this bit's contribution while using modulus to keep result small + multiplier_to_z = (multiplier_to_z * multiplier) % modulus; + } + + // move to the next bit of the exponent, square (and mod) the base accordingly + z >>= 1; + multiplier = (multiplier * multiplier) % modulus; + } + + state = static_cast((multiplier_to_z * state) % modulus); + } +}; // end linear_congruential_engine_discard + +struct linear_congruential_engine_discard +{ + template + _CCCL_HOST_DEVICE static void discard(LinearCongruentialEngine& lcg, unsigned long long z) + { + using result_type = typename LinearCongruentialEngine::result_type; + const result_type c = LinearCongruentialEngine::increment; + const result_type a = LinearCongruentialEngine::multiplier; + const result_type m = LinearCongruentialEngine::modulus; + + // XXX WAR unused variable warnings + (void) c; + (void) a; + (void) m; + + linear_congruential_engine_discard_implementation::discard(lcg.m_x, z); + } +}; // end linear_congruential_engine_discard + +} // namespace detail + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_feedback_shift_engine.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_feedback_shift_engine.inl new file mode 100644 index 0000000000000000000000000000000000000000..c73c338ad524f06b582fc2ae8bdb8660e37163ac --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_feedback_shift_engine.inl @@ -0,0 +1,151 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE linear_feedback_shift_engine::linear_feedback_shift_engine(result_type value) +{ + seed(value); +} // end linear_feedback_shift_engine::linear_feedback_shift_engine() + +template +_CCCL_HOST_DEVICE void linear_feedback_shift_engine::seed(result_type value) +{ + m_value = value; +} // end linear_feedback_shift_engine::seed() + +template +_CCCL_HOST_DEVICE typename linear_feedback_shift_engine::result_type +linear_feedback_shift_engine::operator()(void) +{ + const UIntType b = (((m_value << q) ^ m_value) & wordmask) >> (k - s); + const UIntType mask = ((~static_cast(0)) << (w - k)) & wordmask; + m_value = ((m_value & mask) << s) ^ b; + return m_value; +} // end linear_feedback_shift_engine::operator()() + +template +_CCCL_HOST_DEVICE void linear_feedback_shift_engine::discard(unsigned long long z) +{ + for (; z > 0; --z) + { + this->operator()(); + } // end for +} // end linear_feedback_shift_engine::discard() + +template +template +std::basic_ostream& +linear_feedback_shift_engine::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags & 
fill character + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(os.widen(' ')); + + // output one word of state + os << m_value; + + // restore flags & fill character + os.flags(flags); + os.fill(fill); + + return os; +} + +template +template +std::basic_istream& +linear_feedback_shift_engine::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::skipws); + + // input one word of state + is >> m_value; + + // restore flags + is.flags(flags); + + return is; +} + +template +_CCCL_HOST_DEVICE bool linear_feedback_shift_engine::equal( + const linear_feedback_shift_engine& rhs) const +{ + return m_value == rhs.m_value; +} + +template +_CCCL_HOST_DEVICE bool operator==(const linear_feedback_shift_engine& lhs, + const linear_feedback_shift_engine& rhs) +{ + return thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool operator!=(const linear_feedback_shift_engine& lhs, + const linear_feedback_shift_engine& rhs) +{ + return !(lhs == rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const linear_feedback_shift_engine& e) +{ + return thrust::random::detail::random_core_access::stream_out(os, e); +} + +template +std::basic_istream& +operator>>(std::basic_istream& is, linear_feedback_shift_engine& e) +{ + return thrust::random::detail::random_core_access::stream_in(is, e); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h new file mode 100644 
index 0000000000000000000000000000000000000000..7a22a6c4fa95c0f87d6571e9a050a501e1fb64c4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h @@ -0,0 +1,53 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +namespace detail +{ + +template +struct linear_feedback_shift_engine_wordmask +{ + static const T value = (T(1u) << i) | linear_feedback_shift_engine_wordmask::value; +}; // end linear_feedback_shift_engine_wordmask + +template +struct linear_feedback_shift_engine_wordmask +{ + static const T value = 0; +}; // end linear_feedback_shift_engine_wordmask + +} // namespace detail + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/mod.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/mod.h new file mode 100644 index 0000000000000000000000000000000000000000..a47d1e053dd43476f9d06bd52f313678445eec35 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/mod.h @@ -0,0 +1,101 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +namespace detail +{ + +template +struct static_mod +{ + static const T q = m / a; + static const T r = m % a; + + _CCCL_HOST_DEVICE T operator()(T x) const + { + _CCCL_IF_CONSTEXPR (a == 1) + { + x %= m; + } + else + { + T t1 = a * (x % q); + T t2 = r * (x / q); + if (t1 >= t2) + { + x = t1 - t2; + } + else + { + x = m - t2 + t1; + } + } + + _CCCL_IF_CONSTEXPR (c != 0) + { + const T d = m - x; + if (d > c) + { + x += c; + } + else + { + x = c - d; + } + } + + return x; + } +}; // end static_mod + +// Rely on machine overflow handling +template +struct static_mod +{ + _CCCL_HOST_DEVICE T operator()(T x) const + { + return a * x + c; + } +}; // end static_mod + +template +_CCCL_HOST_DEVICE T mod(T x) +{ + static_mod f; + return f(x); +} // end static_mod + +} // namespace detail + +} // namespace random + +THRUST_NAMESPACE_END diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/normal_distribution.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/normal_distribution.inl new file mode 100644 index 0000000000000000000000000000000000000000..9713a1ccd2006439454b6a65ea96c1241c6bb394 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/normal_distribution.inl @@ -0,0 +1,189 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE normal_distribution::normal_distribution(RealType a, RealType b) + : super_t() + , m_param(a, b) +{} // end normal_distribution::normal_distribution() + +template +_CCCL_HOST_DEVICE normal_distribution::normal_distribution(const param_type& parm) + : super_t() + , m_param(parm) +{} // end normal_distribution::normal_distribution() + +template +_CCCL_HOST_DEVICE void normal_distribution::reset() +{ + super_t::reset(); +} // end normal_distribution::reset() + +template +template +_CCCL_HOST_DEVICE typename normal_distribution::result_type +normal_distribution::operator()(UniformRandomNumberGenerator& urng) +{ + return operator()(urng, m_param); +} // end normal_distribution::operator()() + +template +template +_CCCL_HOST_DEVICE typename normal_distribution::result_type +normal_distribution::operator()(UniformRandomNumberGenerator& urng, const param_type& parm) +{ + return super_t::sample(urng, parm.first, parm.second); +} // end normal_distribution::operator()() + +template +_CCCL_HOST_DEVICE typename normal_distribution::param_type normal_distribution::param() const +{ + return m_param; +} // end normal_distribution::param() + +template +_CCCL_HOST_DEVICE void normal_distribution::param(const param_type& parm) +{ + m_param = parm; +} // end normal_distribution::param() + +template +_CCCL_HOST_DEVICE typename normal_distribution::result_type normal_distribution::min +THRUST_PREVENT_MACRO_SUBSTITUTION() const +{ + return ::cuda::std::numeric_limits::lowest(); +} // end normal_distribution::min() + +template +_CCCL_HOST_DEVICE typename 
normal_distribution::result_type normal_distribution::max +THRUST_PREVENT_MACRO_SUBSTITUTION() const +{ + return ::cuda::std::numeric_limits::max(); +} // end normal_distribution::max() + +template +_CCCL_HOST_DEVICE typename normal_distribution::result_type normal_distribution::mean() const +{ + return m_param.first; +} // end normal_distribution::mean() + +template +_CCCL_HOST_DEVICE typename normal_distribution::result_type normal_distribution::stddev() const +{ + return m_param.second; +} // end normal_distribution::stddev() + +template +_CCCL_HOST_DEVICE bool normal_distribution::equal(const normal_distribution& rhs) const +{ + return m_param == rhs.param(); +} + +template +template +std::basic_ostream& normal_distribution::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags and fill character + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + const CharT space = os.widen(' '); + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(space); + + os << mean() << space << stddev(); + + // restore old flags and fill character + os.flags(flags); + os.fill(fill); + return os; +} + +template +template +std::basic_istream& normal_distribution::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::skipws); + + is >> m_param.first >> m_param.second; + + // restore old flags + is.flags(flags); + return is; +} + +template +_CCCL_HOST_DEVICE bool operator==(const normal_distribution& lhs, const normal_distribution& rhs) +{ + return thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool operator!=(const normal_distribution& lhs, const normal_distribution& rhs) +{ + return !(lhs == rhs); +} + 
+template +std::basic_ostream& +operator<<(std::basic_ostream& os, const normal_distribution& d) +{ + return thrust::random::detail::random_core_access::stream_out(os, d); +} + +template +std::basic_istream& operator>>(std::basic_istream& is, normal_distribution& d) +{ + return thrust::random::detail::random_core_access::stream_in(is, d); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/normal_distribution_base.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/normal_distribution_base.h new file mode 100644 index 0000000000000000000000000000000000000000..07fb48d8f48138582c82c8495f478472fff96bb1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/normal_distribution_base.h @@ -0,0 +1,160 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Copyright Jens Maurer 2000-2001 + * Distributed under the Boost Software License, Version 1.0. 
(See + * accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN +namespace random +{ +namespace detail +{ + +// this version samples the normal distribution directly +// and uses the non-standard math function erfcinv +template +class normal_distribution_nvcc +{ +protected: + template + _CCCL_HOST_DEVICE RealType sample(UniformRandomNumberGenerator& urng, const RealType mean, const RealType stddev) + { + using uint_type = typename UniformRandomNumberGenerator::result_type; + constexpr uint_type urng_range = UniformRandomNumberGenerator::max - UniformRandomNumberGenerator::min; + + // Constants for conversion + constexpr RealType S1 = static_cast(1. 
/ static_cast(urng_range)); + constexpr RealType S2 = S1 / 2; + + RealType S3 = static_cast(-1.4142135623730950488016887242097); // -sqrt(2) + + // Get the integer value + uint_type u = urng() - UniformRandomNumberGenerator::min; + + // Ensure the conversion to float will give a value in the range [0,0.5) + if (u > (urng_range / 2)) + { + u = urng_range - u; + S3 = -S3; + } + + // Convert to floating point in [0,0.5) + RealType p = u * S1 + S2; + + // Apply inverse error function + return mean + stddev * S3 * erfcinv(2 * p); + } + + // no-op + _CCCL_HOST_DEVICE void reset() {} +}; + +// this version samples the normal distribution using +// Marsaglia's "polar method" +template +class normal_distribution_portable +{ +protected: + normal_distribution_portable() + : m_r1() + , m_r2() + , m_cached_rho() + , m_valid(false) + {} + + normal_distribution_portable(const normal_distribution_portable& other) + : m_r1(other.m_r1) + , m_r2(other.m_r2) + , m_cached_rho(other.m_cached_rho) + , m_valid(other.m_valid) + {} + + void reset() + { + m_valid = false; + } + + // note that we promise to call this member function with the same mean and stddev + template + _CCCL_HOST_DEVICE RealType sample(UniformRandomNumberGenerator& urng, const RealType mean, const RealType stddev) + { + // implementation from Boost + // allow for Koenig lookup + using std::cos; + using std::log; + using std::sin; + using std::sqrt; + + if (!m_valid) + { + uniform_real_distribution u01; + m_r1 = u01(urng); + m_r2 = u01(urng); + m_cached_rho = sqrt(-RealType(2) * log(RealType(1) - m_r2)); + + m_valid = true; + } + else + { + m_valid = false; + } + + const RealType pi = RealType(3.14159265358979323846); + + RealType result = m_cached_rho * (m_valid ? 
cos(RealType(2) * pi * m_r1) : sin(RealType(2) * pi * m_r1)); + + return mean + stddev * result; + } + +private: + RealType m_r1, m_r2, m_cached_rho; + bool m_valid; +}; + +template +struct normal_distribution_base +{ +#if _CCCL_HAS_CUDA_COMPILER && !_CCCL_CUDA_COMPILER(NVHPC) + using type = normal_distribution_nvcc; +#else + using type = normal_distribution_portable; +#endif +}; + +} // namespace detail +} // namespace random +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/random_core_access.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/random_core_access.h new file mode 100644 index 0000000000000000000000000000000000000000..060981bc3260a783304aa9985d9c3b13f7907ecd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/random_core_access.h @@ -0,0 +1,63 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +namespace detail +{ + +struct random_core_access +{ + template + static OStream& stream_out(OStream& os, const EngineOrDistribution& x) + { + return x.stream_out(os); + } + + template + static IStream& stream_in(IStream& is, EngineOrDistribution& x) + { + return x.stream_in(is); + } + + template + _CCCL_HOST_DEVICE static bool equal(const EngineOrDistribution& lhs, const EngineOrDistribution& rhs) + { + return lhs.equal(rhs); + } + +}; // end random_core_access + +} // namespace detail + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/subtract_with_carry_engine.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/subtract_with_carry_engine.inl new file mode 100644 index 0000000000000000000000000000000000000000..b69ed6206c7c28e022051bdcc3b8216e2a6cc77e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/subtract_with_carry_engine.inl @@ -0,0 +1,201 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE subtract_with_carry_engine::subtract_with_carry_engine(result_type value) +{ + seed(value); +} // end subtract_with_carry_engine::subtract_with_carry_engine() + +template +_CCCL_HOST_DEVICE void subtract_with_carry_engine::seed(result_type value) +{ + thrust::random::linear_congruential_engine e( + value == 0u ? default_seed : value); + + // initialize state + for (size_t i = 0; i < long_lag; ++i) + { + m_x[i] = detail::mod(e()); + } // end for i + + m_carry = (m_x[long_lag - 1] == 0); + m_k = 0; +} // end subtract_with_carry_engine::seed() + +template +_CCCL_HOST_DEVICE typename subtract_with_carry_engine::result_type +subtract_with_carry_engine::operator()(void) +{ + // XXX we probably need to cache these m_x[m_k] in a register + // maybe we need to cache the use of all member variables + int short_index = m_k - short_lag; + if (short_index < 0) + { + short_index += long_lag; + } + result_type xi; + if (m_x[short_index] >= m_x[m_k] + m_carry) + { + // x(n) >= 0 + xi = m_x[short_index] - m_x[m_k] - m_carry; + m_carry = 0; + } + else + { + // x(n) < 0 + xi = modulus - m_x[m_k] - m_carry + m_x[short_index]; + m_carry = 1; + } + m_x[m_k] = xi; + ++m_k; + if (m_k >= long_lag) + { + m_k = 0; + } + return xi; +} // end subtract_with_carry_engine::operator()() + +template +_CCCL_HOST_DEVICE void subtract_with_carry_engine::discard(unsigned long long z) +{ + for (; z > 0; --z) + { + this->operator()(); + } // end for +} // end 
subtract_with_carry_engine::discard() + +template +template +std::basic_ostream& +subtract_with_carry_engine::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + const CharT space = os.widen(' '); + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(space); + + const UIntType long_lag_ = r; + + for (size_t i = 0; i < r; ++i) + { + os << m_x[(i + m_k) % long_lag_] << space; + } + os << m_carry; + + os.flags(flags); + os.fill(fill); + return os; +} + +template +template +std::basic_istream& +subtract_with_carry_engine::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + const typename ios_base::fmtflags flags = is.flags(); + is.flags(ios_base::dec | ios_base::skipws); + + for (size_t i = 0; i < r; ++i) + { + is >> m_x[i]; + } + is >> m_carry; + + m_k = 0; + + is.flags(flags); + return is; +} + +template +_CCCL_HOST_DEVICE bool +subtract_with_carry_engine::equal(const subtract_with_carry_engine& rhs) const +{ + const UIntType long_lag_ = r; + + bool result = true; + for (size_t i = 0; i < r; ++i) + { + result &= (m_x[(i + m_k) % long_lag_] == rhs.m_x[(i + rhs.m_k) % long_lag_]); + } + + // XXX not sure if this last check is necessary + result &= (m_carry == rhs.m_carry); + + return result; +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const subtract_with_carry_engine& e) +{ + return thrust::random::detail::random_core_access::stream_out(os, e); +} + +template +std::basic_istream& +operator>>(std::basic_istream& is, subtract_with_carry_engine& e) +{ + return thrust::random::detail::random_core_access::stream_in(is, e); +} + +template +_CCCL_HOST_DEVICE bool operator==(const subtract_with_carry_engine& lhs, + const subtract_with_carry_engine& rhs) +{ + return 
thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool operator!=(const subtract_with_carry_engine& lhs, + const subtract_with_carry_engine& rhs) +{ + return !(lhs == rhs); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/uniform_int_distribution.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/uniform_int_distribution.inl new file mode 100644 index 0000000000000000000000000000000000000000..c342abd00ebee0de60e3c15fbf22cddf620be39e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/uniform_int_distribution.inl @@ -0,0 +1,198 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE uniform_int_distribution::uniform_int_distribution(IntType a, IntType b) + : m_param(a, b) +{} // end uniform_int_distribution::uniform_int_distribution() + +template +_CCCL_HOST_DEVICE uniform_int_distribution::uniform_int_distribution(const param_type& parm) + : m_param(parm) +{} // end uniform_int_distribution::uniform_int_distribution() + +template +_CCCL_HOST_DEVICE void uniform_int_distribution::reset() +{} // end uniform_int_distribution::reset() + +template +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::result_type +uniform_int_distribution::operator()(UniformRandomNumberGenerator& urng) +{ + return operator()(urng, m_param); +} // end uniform_int_distribution::operator()() + +template +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::result_type +uniform_int_distribution::operator()(UniformRandomNumberGenerator& urng, const param_type& parm) +{ + // XXX this implementation is somewhat hacky and will skip + // values if the range of the RNG is smaller than the range of the distribution + // we should improve this implementation in a later version + + using float_type = typename thrust::detail::largest_available_float::type; + + const float_type real_min(static_cast(parm.first)); + const float_type real_max(static_cast(parm.second)); + + // add one to the right end of the interval because it is half-open + // XXX adding 1.0 to a potentially large floating point number seems like a bad idea + uniform_real_distribution real_dist(real_min, real_max + float_type(1)); + + return static_cast(real_dist(urng)); +} // end 
uniform_int_distribution::operator()() + +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::result_type uniform_int_distribution::a() const +{ + return m_param.first; +} // end uniform_int_distribution::a() + +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::result_type uniform_int_distribution::b() const +{ + return m_param.second; +} // end uniform_int_distribution::b() + +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::param_type uniform_int_distribution::param() const +{ + return m_param; +} // end uniform_int_distribution::param() + +template +_CCCL_HOST_DEVICE void uniform_int_distribution::param(const param_type& parm) +{ + m_param = parm; +} // end uniform_int_distribution::param() + +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::result_type uniform_int_distribution::min +THRUST_PREVENT_MACRO_SUBSTITUTION() const +{ + return a(); +} // end uniform_int_distribution::min() + +template +_CCCL_HOST_DEVICE typename uniform_int_distribution::result_type uniform_int_distribution::max +THRUST_PREVENT_MACRO_SUBSTITUTION() const +{ + return b(); +} // end uniform_int_distribution::max() + +template +_CCCL_HOST_DEVICE bool uniform_int_distribution::equal(const uniform_int_distribution& rhs) const +{ + return param() == rhs.param(); +} + +template +template +std::basic_ostream& +uniform_int_distribution::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags and fill character + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + const CharT space = os.widen(' '); + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(space); + + os << a() << space << b(); + + // restore old flags and fill character + os.flags(flags); + os.fill(fill); + return os; +} + +template +template +std::basic_istream& uniform_int_distribution::stream_in(std::basic_istream& is) 
+{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::skipws); + + is >> m_param.first >> m_param.second; + + // restore old flags + is.flags(flags); + return is; +} + +template +_CCCL_HOST_DEVICE bool +operator==(const uniform_int_distribution& lhs, const uniform_int_distribution& rhs) +{ + return thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool +operator!=(const uniform_int_distribution& lhs, const uniform_int_distribution& rhs) +{ + return !(lhs == rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const uniform_int_distribution& d) +{ + return thrust::random::detail::random_core_access::stream_out(os, d); +} + +template +std::basic_istream& +operator>>(std::basic_istream& is, uniform_int_distribution& d) +{ + return thrust::random::detail::random_core_access::stream_in(is, d); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/uniform_real_distribution.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/uniform_real_distribution.inl new file mode 100644 index 0000000000000000000000000000000000000000..1d122dee9f563329dd5f0951b4d8b15a2f98ba43 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/uniform_real_distribution.inl @@ -0,0 +1,198 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE uniform_real_distribution::uniform_real_distribution(RealType a, RealType b) + : m_param(a, b) +{} // end uniform_real_distribution::uniform_real_distribution() + +template +_CCCL_HOST_DEVICE uniform_real_distribution::uniform_real_distribution(const param_type& parm) + : m_param(parm) +{} // end uniform_real_distribution::uniform_real_distribution() + +template +_CCCL_HOST_DEVICE void uniform_real_distribution::reset() +{} // end uniform_real_distribution::reset() + +template +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::result_type +uniform_real_distribution::operator()(UniformRandomNumberGenerator& urng) +{ + return operator()(urng, m_param); +} // end uniform_real::operator()() + +template +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::result_type +uniform_real_distribution::operator()(UniformRandomNumberGenerator& urng, const param_type& parm) +{ + // call the urng & map its result to [0,1) + result_type result = static_cast(urng() - UniformRandomNumberGenerator::min); + + // adding one to the denominator ensures that the interval is half-open at 1.0 + // XXX adding 1.0 to a potentially large floating point number seems like a 
bad idea + // XXX OTOH adding 1 to what is potentially UINT_MAX also seems like a bad idea + // XXX we could statically check if 1u + (max - min) is representable and do that, otherwise use the current + // implementation + result /= + (result_type(1) + static_cast(UniformRandomNumberGenerator::max - UniformRandomNumberGenerator::min)); + + return (result * (parm.second - parm.first)) + parm.first; +} // end uniform_real::operator()() + +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::result_type +uniform_real_distribution::a() const +{ + return m_param.first; +} // end uniform_real::a() + +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::result_type +uniform_real_distribution::b() const +{ + return m_param.second; +} // end uniform_real_distribution::b() + +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::param_type +uniform_real_distribution::param() const +{ + return m_param; + ; +} // end uniform_real_distribution::param() + +template +_CCCL_HOST_DEVICE void uniform_real_distribution::param(const param_type& parm) +{ + m_param = parm; +} // end uniform_real_distribution::param() + +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::result_type uniform_real_distribution::min +THRUST_PREVENT_MACRO_SUBSTITUTION() const +{ + return a(); +} // end uniform_real_distribution::min() + +template +_CCCL_HOST_DEVICE typename uniform_real_distribution::result_type uniform_real_distribution::max +THRUST_PREVENT_MACRO_SUBSTITUTION() const +{ + return b(); +} // end uniform_real_distribution::max() + +template +_CCCL_HOST_DEVICE bool uniform_real_distribution::equal(const uniform_real_distribution& rhs) const +{ + return m_param == rhs.param(); +} + +template +template +std::basic_ostream& +uniform_real_distribution::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags and fill character + const typename 
ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + const CharT space = os.widen(' '); + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(space); + + os << a() << space << b(); + + // restore old flags and fill character + os.flags(flags); + os.fill(fill); + return os; +} + +template +template +std::basic_istream& uniform_real_distribution::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::skipws); + + is >> m_param.first >> m_param.second; + + // restore old flags + is.flags(flags); + return is; +} + +template +_CCCL_HOST_DEVICE bool +operator==(const uniform_real_distribution& lhs, const uniform_real_distribution& rhs) +{ + return thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool +operator!=(const uniform_real_distribution& lhs, const uniform_real_distribution& rhs) +{ + return !(lhs == rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const uniform_real_distribution& d) +{ + return thrust::random::detail::random_core_access::stream_out(os, d); +} + +template +std::basic_istream& +operator>>(std::basic_istream& is, uniform_real_distribution& d) +{ + return thrust::random::detail::random_core_access::stream_in(is, d); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/xor_combine_engine.inl b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/xor_combine_engine.inl new file mode 100644 index 0000000000000000000000000000000000000000..ae57607d4e4b9ba204b4bea48e0d78a1bd4c9c0d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/xor_combine_engine.inl @@ -0,0 +1,183 
@@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +template +_CCCL_HOST_DEVICE xor_combine_engine::xor_combine_engine() + : m_b1() + , m_b2() +{} // end xor_combine_engine::xor_combine_engine() + +template +_CCCL_HOST_DEVICE +xor_combine_engine::xor_combine_engine(const base1_type& urng1, const base2_type& urng2) + : m_b1(urng1) + , m_b2(urng2) +{} // end xor_combine_engine::xor_combine_engine() + +template +_CCCL_HOST_DEVICE xor_combine_engine::xor_combine_engine(result_type s) + : m_b1(s) + , m_b2(s) +{} // end xor_combine_engine::xor_combine_engine() + +template +_CCCL_HOST_DEVICE void xor_combine_engine::seed() +{ + m_b1.seed(); + m_b2.seed(); +} // end xor_combine_engine::seed() + +template +_CCCL_HOST_DEVICE void xor_combine_engine::seed(result_type s) +{ + m_b1.seed(s); + m_b2.seed(s); +} // end xor_combine_engine::seed() + +template +_CCCL_HOST_DEVICE const typename xor_combine_engine::base1_type& +xor_combine_engine::base1() const +{ + return m_b1; +} // end xor_combine_engine::base1() + +template +_CCCL_HOST_DEVICE const typename 
xor_combine_engine::base2_type& +xor_combine_engine::base2() const +{ + return m_b2; +} // end xor_combine_engine::base2() + +template +_CCCL_HOST_DEVICE typename xor_combine_engine::result_type +xor_combine_engine::operator()(void) +{ + return (result_type(m_b1() - base1_type::min) << shift1) ^ (result_type(m_b2() - base2_type::min) << shift2); +} // end xor_combine_engine::operator()() + +template +_CCCL_HOST_DEVICE void xor_combine_engine::discard(unsigned long long z) +{ + for (; z > 0; --z) + { + this->operator()(); + } // end for +} // end xor_combine_engine::discard() + +template +template +std::basic_ostream& +xor_combine_engine::stream_out(std::basic_ostream& os) const +{ + using ostream_type = std::basic_ostream; + using ios_base = typename ostream_type::ios_base; + + // save old flags and fill character + const typename ios_base::fmtflags flags = os.flags(); + const CharT fill = os.fill(); + + const CharT space = os.widen(' '); + os.flags(ios_base::dec | ios_base::fixed | ios_base::left); + os.fill(space); + + // output each base engine in turn + os << base1() << space << base2(); + + // restore old flags and fill character + os.flags(flags); + os.fill(fill); + return os; +} + +template +template +std::basic_istream& +xor_combine_engine::stream_in(std::basic_istream& is) +{ + using istream_type = std::basic_istream; + using ios_base = typename istream_type::ios_base; + + // save old flags + const typename ios_base::fmtflags flags = is.flags(); + + is.flags(ios_base::skipws); + + // input each base engine in turn + is >> m_b1 >> m_b2; + + // restore old flags + is.flags(flags); + return is; +} + +template +_CCCL_HOST_DEVICE bool +xor_combine_engine::equal(const xor_combine_engine& rhs) const +{ + return (m_b1 == rhs.m_b1) && (m_b2 == rhs.m_b2); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const xor_combine_engine& e) +{ + return thrust::random::detail::random_core_access::stream_out(os, e); +} + +template +std::basic_istream& 
+operator>>(std::basic_istream& is, xor_combine_engine& e) +{ + return thrust::random::detail::random_core_access::stream_in(is, e); +} + +template +_CCCL_HOST_DEVICE bool operator==(const xor_combine_engine& lhs, + const xor_combine_engine& rhs) +{ + return thrust::random::detail::random_core_access::equal(lhs, rhs); +} + +template +_CCCL_HOST_DEVICE bool operator!=(const xor_combine_engine& lhs, + const xor_combine_engine& rhs) +{ + return !(lhs == rhs); +} + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/xor_combine_engine_max.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/xor_combine_engine_max.h new file mode 100644 index 0000000000000000000000000000000000000000..f232d26288a38973b8b6a1f33a62e915c5d4599b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/detail/xor_combine_engine_max.h @@ -0,0 +1,216 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +namespace detail +{ + +namespace math = thrust::detail::mpl::math; + +namespace detail +{ + +// two cases for this function avoids compile-time warnings of overflow +template +struct lshift_w +{ + static const UIntType value = 0; +}; + +template +struct lshift_w +{ + static const UIntType value = lhs << rhs; +}; + +} // namespace detail + +template +struct lshift_w +{ + static const bool shift_will_overflow = rhs >= w; + + static const UIntType value = detail::lshift_w::value; +}; + +template +struct lshift : lshift_w::digits, lhs, rhs> +{}; + +template +struct two_to_the_power : lshift +{}; + +template +class xor_combine_engine_max_aux_constants +{ +public: + static const result_type two_to_the_d = two_to_the_power::value; + static const result_type c = lshift::value; + + static const result_type t = math::max::value; + + static const result_type u = math::min::value; + + static const result_type p = math::log2::value; + static const result_type two_to_the_p = two_to_the_power::value; + + static const result_type k = math::div::value; +}; + +template +struct xor_combine_engine_max_aux; + +template +struct xor_combine_engine_max_aux_case4 +{ + using constants = xor_combine_engine_max_aux_constants; + + static const result_type k_plus_1_times_two_to_the_p = + lshift::value, constants::p>::value; + + static const result_type M = + xor_combine_engine_max_aux::value, + constants::two_to_the_p>::value, + math::mod::value, + d>::value; + + static const result_type value = math::plus::value; +}; + +template +struct xor_combine_engine_max_aux_case3 +{ + using constants = 
xor_combine_engine_max_aux_constants; + + static const result_type k_plus_1_times_two_to_the_p = + lshift::value, constants::p>::value; + + static const result_type M = + xor_combine_engine_max_aux::value, + constants::two_to_the_p>::value, + math::mod::value, + d>::value; + + static const result_type value = math::plus::value; +}; + +template +struct xor_combine_engine_max_aux_case2 +{ + using constants = xor_combine_engine_max_aux_constants; + + static const result_type k_plus_1_times_two_to_the_p = + lshift::value, constants::p>::value; + + static const result_type value = math::minus::value; +}; + +template +struct xor_combine_engine_max_aux_case1 +{ + static const result_type c = lshift::value; + + static const result_type value = math::plus::value; +}; + +template +struct xor_combine_engine_max_aux_2 +{ + using constants = xor_combine_engine_max_aux_constants; + + static const result_type value = thrust::detail::eval_if< + // if k is odd... + math::is_odd::value, + thrust::detail::identity_< + thrust::detail::integral_constant::value>>, + thrust::detail::eval_if< + // otherwise if a * 2^3 >= b, then case 3 + a * constants::two_to_the_d >= b, + thrust::detail::identity_< + thrust::detail::integral_constant::value>>, + // otherwise, case 4 + thrust::detail::identity_< + thrust::detail::integral_constant::value>>>>:: + type::value; +}; + +template ::value)> +struct xor_combine_engine_max_aux_1 : xor_combine_engine_max_aux_case1 +{}; + +template +struct xor_combine_engine_max_aux_1 : xor_combine_engine_max_aux_2 +{}; + +template +struct xor_combine_engine_max_aux : xor_combine_engine_max_aux_1 +{}; + +template +struct xor_combine_engine_max +{ + static const size_t w = std::numeric_limits::digits; + + static const result_type m1 = math:: + min::value - 1>::value; + + static const result_type m2 = math:: + min::value - 1>::value; + + static const result_type s = s1 - s2; + + static const result_type M = xor_combine_engine_max_aux::value; + + // the value is 
M(m1,m2,s) lshift_w s2 + static const result_type value = lshift_w::value; +}; // end xor_combine_engine_max + +} // namespace detail + +} // namespace random + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/discard_block_engine.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/discard_block_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..8bf8e935f04989735b83c0acfba55a004535bc24 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/discard_block_engine.h @@ -0,0 +1,239 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file discard_block_engine.h + * \brief A random number engine which adapts a base engine and produces + * numbers by discarding all but a contiguous blocks of its values. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_engine_adaptors Random Number Engine Adaptor Class Templates + * \ingroup random + * \{ + */ + +/*! 
\class discard_block_engine + * \brief A \p discard_block_engine adapts an existing base random number engine and produces + * random values by discarding some of the values returned by its base engine. + * Each cycle of the compound engine begins by returning \c r values successively produced + * by the base engine and ends by discarding p-r such values. The engine's state + * is the state of its base engine followed by the number of calls to operator() + * that have occurred since the beginning of the current cycle. + * + * \tparam Engine The type of the base random number engine to adapt. + * \tparam p The discard cycle length. + * \tparam r The number of values to return of the base engine. Because p-r will be + * discarded, r <= p. + * + * The following code snippet shows an example of using a \p discard_block_engine instance: + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * // create a discard_block_engine from minstd_rand, with a cycle length of 13 + * // keep every first 10 values, and discard the next 3 + * thrust::discard_block_engine rng; + * + * // print a random number to standard output + * std::cout << rng() << std::endl; + * + * return 0; + * } + * \endcode + */ +template +class discard_block_engine +{ +public: + // types + + /*! \typedef base_type + * \brief The type of the adapted base random number engine. + */ + using base_type = Engine; + + /*! \typedef result_type + * \brief The type of the unsigned integer produced by this \p linear_congruential_engine. + */ + using result_type = typename base_type::result_type; + + // engine characteristics + + /*! The length of the production cycle. + */ + static const size_t block_size = p; + + /*! The number of used numbers per production cycle. + */ + static const size_t used_block = r; + + /*! The smallest value this \p discard_block_engine may potentially produce. + */ + static const result_type min = base_type::min; + + /*! 
The largest value this \p discard_block_engine may potentially produce. + */ + static const result_type max = base_type::max; + + // constructors and seeding functions + + /*! This constructor constructs a new \p discard_block_engine and constructs + * its \p base_type engine using its null constructor. + */ + _CCCL_HOST_DEVICE discard_block_engine(); + + /*! This constructor constructs a new \p discard_block_engine using + * a given \p base_type engine to initialize its adapted base engine. + * + * \param urng A \p base_type to use to initialize this \p discard_block_engine's + * adapted base engine. + */ + _CCCL_HOST_DEVICE explicit discard_block_engine(const base_type& urng); + + /*! This constructor initializes a new \p discard_block_engine with a given seed. + * + * \param s The seed used to initialize this \p discard_block_engine's adapted base engine. + */ + _CCCL_HOST_DEVICE explicit discard_block_engine(result_type s); + + /*! This method initializes the state of this \p discard_block_engine's adapted base engine + * by using its \p default_seed value. + */ + _CCCL_HOST_DEVICE void seed(); + + /*! This method initializes the state of this \p discard_block_engine's adapted base engine + * by using the given seed. + * + * \param s The seed with which to initialize this \p discard_block_engine's adapted base engine. + */ + _CCCL_HOST_DEVICE void seed(result_type s); + + // generating functions + + /*! This member function produces a new random value and updates this \p discard_block_engine's state. + * \return A new random number. + */ + _CCCL_HOST_DEVICE result_type operator()(void); + + /*! This member function advances this \p discard_block_engine's state a given number of times + * and discards the results. + * + * \param z The number of random values to discard. + * \note This function is provided because an implementation may be able to accelerate it. + */ + _CCCL_HOST_DEVICE void discard(unsigned long long z); + + // property functions + + /*! 
This member function returns a const reference to this \p discard_block_engine's + * adapted base engine. + * + * \return A const reference to the base engine this \p discard_block_engine adapts. + */ + _CCCL_HOST_DEVICE const base_type& base() const; + + /*! \cond + */ + +private: + base_type m_e; + unsigned int m_n; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const discard_block_engine& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + /*! \endcond + */ +}; // end discard_block_engine + +/*! This function checks two \p discard_block_engines for equality. + * \param lhs The first \p discard_block_engine to test. + * \param rhs The second \p discard_block_engine to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool +operator==(const discard_block_engine& lhs, const discard_block_engine& rhs); + +/*! This function checks two \p discard_block_engines for inequality. + * \param lhs The first \p discard_block_engine to test. + * \param rhs The second \p discard_block_engine to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool +operator!=(const discard_block_engine& lhs, const discard_block_engine& rhs); + +/*! This function streams a discard_block_engine to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. + * \param e The \p discard_block_engine to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const discard_block_engine& e); + +/*! This function streams a discard_block_engine in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param e The \p discard_block_engine to stream in. 
+ * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, discard_block_engine& e); + +/*! \} // end random_number_engine_adaptors + */ + +} // namespace random + +// import names into thrust:: +using random::discard_block_engine; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/linear_congruential_engine.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/linear_congruential_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..8ff2649d8e36e87fb1a7a6b61e97d2fd918fdb71 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/linear_congruential_engine.h @@ -0,0 +1,288 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file linear_congruential_engine.h + * \brief A linear congruential pseudorandom number engine. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! 
\addtogroup random_number_engine_templates Random Number Engine Class Templates + * \ingroup random + * \{ + */ + +/*! \class linear_congruential_engine + * \brief A \p linear_congruential_engine random number engine produces unsigned integer + * random numbers using a linear congruential random number generation algorithm. + * + * The generation algorithm has the form x_i = (a * x_{i-1} + c) mod m. + * + * \tparam UIntType The type of unsigned integer to produce. + * \tparam a The multiplier used in the generation algorithm. + * \tparam c The increment used in the generation algorithm. + * \tparam m The modulus used in the generation algorithm. + * + * \note Inexperienced users should not use this class template directly. Instead, use + * \p minstd_rand or \p minstd_rand0. + * + * The following code snippet shows examples of use of a \p linear_congruential_engine instance: + * + * \code + * #include + * #include + * + * int main() + * { + * // create a minstd_rand object, which is an instance of linear_congruential_engine + * thrust::minstd_rand rng1; + * + * // output some random values to cout + * std::cout << rng1() << std::endl; + * + * // a random value is printed + * + * // create a new minstd_rand from a seed + * thrust::minstd_rand rng2(13); + * + * // discard some random values + * rng2.discard(13); + * + * // stream the object to an iostream + * std::cout << rng2 << std::endl; + * + * // rng2's current state is printed + * + * // print the minimum and maximum values that minstd_rand can produce + * std::cout << thrust::minstd_rand::min << std::endl; + * std::cout << thrust::minstd_rand::max << std::endl; + * + * // the range of minstd_rand is printed + * + * // save the state of rng2 to a different object + * thrust::minstd_rand rng3 = rng2; + * + * // compare rng2 and rng3 + * std::cout << (rng2 == rng3) << std::endl; + * + * // 1 is printed + * + * // re-seed rng2 with a different seed + * rng2.seed(7); + * + * // compare rng2 and rng3 + * std::cout << 
(rng2 == rng3) << std::endl; + * + * // 0 is printed + * + * return 0; + * } + * + * \endcode + * + * \see thrust::random::minstd_rand + * \see thrust::random::minstd_rand0 + */ +template +class linear_congruential_engine +{ +public: + // types + + /*! \typedef result_type + * \brief The type of the unsigned integer produced by this \p linear_congruential_engine. + */ + using result_type = UIntType; + + // engine characteristics + + /*! The multiplier used in the generation algorithm. + */ + static const result_type multiplier = a; + + /*! The increment used in the generation algorithm. + */ + static const result_type increment = c; + + /*! The modulus used in the generation algorithm. + */ + static const result_type modulus = m; + + /*! The smallest value this \p linear_congruential_engine may potentially produce. + */ +#ifndef _CCCL_DOXYGEN_INVOKED // Doxygen breaks on the ternary :shrug: + static const result_type min = c == 0u ? 1u : 0u; +#else + static const result_type min = 0u; +#endif // _CCCL_DOXYGEN_INVOKED + + /*! The largest value this \p linear_congruential_engine may potentially produce. + */ + static const result_type max = m - 1u; + + /*! The default seed of this \p linear_congruential_engine. + */ + static const result_type default_seed = 1u; + + // constructors and seeding functions + + /*! This constructor, which optionally accepts a seed, initializes a new + * \p linear_congruential_engine. + * + * \param s The seed used to initialize this \p linear_congruential_engine's state. + */ + _CCCL_HOST_DEVICE explicit linear_congruential_engine(result_type s = default_seed); + + /*! This method initializes this \p linear_congruential_engine's state, and optionally accepts + * a seed value. + * + * \param s The seed used to initializes this \p linear_congruential_engine's state. + */ + _CCCL_HOST_DEVICE void seed(result_type s = default_seed); + + // generating functions + + /*! 
This member function produces a new random value and updates this \p linear_congruential_engine's state. + * \return A new random number. + */ + _CCCL_HOST_DEVICE result_type operator()(void); + + /*! This member function advances this \p linear_congruential_engine's state a given number of times + * and discards the results. + * + * \param z The number of random values to discard. + * \note This function is provided because an implementation may be able to accelerate it. + */ + _CCCL_HOST_DEVICE void discard(unsigned long long z); + + /*! \cond + */ + +private: + result_type m_x; + + static void transition(result_type& state); + + friend struct thrust::random::detail::random_core_access; + + friend struct thrust::random::detail::linear_congruential_engine_discard; + + _CCCL_HOST_DEVICE bool equal(const linear_congruential_engine& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + + /*! \endcond + */ +}; // end linear_congruential_engine + +/*! This function checks two \p linear_congruential_engines for equality. + * \param lhs The first \p linear_congruential_engine to test. + * \param rhs The second \p linear_congruential_engine to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator==(const linear_congruential_engine& lhs, + const linear_congruential_engine& rhs); + +/*! This function checks two \p linear_congruential_engines for inequality. + * \param lhs The first \p linear_congruential_engine to test. + * \param rhs The second \p linear_congruential_engine to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator!=(const linear_congruential_engine& lhs, + const linear_congruential_engine& rhs); + +/*! This function streams a linear_congruential_engine to a \p std::basic_ostream. 
+ * \param os The \p basic_ostream to stream out to. + * \param e The \p linear_congruential_engine to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const linear_congruential_engine& e); + +/*! This function streams a linear_congruential_engine in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param e The \p linear_congruential_engine to stream in. + * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, linear_congruential_engine& e); + +/*! \} // random_number_engine_templates + */ + +/*! \addtogroup predefined_random + * \{ + */ + +// XXX the type N2111 used here was uint_fast32_t + +/*! \typedef minstd_rand0 + * \brief A random number engine with predefined parameters which implements a version of + * the Minimal Standard random number generation algorithm. + * \note The 10000th consecutive invocation of a default-constructed object of type \p minstd_rand0 + * shall produce the value \c 1043618065 . + */ +using minstd_rand0 = linear_congruential_engine; + +/*! \typedef minstd_rand + * \brief A random number engine with predefined parameters which implements a version of + * the Minimal Standard random number generation algorithm. + * \note The 10000th consecutive invocation of a default-constructed object of type \p minstd_rand + * shall produce the value \c 399268537 . + */ +using minstd_rand = linear_congruential_engine; + +/*! 
\} // predefined_random + */ + +} // namespace random + +// import names into thrust:: +using random::linear_congruential_engine; +using random::minstd_rand; +using random::minstd_rand0; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/linear_feedback_shift_engine.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/linear_feedback_shift_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..77ef62cf48352b04a10bd16504e1a25451e61505 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/linear_feedback_shift_engine.h @@ -0,0 +1,216 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file linear_feedback_shift_engine.h + * \brief A linear feedback shift pseudorandom number generator. + */ + +/* + * Copyright Jens Maurer 2002 + * + * Distributed under the Boost Software License, Version 1.0. 
+ * (See accompanying NOTICE file for the complete license) + * + * For more information, see http://www.boost.org + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include + +#include // for size_t +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_engine_templates + * \{ + */ + +/*! \class linear_feedback_shift_engine + * \brief A \p linear_feedback_shift_engine random number engine produces + * unsigned integer random values using a linear feedback shift random number + * generation algorithm. + * + * \tparam UIntType The type of unsigned integer to produce. + * \tparam w The word size of the produced values (w <= sizeof(UIntType)). + * \tparam k The k parameter of Tausworthe's 1965 algorithm. + * \tparam q The q exponent of Tausworthe's 1965 algorithm. + * \tparam s The step size of Tausworthe's 1965 algorithm. + * + * \note linear_feedback_shift_engine is based on the Boost Template Library's linear_feedback_shift. + */ +template +class linear_feedback_shift_engine +{ +public: + // types + + /*! \typedef result_type + * \brief The type of the unsigned integer produced by this \p linear_feedback_shift_engine. + */ + using result_type = UIntType; + + // engine characteristics + + /*! The word size of the produced values. + */ + static const size_t word_size = w; + + /*! A constant used in the generation algorithm. + */ + static const size_t exponent1 = k; + + /*! A constant used in the generation algorithm. + */ + static const size_t exponent2 = q; + + /*! The step size used in the generation algorithm. + */ + static const size_t step_size = s; + + /*! \cond + */ + +private: + static const result_type wordmask = detail::linear_feedback_shift_engine_wordmask::value; + /*! 
\endcond + */ + +public: + /*! The smallest value this \p linear_feedback_shift_engine may potentially produce. + */ + static const result_type min = 0; + + /*! The largest value this \p linear_feedback_shift_engine may potentially produce. + */ + static const result_type max = wordmask; + + /*! The default seed of this \p linear_feedback_shift_engine. + */ + static const result_type default_seed = 341u; + + // constructors and seeding functions + + /*! This constructor, which optionally accepts a seed, initializes a new + * \p linear_feedback_shift_engine. + * + * \param value The seed used to initialize this \p linear_feedback_shift_engine's state. + */ + _CCCL_HOST_DEVICE explicit linear_feedback_shift_engine(result_type value = default_seed); + + /*! This method initializes this \p linear_feedback_shift_engine's state, and optionally accepts + * a seed value. + * + * \param value The seed used to initializes this \p linear_feedback_shift_engine's state. + */ + _CCCL_HOST_DEVICE void seed(result_type value = default_seed); + + // generating functions + + /*! This member function produces a new random value and updates this \p linear_feedback_shift_engine's state. + * \return A new random number. + */ + _CCCL_HOST_DEVICE result_type operator()(void); + + /*! This member function advances this \p linear_feedback_shift_engine's state a given number of times + * and discards the results. + * + * \param z The number of random values to discard. + * \note This function is provided because an implementation may be able to accelerate it. + */ + _CCCL_HOST_DEVICE void discard(unsigned long long z); + + /*! \cond + */ + +private: + result_type m_value; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const linear_feedback_shift_engine& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + + /*! 
\endcond + */ +}; // end linear_feedback_shift_engine + +/*! This function checks two \p linear_feedback_shift_engines for equality. + * \param lhs The first \p linear_feedback_shift_engine to test. + * \param rhs The second \p linear_feedback_shift_engine to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator==(const linear_feedback_shift_engine& lhs, + const linear_feedback_shift_engine& rhs); + +/*! This function checks two \p linear_feedback_shift_engines for inequality. + * \param lhs The first \p linear_feedback_shift_engine to test. + * \param rhs The second \p linear_feedback_shift_engine to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator!=(const linear_feedback_shift_engine& lhs, + const linear_feedback_shift_engine& rhs); + +/*! This function streams a linear_feedback_shift_engine to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. + * \param e The \p linear_feedback_shift_engine to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const linear_feedback_shift_engine& e); + +/*! This function streams a linear_feedback_shift_engine in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param e The \p linear_feedback_shift_engine to stream in. + * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, linear_feedback_shift_engine& e); + +/*! 
\} // end random_number_engine_templates + */ + +} // namespace random + +// import names into thrust:: +using random::linear_feedback_shift_engine; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/normal_distribution.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/normal_distribution.h new file mode 100644 index 0000000000000000000000000000000000000000..307ffc50a2beb8a70d8efa56a79ab5f532f1a868 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/normal_distribution.h @@ -0,0 +1,257 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file normal_distribution.h + * \brief A normal (Gaussian) distribution of real-valued numbers. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_distributions + * \{ + */ + +/*! \class normal_distribution + * \brief A \p normal_distribution random number distribution produces floating point + * Normally distributed random numbers. 
+ * + * \tparam RealType The type of floating point number to produce. + * + * The following code snippet demonstrates examples of using a \p normal_distribution with a + * random number engine to produce random values drawn from the Normal distribution with a given + * mean and variance: + * + * \code + * #include + * #include + * + * int main() + * { + * // create a minstd_rand object to act as our source of randomness + * thrust::minstd_rand rng; + * + * // create a normal_distribution to produce floats from the Normal distribution + * // with mean 2.0 and standard deviation 3.5 + * thrust::random::normal_distribution dist(2.0f, 3.5f); + * + * // write a random number to standard output + * std::cout << dist(rng) << std::endl; + * + * // write the mean of the distribution, just in case we forgot + * std::cout << dist.mean() << std::endl; + * + * // 2.0 is printed + * + * // and the standard deviation + * std::cout << dist.stddev() << std::endl; + * + * // 3.5 is printed + * + * return 0; + * } + * \endcode + */ +template +class normal_distribution : public detail::normal_distribution_base::type +{ +private: + using super_t = typename detail::normal_distribution_base::type; + +public: + // types + + /*! \typedef result_type + * \brief The type of the floating point number produced by this \p normal_distribution. + */ + using result_type = RealType; + + /*! \typedef param_type + * \brief The type of the object encapsulating this \p normal_distribution's parameters. + */ + using param_type = thrust::pair; + + // constructors and reset functions + + /*! This constructor creates a new \p normal_distribution from two values defining the + * half-open interval of the distribution. + * + * \param mean The mean (expected value) of the distribution. Defaults to \c 0.0. + * \param stddev The standard deviation of the distribution. Defaults to \c 1.0. + */ + _CCCL_HOST_DEVICE explicit normal_distribution(RealType mean = 0.0, RealType stddev = 1.0); + + /*! 
This constructor creates a new \p normal_distribution from a \p param_type object + * encapsulating the range of the distribution. + * + * \param parm A \p param_type object encapsulating the parameters (i.e., the mean and standard deviation) of the + * distribution. + */ + _CCCL_HOST_DEVICE explicit normal_distribution(const param_type& parm); + + /*! Calling this member function guarantees that subsequent uses of this + * \p normal_distribution do not depend on values produced by any random + * number generator prior to invoking this function. + */ + _CCCL_HOST_DEVICE void reset(); + + // generating functions + + /*! This method produces a new Normal random integer drawn from this \p normal_distribution's + * range using a \p UniformRandomNumberGenerator as a source of randomness. + * + * \param urng The \p UniformRandomNumberGenerator to use as a source of randomness. + */ + template + _CCCL_HOST_DEVICE result_type operator()(UniformRandomNumberGenerator& urng); + + /*! This method produces a new Normal random integer as if by creating a new \p normal_distribution + * from the given \p param_type object, and calling its operator() method with the given + * \p UniformRandomNumberGenerator as a source of randomness. + * + * \param urng The \p UniformRandomNumberGenerator to use as a source of randomness. + * \param parm A \p param_type object encapsulating the parameters of the \p normal_distribution + * to draw from. + */ + template + _CCCL_HOST_DEVICE result_type operator()(UniformRandomNumberGenerator& urng, const param_type& parm); + + // property functions + + /*! This method returns the value of the parameter with which this \p normal_distribution + * was constructed. + * + * \return The mean (expected value) of this \p normal_distribution's output. + */ + _CCCL_HOST_DEVICE result_type mean() const; + + /*! This method returns the value of the parameter with which this \p normal_distribution + * was constructed. 
+ * + * \return The standard deviation of this \p uniform_real_distribution's output. + */ + _CCCL_HOST_DEVICE result_type stddev() const; + + /*! This method returns a \p param_type object encapsulating the parameters with which this + * \p normal_distribution was constructed. + * + * \return A \p param_type object encapsulating the parameters (i.e., the mean and standard deviation) of this \p + * normal_distribution. + */ + _CCCL_HOST_DEVICE param_type param() const; + + /*! This method changes the parameters of this \p normal_distribution using the values encapsulated + * in a given \p param_type object. + * + * \param parm A \p param_type object encapsulating the new parameters (i.e., the mean and variance) of this \p + * normal_distribution. + */ + _CCCL_HOST_DEVICE void param(const param_type& parm); + + /*! This method returns the smallest floating point number this \p normal_distribution can potentially produce. + * + * \return The lower bound of this \p normal_distribution's half-open interval. + */ + _CCCL_HOST_DEVICE result_type min THRUST_PREVENT_MACRO_SUBSTITUTION() const; + + /*! This method returns the smallest number larger than largest floating point number this \p + * uniform_real_distribution can potentially produce. + * + * \return The upper bound of this \p normal_distribution's half-open interval. + */ + _CCCL_HOST_DEVICE result_type max THRUST_PREVENT_MACRO_SUBSTITUTION() const; + + /*! \cond + */ + +private: + param_type m_param; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const normal_distribution& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + /*! \endcond + */ +}; // end normal_distribution + +/*! This function checks two \p normal_distributions for equality. + * \param lhs The first \p normal_distribution to test. + * \param rhs The second \p normal_distribution to test. 
+ * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator==(const normal_distribution& lhs, const normal_distribution& rhs); + +/*! This function checks two \p normal_distributions for inequality. + * \param lhs The first \p normal_distribution to test. + * \param rhs The second \p normal_distribution to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator!=(const normal_distribution& lhs, const normal_distribution& rhs); + +/*! This function streams a normal_distribution to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. + * \param d The \p normal_distribution to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const normal_distribution& d); + +/*! This function streams a normal_distribution in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param d The \p normal_distribution to stream in. + * \return \p is + */ +template +std::basic_istream& operator>>(std::basic_istream& is, normal_distribution& d); + +/*! 
\} // end random_number_distributions + */ + +} // namespace random + +using random::normal_distribution; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/subtract_with_carry_engine.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/subtract_with_carry_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..c1b85f682ba22d59437779e23799eb05beae9d27 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/subtract_with_carry_engine.h @@ -0,0 +1,246 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file subtract_with_carry_engine.h + * \brief A subtract-with-carry pseudorandom number generator + * based on Marsaglia & Zaman. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#include // for size_t +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_engine_templates + * \{ + */ + +/*! 
\class subtract_with_carry_engine + * \brief A \p subtract_with_carry_engine random number engine produces unsigned + * integer random numbers using the subtract with carry algorithm of Marsaglia & Zaman. + * + * The generation algorithm is performed as follows: + * -# Let Y = X_{i-s}- X_{i-r} - c. + * -# Set X_i to y = T mod m. Set \c c to \c 1 if Y < 0, otherwise set \c c to \c 0. + * + * This algorithm corresponds to a modular linear function of the form + * + * TA(x_i) = (a * x_i) mod b, where \c b is of the form m^r - m^s + 1 and + * a = b - (b-1)/m. + * + * \tparam UIntType The type of unsigned integer to produce. + * \tparam w The word size of the produced values ( w <= sizeof(UIntType)). + * \tparam s The short lag of the generation algorithm. + * \tparam r The long lag of the generation algorithm. + * + * \note Inexperienced users should not use this class template directly. Instead, use + * \p ranlux24_base or \p ranlux48_base, which are instances of \p subtract_with_carry_engine. + * + * \see thrust::random::ranlux24_base + * \see thrust::random::ranlux48_base + */ +template +class subtract_with_carry_engine +{ + /*! \cond + */ + +private: + static const UIntType modulus = UIntType(1) << w; + /*! \endcond + */ + +public: + // types + + /*! \typedef result_type + * \brief The type of the unsigned integer produced by this \p subtract_with_carry_engine. + */ + using result_type = UIntType; + + // engine characteristics + + /*! The word size of the produced values. + */ + static const size_t word_size = w; + + /*! The size of the short lag used in the generation algorithm. + */ + static const size_t short_lag = s; + + /*! The size of the long lag used in the generation algorithm. + */ + static const size_t long_lag = r; + + /*! The smallest value this \p subtract_with_carry_engine may potentially produce. + */ + static const result_type min = 0; + + /*! The largest value this \p subtract_with_carry_engine may potentially produce. 
+ */ + static const result_type max = modulus - 1; + + /*! The default seed of this \p subtract_with_carry_engine. + */ + static const result_type default_seed = 19780503u; + + // constructors and seeding functions + + /*! This constructor, which optionally accepts a seed, initializes a new + * \p subtract_with_carry_engine. + * + * \param value The seed used to initialize this \p subtract_with_carry_engine's state. + */ + _CCCL_HOST_DEVICE explicit subtract_with_carry_engine(result_type value = default_seed); + + /*! This method initializes this \p subtract_with_carry_engine's state, and optionally accepts + * a seed value. + * + * \param value The seed used to initializes this \p subtract_with_carry_engine's state. + */ + _CCCL_HOST_DEVICE void seed(result_type value = default_seed); + + // generating functions + + /*! This member function produces a new random value and updates this \p subtract_with_carry_engine's state. + * \return A new random number. + */ + _CCCL_HOST_DEVICE result_type operator()(void); + + /*! This member function advances this \p subtract_with_carry_engine's state a given number of times + * and discards the results. + * + * \param z The number of random values to discard. + * \note This function is provided because an implementation may be able to accelerate it. + */ + _CCCL_HOST_DEVICE void discard(unsigned long long z); + + /*! \cond + */ + +private: + result_type m_x[long_lag]; + unsigned int m_k; + int m_carry; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const subtract_with_carry_engine& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + + /*! \endcond + */ +}; // end subtract_with_carry_engine + +/*! This function checks two \p subtract_with_carry_engines for equality. + * \param lhs The first \p subtract_with_carry_engine to test. 
+ * \param rhs The second \p subtract_with_carry_engine to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator==(const subtract_with_carry_engine& lhs, + const subtract_with_carry_engine& rhs); + +/*! This function checks two \p subtract_with_carry_engines for inequality. + * \param lhs The first \p subtract_with_carry_engine to test. + * \param rhs The second \p subtract_with_carry_engine to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator!=(const subtract_with_carry_engine& lhs, + const subtract_with_carry_engine& rhs); + +/*! This function streams a subtract_with_carry_engine to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. + * \param e The \p subtract_with_carry_engine to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const subtract_with_carry_engine& e); + +/*! This function streams a subtract_with_carry_engine in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param e The \p subtract_with_carry_engine to stream in. + * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, subtract_with_carry_engine& e); + +/*! \} // end random_number_engine_templates + */ + +/*! \addtogroup predefined_random + * \{ + */ + +// XXX N2111 uses uint_fast32_t here + +/*! \typedef ranlux24_base + * \brief A random number engine with predefined parameters which implements the + * base engine of the \p ranlux24 random number engine. + * \note The 10000th consecutive invocation of a default-constructed object of type \p ranlux24_base + * shall produce the value \c 7937952 . + */ +using ranlux24_base = subtract_with_carry_engine; + +// XXX N2111 uses uint_fast64_t here + +/*! 
\typedef ranlux48_base + * \brief A random number engine with predefined parameters which implements the + * base engine of the \p ranlux48 random number engine. + * \note The 10000th consecutive invocation of a default-constructed object of type \p ranlux48_base + * shall produce the value \c 192113843633948 . + */ +using ranlux48_base = subtract_with_carry_engine; + +/*! \} // end predefined_random + */ + +} // namespace random + +// import names into thrust:: +using random::ranlux24_base; +using random::ranlux48_base; +using random::subtract_with_carry_engine; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/uniform_int_distribution.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/uniform_int_distribution.h new file mode 100644 index 0000000000000000000000000000000000000000..fe69a0f43fc3a6e40353f1f0e0a0efb08ce14af8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/uniform_int_distribution.h @@ -0,0 +1,260 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! 
\file uniform_int_distribution.h + * \brief A uniform distribution of integer-valued numbers + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_distributions Random Number Distributions Class Templates + * \ingroup random + * \{ + */ + +/*! \class uniform_int_distribution + * \brief A \p uniform_int_distribution random number distribution produces signed or unsigned integer + * uniform random numbers from a given range. + * + * \tparam IntType The type of integer to produce. + * + * The following code snippet demonstrates examples of using a \p uniform_int_distribution with a + * random number engine to produce random integers drawn from a given range: + * + * \code + * #include + * #include + * + * int main() + * { + * // create a minstd_rand object to act as our source of randomness + * thrust::minstd_rand rng; + * + * // create a uniform_int_distribution to produce ints from [-7,13] + * thrust::uniform_int_distribution dist(-7,13); + * + * // write a random number from the range [-7,13] to standard output + * std::cout << dist(rng) << std::endl; + * + * // write the range of the distribution, just in case we forgot + * std::cout << dist.min() << std::endl; + * + * // -7 is printed + * + * std::cout << dist.max() << std::endl; + * + * // 13 is printed + * + * // write the parameters of the distribution (which happen to be the bounds) to standard output + * std::cout << dist.a() << std::endl; + * + * // -7 is printed + * + * std::cout << dist.b() << std::endl; + * + * // 13 is printed + * + * return 0; + * } + * \endcode + */ +template +class uniform_int_distribution +{ +public: + // types + + /*! 
\typedef result_type + * \brief The type of the integer produced by this \p uniform_int_distribution. + */ + using result_type = IntType; + + /*! \typedef param_type + * \brief The type of the object encapsulating this \p uniform_int_distribution's parameters. + */ + using param_type = thrust::pair; + + // constructors and reset functions + + /*! This constructor creates a new \p uniform_int_distribution from two values defining the + * range of the distribution. + * + * \param a The smallest integer to potentially produce. Defaults to \c 0. + * \param b The largest integer to potentially produce. Defaults to the largest representable integer in + * the platform. + */ + _CCCL_HOST_DEVICE explicit uniform_int_distribution( + IntType a = 0, IntType b = THRUST_NS_QUALIFIER::detail::integer_traits::const_max); + + /*! This constructor creates a new \p uniform_int_distribution from a \p param_type object + * encapsulating the range of the distribution. + * + * \param parm A \p param_type object encapsulating the parameters (i.e., the range) of the distribution. + */ + _CCCL_HOST_DEVICE explicit uniform_int_distribution(const param_type& parm); + + /*! This does nothing. It is included to conform to the requirements of the RandomDistribution concept. + */ + _CCCL_HOST_DEVICE void reset(); + + // generating functions + + /*! This method produces a new uniform random integer drawn from this \p uniform_int_distribution's + * range using a \p UniformRandomNumberGenerator as a source of randomness. + * + * \param urng The \p UniformRandomNumberGenerator to use as a source of randomness. + */ + template + _CCCL_HOST_DEVICE result_type operator()(UniformRandomNumberGenerator& urng); + + /*! This method produces a new uniform random integer as if by creating a new \p uniform_int_distribution + * from the given \p param_type object, and calling its operator() method with the given + * \p UniformRandomNumberGenerator as a source of randomness. 
+ * + * \param urng The \p UniformRandomNumberGenerator to use as a source of randomness. + * \param parm A \p param_type object encapsulating the parameters of the \p uniform_int_distribution + * to draw from. + */ + template + _CCCL_HOST_DEVICE result_type operator()(UniformRandomNumberGenerator& urng, const param_type& parm); + + // property functions + + /*! This method returns the value of the parameter with which this \p uniform_int_distribution + * was constructed. + * + * \return The lower bound of this \p uniform_int_distribution's range. + */ + _CCCL_HOST_DEVICE result_type a() const; + + /*! This method returns the value of the parameter with which this \p uniform_int_distribution + * was constructed. + * + * \return The upper bound of this \p uniform_int_distribution's range. + */ + _CCCL_HOST_DEVICE result_type b() const; + + /*! This method returns a \p param_type object encapsulating the parameters with which this + * \p uniform_int_distribution was constructed. + * + * \return A \p param_type object enapsulating the range of this \p uniform_int_distribution. + */ + _CCCL_HOST_DEVICE param_type param() const; + + /*! This method changes the parameters of this \p uniform_int_distribution using the values encapsulated + * in a given \p param_type object. + * + * \param parm A \p param_type object encapsulating the new range of this \p uniform_int_distribution. + */ + _CCCL_HOST_DEVICE void param(const param_type& parm); + + /*! This method returns the smallest integer this \p uniform_int_distribution can potentially produce. + * + * \return The lower bound of this \p uniform_int_distribution's range. + */ + _CCCL_HOST_DEVICE result_type min THRUST_PREVENT_MACRO_SUBSTITUTION() const; + + /*! This method returns the largest integer this \p uniform_int_distribution can potentially produce. + * + * \return The upper bound of this \p uniform_int_distribution's range. + */ + _CCCL_HOST_DEVICE result_type max THRUST_PREVENT_MACRO_SUBSTITUTION() const; + + /*! 
\cond + */ + +private: + param_type m_param; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const uniform_int_distribution& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + /*! \endcond + */ +}; // end uniform_int_distribution + +/*! This function checks two \p uniform_int_distributions for equality. + * \param lhs The first \p uniform_int_distribution to test. + * \param rhs The second \p uniform_int_distribution to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool +operator==(const uniform_int_distribution& lhs, const uniform_int_distribution& rhs); + +/*! This function checks two \p uniform_int_distributions for inequality. + * \param lhs The first \p uniform_int_distribution to test. + * \param rhs The second \p uniform_int_distribution to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool +operator!=(const uniform_int_distribution& lhs, const uniform_int_distribution& rhs); + +/*! This function streams a uniform_int_distribution to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. + * \param d The \p uniform_int_distribution to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const uniform_int_distribution& d); + +/*! This function streams a uniform_int_distribution in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param d The \p uniform_int_distribution to stream in. + * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, uniform_int_distribution& d); + +/*! 
\} // end random_number_distributions + */ + +} // namespace random + +using random::uniform_int_distribution; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/uniform_real_distribution.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/uniform_real_distribution.h new file mode 100644 index 0000000000000000000000000000000000000000..3bfc00f26d81165392b963c812fd6d8cdacbd37c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/uniform_real_distribution.h @@ -0,0 +1,258 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file uniform_real_distribution.h + * \brief A uniform distribution of real-valued numbers + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_distributions + * \{ + */ + +/*! \class uniform_real_distribution + * \brief A \p uniform_real_distribution random number distribution produces floating point + * uniform random numbers from a half-open interval. 
+ * + * \tparam RealType The type of floating point number to produce. + * + * The following code snippet demonstrates examples of using a \p uniform_real_distribution with a + * random number engine to produce random integers drawn from a given range: + * + * \code + * #include + * #include + * + * int main() + * { + * // create a minstd_rand object to act as our source of randomness + * thrust::minstd_rand rng; + * + * // create a uniform_real_distribution to produce floats from [-7,13) + * thrust::uniform_real_distribution dist(-7,13); + * + * // write a random number from the range [-7,13) to standard output + * std::cout << dist(rng) << std::endl; + * + * // write the range of the distribution, just in case we forgot + * std::cout << dist.min() << std::endl; + * + * // -7.0 is printed + * + * std::cout << dist.max() << std::endl; + * + * // 13.0 is printed + * + * // write the parameters of the distribution (which happen to be the bounds) to standard output + * std::cout << dist.a() << std::endl; + * + * // -7.0 is printed + * + * std::cout << dist.b() << std::endl; + * + * // 13.0 is printed + * + * return 0; + * } + * \endcode + */ +template +class uniform_real_distribution +{ +public: + // types + + /*! \typedef result_type + * \brief The type of the floating point number produced by this \p uniform_real_distribution. + */ + using result_type = RealType; + + /*! \typedef param_type + * \brief The type of the object encapsulating this \p uniform_real_distribution's parameters. + */ + using param_type = thrust::pair; + + // constructors and reset functions + + /*! This constructor creates a new \p uniform_real_distribution from two values defining the + * half-open interval of the distribution. + * + * \param a The smallest floating point number to potentially produce. Defaults to \c 0.0. + * \param b The smallest number larger than the largest floating point number to potentially produce. Defaults to + * \c 1.0. 
+ */ + _CCCL_HOST_DEVICE explicit uniform_real_distribution(RealType a = 0.0, RealType b = 1.0); + + /*! This constructor creates a new \p uniform_real_distribution from a \p param_type object + * encapsulating the range of the distribution. + * + * \param parm A \p param_type object encapsulating the parameters (i.e., the range) of the distribution. + */ + _CCCL_HOST_DEVICE explicit uniform_real_distribution(const param_type& parm); + + /*! This does nothing. It is included to conform to the requirements of the RandomDistribution concept. + */ + _CCCL_HOST_DEVICE void reset(); + + // generating functions + + /*! This method produces a new uniform random integer drawn from this \p uniform_real_distribution's + * range using a \p UniformRandomNumberGenerator as a source of randomness. + * + * \param urng The \p UniformRandomNumberGenerator to use as a source of randomness. + */ + template + _CCCL_HOST_DEVICE result_type operator()(UniformRandomNumberGenerator& urng); + + /*! This method produces a new uniform random integer as if by creating a new \p uniform_real_distribution + * from the given \p param_type object, and calling its operator() method with the given + * \p UniformRandomNumberGenerator as a source of randomness. + * + * \param urng The \p UniformRandomNumberGenerator to use as a source of randomness. + * \param parm A \p param_type object encapsulating the parameters of the \p uniform_real_distribution + * to draw from. + */ + template + _CCCL_HOST_DEVICE result_type operator()(UniformRandomNumberGenerator& urng, const param_type& parm); + + // property functions + + /*! This method returns the value of the parameter with which this \p uniform_real_distribution + * was constructed. + * + * \return The lower bound of this \p uniform_real_distribution's half-open interval. + */ + _CCCL_HOST_DEVICE result_type a() const; + + /*! This method returns the value of the parameter with which this \p uniform_real_distribution + * was constructed. 
+ * + * \return The upper bound of this \p uniform_real_distribution's half-open interval. + */ + _CCCL_HOST_DEVICE result_type b() const; + + /*! This method returns a \p param_type object encapsulating the parameters with which this + * \p uniform_real_distribution was constructed. + * + * \return A \p param_type object enapsulating the half-open interval of this \p uniform_real_distribution. + */ + _CCCL_HOST_DEVICE param_type param() const; + + /*! This method changes the parameters of this \p uniform_real_distribution using the values encapsulated + * in a given \p param_type object. + * + * \param parm A \p param_type object encapsulating the new half-open interval of this \p uniform_real_distribution. + */ + _CCCL_HOST_DEVICE void param(const param_type& parm); + + /*! This method returns the smallest floating point number this \p uniform_real_distribution can potentially produce. + * + * \return The lower bound of this \p uniform_real_distribution's half-open interval. + */ + _CCCL_HOST_DEVICE result_type min THRUST_PREVENT_MACRO_SUBSTITUTION() const; + + /*! This method returns the smallest number larger than largest floating point number this \p + * uniform_real_distribution can potentially produce. + * + * \return The upper bound of this \p uniform_real_distribution's half-open interval. + */ + _CCCL_HOST_DEVICE result_type max THRUST_PREVENT_MACRO_SUBSTITUTION() const; + + /*! \cond + */ + +private: + param_type m_param; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const uniform_real_distribution& rhs) const; + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + /*! \endcond + */ +}; // end uniform_real_distribution + +/*! This function checks two \p uniform_real_distributions for equality. + * \param lhs The first \p uniform_real_distribution to test. 
+ * \param rhs The second \p uniform_real_distribution to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool +operator==(const uniform_real_distribution& lhs, const uniform_real_distribution& rhs); + +/*! This function checks two \p uniform_real_distributions for inequality. + * \param lhs The first \p uniform_real_distribution to test. + * \param rhs The second \p uniform_real_distribution to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool +operator!=(const uniform_real_distribution& lhs, const uniform_real_distribution& rhs); + +/*! This function streams a uniform_real_distribution to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. + * \param d The \p uniform_real_distribution to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const uniform_real_distribution& d); + +/*! This function streams a uniform_real_distribution in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param d The \p uniform_real_distribution to stream in. + * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, uniform_real_distribution& d); + +/*! 
\} // end random_number_distributions + */ + +} // namespace random + +using random::uniform_real_distribution; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/xor_combine_engine.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/xor_combine_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..2e49766ff91e9b87c4fe83816ec92b4da94719f5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/random/xor_combine_engine.h @@ -0,0 +1,253 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file xor_combine_engine.h + * \brief A pseudorandom number generator which produces pseudorandom + * numbers from two integer base engines by merging their + * pseudorandom numbers with bitwise exclusive-or. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include // for size_t +#include + +THRUST_NAMESPACE_BEGIN + +namespace random +{ + +/*! \addtogroup random_number_engine_adaptors + * \{ + */ + +/*! 
\class xor_combine_engine + * \brief An \p xor_combine_engine adapts two existing base random number engines and + * produces random values by combining the values produced by each. + * + * \tparam Engine1 The type of the first base random number engine to adapt. + * \tparam s1 The size of the first shift to use in the generation algorithm. + * \tparam Engine2 The type of the second base random number engine to adapt. + * \tparam s2 The second of the second shift to use in the generation algorithm. Defaults to \c 0. + * + * The following code snippet shows an example of using an \p xor_combine_engine instance: + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * // create an xor_combine_engine from minstd_rand and minstd_rand0 + * // use a shift of 0 for each + * thrust::xor_combine_engine rng; + * + * // print a random number to standard output + * std::cout << rng() << std::endl; + * + * return 0; + * } + * \endcode + */ +template +class xor_combine_engine +{ +public: + // types + + /*! \typedef base1_type + * \brief The type of the first adapted base random number engine. + */ + using base1_type = Engine1; + + /*! \typedef base2_type + * \brief The type of the second adapted base random number engine. + */ + using base2_type = Engine2; + + /*! \typedef result_type + * \brief The type of the unsigned integer produced by this \p xor_combine_engine. + */ + using result_type = typename thrust::detail::eval_if< + (sizeof(typename base2_type::result_type) > sizeof(typename base1_type::result_type)), + thrust::detail::identity_, + thrust::detail::identity_>::type; + + /*! The size of the first shift used in the generation algorithm. + */ + static const size_t shift1 = s1; + + /*! The size of the second shift used in the generation algorithm. + */ + static const size_t shift2 = s2; + + /*! The smallest value this \p xor_combine_engine may potentially produce. + */ + static const result_type min = 0; + + /*! 
The largest value this \p xor_combine_engine may potentially produce. + */ + static const result_type max = detail::xor_combine_engine_max::value; + + // constructors and seeding functions + + /*! This constructor constructs a new \p xor_combine_engine and constructs + * its adapted engines using their null constructors. + */ + _CCCL_HOST_DEVICE xor_combine_engine(); + + /*! This constructor constructs a new \p xor_combine_engine using + * given \p base1_type and \p base2_type engines to initialize its adapted base engines. + * + * \param urng1 A \p base1_type to use to initialize this \p xor_combine_engine's + * first adapted base engine. + * \param urng2 A \p base2_type to use to initialize this \p xor_combine_engine's + * first adapted base engine. + */ + _CCCL_HOST_DEVICE xor_combine_engine(const base1_type& urng1, const base2_type& urng2); + + /*! This constructor initializes a new \p xor_combine_engine with a given seed. + * + * \param s The seed used to initialize this \p xor_combine_engine's adapted base engines. + */ + _CCCL_HOST_DEVICE xor_combine_engine(result_type s); + + /*! This method initializes the state of this \p xor_combine_engine's adapted base engines + * by using their \p default_seed values. + */ + _CCCL_HOST_DEVICE void seed(); + + /*! This method initializes the state of this \p xor_combine_engine's adapted base engines + * by using the given seed. + * + * \param s The seed with which to initialize this \p xor_combine_engine's adapted base engines. + */ + _CCCL_HOST_DEVICE void seed(result_type s); + + // generating functions + + /*! This member function produces a new random value and updates this \p xor_combine_engine's state. + * \return A new random number. + */ + _CCCL_HOST_DEVICE result_type operator()(void); + + /*! This member function advances this \p xor_combine_engine's state a given number of times + * and discards the results. + * + * \param z The number of random values to discard. 
+ * \note This function is provided because an implementation may be able to accelerate it. + */ + _CCCL_HOST_DEVICE void discard(unsigned long long z); + + // property functions + + /*! This member function returns a const reference to this \p xor_combine_engine's + * first adapted base engine. + * + * \return A const reference to the first base engine this \p xor_combine_engine adapts. + */ + _CCCL_HOST_DEVICE const base1_type& base1() const; + + /*! This member function returns a const reference to this \p xor_combine_engine's + * second adapted base engine. + * + * \return A const reference to the second base engine this \p xor_combine_engine adapts. + */ + _CCCL_HOST_DEVICE const base2_type& base2() const; + + /*! \cond + */ + +private: + base1_type m_b1; + base2_type m_b2; + + friend struct thrust::random::detail::random_core_access; + + _CCCL_HOST_DEVICE bool equal(const xor_combine_engine& rhs) const; + + template + std::basic_istream& stream_in(std::basic_istream& is); + + template + std::basic_ostream& stream_out(std::basic_ostream& os) const; + + /*! \endcond + */ +}; // end xor_combine_engine + +/*! This function checks two \p xor_combine_engines for equality. + * \param lhs The first \p xor_combine_engine to test. + * \param rhs The second \p xor_combine_engine to test. + * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator==(const xor_combine_engine& lhs, + const xor_combine_engine& rhs); + +/*! This function checks two \p xor_combine_engines for inequality. + * \param lhs The first \p xor_combine_engine to test. + * \param rhs The second \p xor_combine_engine to test. + * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. + */ +template +_CCCL_HOST_DEVICE bool operator!=(const xor_combine_engine& lhs, + const xor_combine_engine& rhs); + +/*! This function streams a xor_combine_engine to a \p std::basic_ostream. + * \param os The \p basic_ostream to stream out to. 
+ * \param e The \p xor_combine_engine to stream out. + * \return \p os + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const xor_combine_engine& e); + +/*! This function streams a xor_combine_engine in from a std::basic_istream. + * \param is The \p basic_istream to stream from. + * \param e The \p xor_combine_engine to stream in. + * \return \p is + */ +template +std::basic_istream& +operator>>(std::basic_istream& is, xor_combine_engine& e); + +/*! \} // end random_number_engine_adaptors + */ + +} // namespace random + +// import names into thrust:: +using random::xor_combine_engine; + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/is_execution_policy.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/is_execution_policy.h new file mode 100644 index 0000000000000000000000000000000000000000..8f61c42eb4dd132c67cc750d7adb666a1684d254 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/is_execution_policy.h @@ -0,0 +1,67 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief A type trait that determines if a type is an \a ExecutionPolicy. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup utility + * \{ + */ + +/*! \addtogroup type_traits Type Traits + * \{ + */ + +/*! \brief UnaryTypeTrait + * that returns \c true_type if \c T is an \a ExecutionPolicy and \c false_type + * otherwise. + */ +template +using is_execution_policy = ::cuda::std::is_base_of; + +#if _CCCL_STD_VER >= 2014 +/*! \brief constexpr bool that is \c true if \c T is an + * \a ExecutionPolicy and \c false otherwise. + */ +template +constexpr bool is_execution_policy_v = is_execution_policy::value; +#endif + +/*! \} // type traits + */ + +/*! \} // utility + */ + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/is_trivially_relocatable.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/is_trivially_relocatable.h new file mode 100644 index 0000000000000000000000000000000000000000..8566a5105785358525cac9bd75cef60c4fb0c064 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/is_trivially_relocatable.h @@ -0,0 +1,313 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief P1144's proposed + * \c std::is_trivially_relocatable, an extensible type trait indicating + * whether a type can be bitwise copied with a facility like + * std::memcpy. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include +#include + +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup utility + * \{ + */ + +/*! \addtogroup type_traits Type Traits + * \{ + */ + +/*! \cond + */ + +namespace detail +{ + +template +struct is_trivially_relocatable_impl; + +} // namespace detail + +/*! \endcond + */ + +/*! \brief UnaryTypeTrait + * that returns \c true_type if \c T is + * TriviallyRelocatable, + * aka can be bitwise copied with a facility like + * std::memcpy, + * and \c false_type otherwise. + * + * \see is_trivially_relocatable_v + * \see is_trivially_relocatable_to + * \see is_indirectly_trivially_relocatable_to + * \see proclaim_trivially_relocatable + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +using is_trivially_relocatable = detail::is_trivially_relocatable_impl; + +#if _CCCL_STD_VER >= 2014 +/*! \brief constexpr bool that is \c true if \c T is + * TriviallyRelocatable, + * aka can be bitwise copied with a facility like + * std::memcpy, + * and \c false otherwise. + * + * \see is_trivially_relocatable + * \see is_trivially_relocatable_to + * \see is_indirectly_trivially_relocatable_to + * \see proclaim_trivially_relocatable + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +constexpr bool is_trivially_relocatable_v = is_trivially_relocatable::value; +#endif + +/*! 
\brief BinaryTypeTrait + * that returns \c true_type if \c From is + * TriviallyRelocatable, + * to \c To, aka can be bitwise copied with a facility like + * std::memcpy, + * and \c false_type otherwise. + * + * \see is_trivially_relocatable_to_v + * \see is_trivially_relocatable + * \see is_indirectly_trivially_relocatable_to + * \see proclaim_trivially_relocatable + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +using is_trivially_relocatable_to = + integral_constant::value && is_trivially_relocatable::value>; + +#if _CCCL_STD_VER >= 2014 +/*! \brief constexpr bool that is \c true if \c From is + * TriviallyRelocatable, + * to \c To, aka can be bitwise copied with a facility like + * std::memcpy, + * and \c false otherwise. + * + * \see is_trivially_relocatable_to + * \see is_trivially_relocatable + * \see is_indirectly_trivially_relocatable_to + * \see proclaim_trivially_relocatable + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +constexpr bool is_trivially_relocatable_to_v = is_trivially_relocatable_to::value; +#endif + +/*! \brief BinaryTypeTrait + * that returns \c true_type if the element type of \c FromIterator is + * TriviallyRelocatable, + * to the element type of \c ToIterator, aka can be bitwise copied with a + * facility like + * std::memcpy, + * and \c false_type otherwise. + * + * \see is_indirectly_trivially_relocatable_to_v + * \see is_trivially_relocatable + * \see is_trivially_relocatable_to + * \see proclaim_trivially_relocatable + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +using is_indirectly_trivially_relocatable_to = + integral_constant::value && is_contiguous_iterator::value + && is_trivially_relocatable_to::value_type, + typename thrust::iterator_traits::value_type>::value>; + +#if _CCCL_STD_VER >= 2014 +/*! 
\brief constexpr bool that is \c true if the element type of + * \c FromIterator is + * TriviallyRelocatable, + * to the element type of \c ToIterator, aka can be bitwise copied with a + * facility like + * std::memcpy, + * and \c false otherwise. + * + * \see is_indirectly_trivially_relocatable_to + * \see is_trivially_relocatable + * \see is_trivially_relocatable_to + * \see proclaim_trivially_relocatable + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +constexpr bool is_indirectly_trivially_relocate_to_v = + is_indirectly_trivially_relocatable_to::value; +#endif + +/*! \brief customization point + * that can be specialized customized to indicate that a type \c T is + * TriviallyRelocatable, + * aka it can be bitwise copied with a facility like + * std::memcpy. + * + * \see is_indirectly_trivially_relocatable_to + * \see is_trivially_relocatable + * \see is_trivially_relocatable_to + * \see THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE + */ +template +struct proclaim_trivially_relocatable : false_type +{}; + +/*! \brief Declares that the type \c T is + * TriviallyRelocatable, + * aka it can be bitwise copied with a facility like + * std::memcpy, + * by specializing \c proclaim_trivially_relocatable. + * + * \see is_indirectly_trivially_relocatable_to + * \see is_trivially_relocatable + * \see is_trivially_relocatable_to + * \see proclaim_trivially_relocatable + */ +#define THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(T) \ + THRUST_NAMESPACE_BEGIN \ + template <> \ + struct proclaim_trivially_relocatable : THRUST_NS_QUALIFIER::true_type \ + {}; \ + THRUST_NAMESPACE_END \ + /**/ + +/////////////////////////////////////////////////////////////////////////////// + +/*! 
\cond + */ + +namespace detail +{ + +// https://wg21.link/P1144R0#wording-inheritance +template +struct is_trivially_relocatable_impl + : integral_constant::value || proclaim_trivially_relocatable::value> +{}; + +template +struct is_trivially_relocatable_impl : is_trivially_relocatable_impl +{}; + +} // namespace detail + +THRUST_NAMESPACE_END + +#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA + +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(char1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(char2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(char3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(char4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uchar1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uchar2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uchar3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uchar4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(short1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(short2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(short3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(short4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ushort1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ushort2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ushort3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ushort4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(int1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(int2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(int3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(int4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uint1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uint2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uint3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(uint4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(long1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(long2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(long3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(long4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulong1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulong2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulong3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulong4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(longlong1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(longlong2) 
+THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(longlong3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(longlong4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulonglong1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulonglong2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulonglong3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(ulonglong4) + +struct __half; +struct __half2; + +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(__half) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(__half2) + +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(float1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(float2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(float3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(float4) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(double1) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(double2) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(double3) +THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(double4) +#endif + +THRUST_NAMESPACE_BEGIN +template +struct proclaim_trivially_relocatable<::cuda::std::pair> + : ::cuda::std::conjunction, is_trivially_relocatable> +{}; + +template +struct proclaim_trivially_relocatable<::cuda::std::tuple> + : ::cuda::std::conjunction...> +{}; +THRUST_NAMESPACE_END + +/*! \endcond + */ + +/////////////////////////////////////////////////////////////////////////////// + +/*! \} // type traits + */ + +/*! \} // utility + */ diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/void_t.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/void_t.h new file mode 100644 index 0000000000000000000000000000000000000000..a893c4fb507db14a9efe7f11bf01bef1a0687827 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/thrust/thrust/type_traits/void_t.h @@ -0,0 +1,58 @@ +/* + * Copyright 2018-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief C++17's `void_t`. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup utility + * \{ + */ + +/*! \addtogroup type_traits Type Traits + * \{ + */ + +template +struct CCCL_DEPRECATED_BECAUSE("Use ::cuda::std::void_t") voider +{ + using type = void; +}; + +template +using void_t CCCL_DEPRECATED_BECAUSE("Use ::cuda::std::void_t") = void; + +/*! \} // type traits + */ + +/*! \} // utility + */ + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_dlpack/LICENSE b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_dlpack/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..20a9c8a7b4dce845c8bfa24bf5cd8fbd6beab479 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_dlpack/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 by Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_dlpack/dlpack.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_dlpack/dlpack.h new file mode 100644 index 0000000000000000000000000000000000000000..bcb77949a8daa22ca60285f183a506a81433a1cc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_dlpack/dlpack.h @@ -0,0 +1,332 @@ +/*! + * Copyright (c) 2017 by Contributors + * \file dlpack.h + * \brief The common header of DLPack. + */ +#ifndef DLPACK_DLPACK_H_ +#define DLPACK_DLPACK_H_ + +/** + * \brief Compatibility with C++ + */ +#ifdef __cplusplus +#define DLPACK_EXTERN_C extern "C" +#else +#define DLPACK_EXTERN_C +#endif + +/*! \brief The current major version of dlpack */ +#define DLPACK_MAJOR_VERSION 1 + +/*! 
\brief The current minor version of dlpack */ +#define DLPACK_MINOR_VERSION 0 + +/*! \brief DLPACK_DLL prefix for windows */ +#ifdef _WIN32 +#ifdef DLPACK_EXPORTS +#define DLPACK_DLL __declspec(dllexport) +#else +#define DLPACK_DLL __declspec(dllimport) +#endif +#else +#define DLPACK_DLL +#endif + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \brief The DLPack version. + * + * A change in major version indicates that we have changed the + * data layout of the ABI - DLManagedTensorVersioned. + * + * A change in minor version indicates that we have added new + * code, such as a new device type, but the ABI is kept the same. + * + * If an obtained DLPack tensor has a major version that disagrees + * with the version number specified in this header file + * (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter + * (and it is safe to do so). It is not safe to access any other fields + * as the memory layout will have changed. + * + * In the case of a minor version mismatch, the tensor can be safely used as + * long as the consumer knows how to interpret all fields. Minor version + * updates indicate the addition of enumeration values. + */ +typedef struct { + /*! \brief DLPack major version. */ + uint32_t major; + /*! \brief DLPack minor version. */ + uint32_t minor; +} DLPackVersion; + +/*! + * \brief The device type in DLDevice. + */ +#ifdef __cplusplus +typedef enum : int32_t { +#else +typedef enum { +#endif + /*! \brief CPU device */ + kDLCPU = 1, + /*! \brief CUDA GPU device */ + kDLCUDA = 2, + /*! + * \brief Pinned CUDA CPU memory by cudaMallocHost + */ + kDLCUDAHost = 3, + /*! \brief OpenCL devices. */ + kDLOpenCL = 4, + /*! \brief Vulkan buffer for next generation graphics. */ + kDLVulkan = 7, + /*! \brief Metal for Apple GPU. */ + kDLMetal = 8, + /*! \brief Verilog simulator buffer */ + kDLVPI = 9, + /*! \brief ROCm GPUs for AMD GPUs */ + kDLROCM = 10, + /*! 
+ * \brief Pinned ROCm CPU memory allocated by hipMallocHost + */ + kDLROCMHost = 11, + /*! + * \brief Reserved extension device type, + * used for quickly test extension device + * The semantics can differ depending on the implementation. + */ + kDLExtDev = 12, + /*! + * \brief CUDA managed/unified memory allocated by cudaMallocManaged + */ + kDLCUDAManaged = 13, + /*! + * \brief Unified shared memory allocated on a oneAPI non-partititioned + * device. Call to oneAPI runtime is required to determine the device + * type, the USM allocation type and the sycl context it is bound to. + * + */ + kDLOneAPI = 14, + /*! \brief GPU support for next generation WebGPU standard. */ + kDLWebGPU = 15, + /*! \brief Qualcomm Hexagon DSP */ + kDLHexagon = 16, + /*! \brief Microsoft MAIA devices */ + kDLMAIA = 17, +} DLDeviceType; + +/*! + * \brief A Device for Tensor and operator. + */ +typedef struct { + /*! \brief The device type used in the device. */ + DLDeviceType device_type; + /*! + * \brief The device index. + * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0. + */ + int32_t device_id; +} DLDevice; + +/*! + * \brief The type code options DLDataType. + */ +typedef enum { + /*! \brief signed integer */ + kDLInt = 0U, + /*! \brief unsigned integer */ + kDLUInt = 1U, + /*! \brief IEEE floating point */ + kDLFloat = 2U, + /*! + * \brief Opaque handle type, reserved for testing purposes. + * Frameworks need to agree on the handle data type for the exchange to be well-defined. + */ + kDLOpaqueHandle = 3U, + /*! \brief bfloat16 */ + kDLBfloat = 4U, + /*! + * \brief complex number + * (C/C++/Python layout: compact struct per complex number) + */ + kDLComplex = 5U, + /*! \brief boolean */ + kDLBool = 6U, +} DLDataTypeCode; + +/*! + * \brief The data type the tensor can hold. The data type is assumed to follow the + * native endian-ness. 
An explicit error message should be raised when attempting to + * export an array with non-native endianness + * + * Examples + * - float: type_code = 2, bits = 32, lanes = 1 + * - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4 + * - int8: type_code = 0, bits = 8, lanes = 1 + * - std::complex: type_code = 5, bits = 64, lanes = 1 + * - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention, the underlying storage size of bool is 8 bits) + */ +typedef struct { + /*! + * \brief Type code of base types. + * We keep it uint8_t instead of DLDataTypeCode for minimal memory + * footprint, but the value should be one of DLDataTypeCode enum values. + * */ + uint8_t code; + /*! + * \brief Number of bits, common choices are 8, 16, 32. + */ + uint8_t bits; + /*! \brief Number of lanes in the type, used for vector types. */ + uint16_t lanes; +} DLDataType; + +/*! + * \brief Plain C Tensor object, does not manage memory. + */ +typedef struct { + /*! + * \brief The data pointer points to the allocated data. This will be CUDA + * device pointer or cl_mem handle in OpenCL. It may be opaque on some device + * types. This pointer is always aligned to 256 bytes as in CUDA. The + * `byte_offset` field should be used to point to the beginning of the data. + * + * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow, + * TVM, perhaps others) do not adhere to this 256 byte aligment requirement + * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed + * (after which this note will be updated); at the moment it is recommended + * to not rely on the data pointer being correctly aligned. 
+ * + * For given DLTensor, the size of memory required to store the contents of + * data is calculated as follows: + * + * \code{.c} + * static inline size_t GetDataSize(const DLTensor* t) { + * size_t size = 1; + * for (tvm_index_t i = 0; i < t->ndim; ++i) { + * size *= t->shape[i]; + * } + * size *= (t->dtype.bits * t->dtype.lanes + 7) / 8; + * return size; + * } + * \endcode + * + * Note that if the tensor is of size zero, then the data pointer should be + * set to `NULL`. + */ + void* data; + /*! \brief The device of the tensor */ + DLDevice device; + /*! \brief Number of dimensions */ + int32_t ndim; + /*! \brief The data type of the pointer*/ + DLDataType dtype; + /*! \brief The shape of the tensor */ + int64_t* shape; + /*! + * \brief strides of the tensor (in number of elements, not bytes) + * can be NULL, indicating tensor is compact and row-majored. + */ + int64_t* strides; + /*! \brief The offset in bytes to the beginning pointer to data */ + uint64_t byte_offset; +} DLTensor; + +/*! + * \brief C Tensor object, manage memory of DLTensor. This data structure is + * intended to facilitate the borrowing of DLTensor by another framework. It is + * not meant to transfer the tensor. When the borrowing framework doesn't need + * the tensor, it should call the deleter to notify the host that the resource + * is no longer needed. + * + * \note This data structure is used as Legacy DLManagedTensor + * in DLPack exchange and is deprecated after DLPack v0.8 + * Use DLManagedTensorVersioned instead. + * This data structure may get renamed or deleted in future versions. + * + * \sa DLManagedTensorVersioned + */ +typedef struct DLManagedTensor { + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; + /*! \brief the context of the original host framework of DLManagedTensor in + * which DLManagedTensor is used in the framework. It can also be NULL. + */ + void * manager_ctx; + /*! 
+ * \brief Destructor - this should be called + * to destruct the manager_ctx which backs the DLManagedTensor. It can be + * NULL if there is no way for the caller to provide a reasonable destructor. + * The destructor deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensor * self); +} DLManagedTensor; + +// bit masks used in in the DLManagedTensorVersioned + +/*! \brief bit mask to indicate that the tensor is read only. */ +#define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) + +/*! + * \brief bit mask to indicate that the tensor is a copy made by the producer. + * + * If set, the tensor is considered solely owned throughout its lifetime by the + * consumer, until the producer-provided deleter is invoked. + */ +#define DLPACK_FLAG_BITMASK_IS_COPIED (1UL << 1UL) + +/*! + * \brief A versioned and managed C Tensor object, manage memory of DLTensor. + * + * This data structure is intended to facilitate the borrowing of DLTensor by + * another framework. It is not meant to transfer the tensor. When the borrowing + * framework doesn't need the tensor, it should call the deleter to notify the + * host that the resource is no longer needed. + * + * \note This is the current standard DLPack exchange data structure. + */ +struct DLManagedTensorVersioned { + /*! + * \brief The API and ABI version of the current managed Tensor + */ + DLPackVersion version; + /*! + * \brief the context of the original host framework. + * + * Stores DLManagedTensorVersioned is used in the + * framework. It can also be NULL. + */ + void *manager_ctx; + /*! + * \brief Destructor. + * + * This should be called to destruct manager_ctx which holds the DLManagedTensorVersioned. + * It can be NULL if there is no way for the caller to provide a reasonable + * destructor. The destructor deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensorVersioned *self); + /*! + * \brief Additional bitmask flags information about the tensor. 
+ * + * By default the flags should be set to 0. + * + * \note Future ABI changes should keep everything until this field + * stable, to ensure that deleter can be correctly called. + * + * \sa DLPACK_FLAG_BITMASK_READ_ONLY + * \sa DLPACK_FLAG_BITMASK_IS_COPIED + */ + uint64_t flags; + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; +}; + +#ifdef __cplusplus +} // DLPACK_EXTERN_C +#endif +#endif // DLPACK_DLPACK_H_ diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/catrig.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/catrig.h new file mode 100644 index 0000000000000000000000000000000000000000..f8c5a7970e7a3056101c927a141d3fb408a02440 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/catrig.h @@ -0,0 +1,732 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2012 Stephen Montgomery-Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Adapted from FreeBSD by Filipe Maia : + * freebsd/lib/msun/src/catrig.c + */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +__host__ __device__ inline void raise_inexact() { + const volatile float tiny = 7.888609052210118054117286e-31; /* 0x1p-100; */ + // needs the volatile to prevent compiler from ignoring it + volatile float junk = 1 + tiny; + (void)junk; +} + +__host__ __device__ inline complex clog_for_large_values(complex z); + +/* + * Testing indicates that all these functions are accurate up to 4 ULP. + * The functions casin(h) and cacos(h) are about 2.5 times slower than asinh. + * The functions catan(h) are a little under 2 times slower than atanh. + * + * The code for casinh, casin, cacos, and cacosh comes first. The code is + * rather complicated, and the four functions are highly interdependent. + * + * The code for catanh and catan comes at the end. 
It is much simpler than + * the other functions, and the code for these can be disconnected from the + * rest of the code. + */ + +/* + * ================================ + * | casinh, casin, cacos, cacosh | + * ================================ + */ + +/* + * The algorithm is very close to that in "Implementing the complex arcsine + * and arccosine functions using exception handling" by T. E. Hull, Thomas F. + * Fairgrieve, and Ping Tak Peter Tang, published in ACM Transactions on + * Mathematical Software, Volume 23 Issue 3, 1997, Pages 299-335, + * http://dl.acm.org/citation.cfm?id=275324. + * + * Throughout we use the convention z = x + I*y. + * + * casinh(z) = sign(x)*log(A+sqrt(A*A-1)) + I*asin(B) + * where + * A = (|z+I| + |z-I|) / 2 + * B = (|z+I| - |z-I|) / 2 = y/A + * + * These formulas become numerically unstable: + * (a) for Re(casinh(z)) when z is close to the line segment [-I, I] (that + * is, Re(casinh(z)) is close to 0); + * (b) for Im(casinh(z)) when z is close to either of the intervals + * [I, I*infinity) or (-I*infinity, -I] (that is, |Im(casinh(z))| is + * close to PI/2). + * + * These numerical problems are overcome by defining + * f(a, b) = (hypot(a, b) - b) / 2 = a*a / (hypot(a, b) + b) / 2 + * Then if A < A_crossover, we use + * log(A + sqrt(A*A-1)) = log1p((A-1) + sqrt((A-1)*(A+1))) + * A-1 = f(x, 1+y) + f(x, 1-y) + * and if B > B_crossover, we use + * asin(B) = atan2(y, sqrt(A*A - y*y)) = atan2(y, sqrt((A+y)*(A-y))) + * A-y = f(x, y+1) + f(x, y-1) + * where without loss of generality we have assumed that x and y are + * non-negative. + * + * Much of the difficulty comes because the intermediate computations may + * produce overflows or underflows. This is dealt with in the paper by Hull + * et al by using exception handling. We do this by detecting when + * computations risk underflow or overflow. The hardest part is handling the + * underflows when computing f(a, b). 
+ * + * Note that the function f(a, b) does not appear explicitly in the paper by + * Hull et al, but the idea may be found on pages 308 and 309. Introducing the + * function f(a, b) allows us to concentrate many of the clever tricks in this + * paper into one function. + */ + +/* + * Function f(a, b, hypot_a_b) = (hypot(a, b) - b) / 2. + * Pass hypot(a, b) as the third argument. + */ +__host__ __device__ inline double f(double a, double b, double hypot_a_b) { + if (b < 0) return ((hypot_a_b - b) / 2); + if (b == 0) return (a / 2); + return (a * a / (hypot_a_b + b) / 2); +} + +/* + * All the hard work is contained in this function. + * x and y are assumed positive or zero, and less than RECIP_EPSILON. + * Upon return: + * rx = Re(casinh(z)) = -Im(cacos(y + I*x)). + * B_is_usable is set to 1 if the value of B is usable. + * If B_is_usable is set to 0, sqrt_A2my2 = sqrt(A*A - y*y), and new_y = y. + * If returning sqrt_A2my2 has potential to result in an underflow, it is + * rescaled, and new_y is similarly rescaled. + */ +__host__ __device__ inline void do_hard_work(double x, double y, double* rx, + int* B_is_usable, double* B, + double* sqrt_A2my2, double* new_y) { + double R, S, A; /* A, B, R, and S are as in Hull et al. */ + double Am1, Amy; /* A-1, A-y. */ + const double A_crossover = + 10; /* Hull et al suggest 1.5, but 10 works better */ + const double FOUR_SQRT_MIN = + 5.966672584960165394632772e-154; /* =0x1p-509; >= 4 * sqrt(DBL_MIN) */ + const double B_crossover = 0.6417; /* suggested by Hull et al */ + + R = hypot(x, y + 1); /* |z+I| */ + S = hypot(x, y - 1); /* |z-I| */ + + /* A = (|z+I| + |z-I|) / 2 */ + A = (R + S) / 2; + /* + * Mathematically A >= 1. There is a small chance that this will not + * be so because of rounding errors. So we will make certain it is + * so. + */ + if (A < 1) A = 1; + + if (A < A_crossover) { + /* + * Am1 = fp + fm, where fp = f(x, 1+y), and fm = f(x, 1-y). 
+ * rx = log1p(Am1 + sqrt(Am1*(A+1))) + */ + if (y == 1 && x < DBL_EPSILON * DBL_EPSILON / 128) { + /* + * fp is of order x^2, and fm = x/2. + * A = 1 (inexactly). + */ + *rx = sqrt(x); + } else if (x >= DBL_EPSILON * fabs(y - 1)) { + /* + * Underflow will not occur because + * x >= DBL_EPSILON^2/128 >= FOUR_SQRT_MIN + */ + Am1 = f(x, 1 + y, R) + f(x, 1 - y, S); + *rx = log1p(Am1 + sqrt(Am1 * (A + 1))); + } else if (y < 1) { + /* + * fp = x*x/(1+y)/4, fm = x*x/(1-y)/4, and + * A = 1 (inexactly). + */ + *rx = x / sqrt((1 - y) * (1 + y)); + } else { /* if (y > 1) */ + /* + * A-1 = y-1 (inexactly). + */ + *rx = log1p((y - 1) + sqrt((y - 1) * (y + 1))); + } + } else { + *rx = log(A + sqrt(A * A - 1)); + } + + *new_y = y; + + if (y < FOUR_SQRT_MIN) { + /* + * Avoid a possible underflow caused by y/A. For casinh this + * would be legitimate, but will be picked up by invoking atan2 + * later on. For cacos this would not be legitimate. + */ + *B_is_usable = 0; + *sqrt_A2my2 = A * (2 / DBL_EPSILON); + *new_y = y * (2 / DBL_EPSILON); + return; + } + + /* B = (|z+I| - |z-I|) / 2 = y/A */ + *B = y / A; + *B_is_usable = 1; + + if (*B > B_crossover) { + *B_is_usable = 0; + /* + * Amy = fp + fm, where fp = f(x, y+1), and fm = f(x, y-1). + * sqrt_A2my2 = sqrt(Amy*(A+y)) + */ + if (y == 1 && x < DBL_EPSILON / 128) { + /* + * fp is of order x^2, and fm = x/2. + * A = 1 (inexactly). + */ + *sqrt_A2my2 = sqrt(x) * sqrt((A + y) / 2); + } else if (x >= DBL_EPSILON * fabs(y - 1)) { + /* + * Underflow will not occur because + * x >= DBL_EPSILON/128 >= FOUR_SQRT_MIN + * and + * x >= DBL_EPSILON^2 >= FOUR_SQRT_MIN + */ + Amy = f(x, y + 1, R) + f(x, y - 1, S); + *sqrt_A2my2 = sqrt(Amy * (A + y)); + } else if (y > 1) { + /* + * fp = x*x/(y+1)/4, fm = x*x/(y-1)/4, and + * A = y (inexactly). + * + * y < RECIP_EPSILON. So the following + * scaling should avoid any underflow problems. 
+ */ + *sqrt_A2my2 = + x * (4 / DBL_EPSILON / DBL_EPSILON) * y / sqrt((y + 1) * (y - 1)); + *new_y = y * (4 / DBL_EPSILON / DBL_EPSILON); + } else { /* if (y < 1) */ + /* + * fm = 1-y >= DBL_EPSILON, fp is of order x^2, and + * A = 1 (inexactly). + */ + *sqrt_A2my2 = sqrt((1 - y) * (1 + y)); + } + } +} + +/* + * casinh(z) = z + O(z^3) as z -> 0 + * + * casinh(z) = sign(x)*clog(sign(x)*z) + O(1/z^2) as z -> infinity + * The above formula works for the imaginary part as well, because + * Im(casinh(z)) = sign(x)*atan2(sign(x)*y, fabs(x)) + O(y/z^3) + * as z -> infinity, uniformly in y + */ +__host__ __device__ inline complex casinh(complex z) { + double x, y, ax, ay, rx, ry, B, sqrt_A2my2, new_y; + int B_is_usable; + complex w; + const double RECIP_EPSILON = 1.0 / DBL_EPSILON; + const double m_ln2 = 6.9314718055994531e-1; /* 0x162e42fefa39ef.0p-53 */ + x = z.real(); + y = z.imag(); + ax = fabs(x); + ay = fabs(y); + + if (isnan(x) || isnan(y)) { + /* casinh(+-Inf + I*NaN) = +-Inf + I*NaN */ + if (isinf(x)) return (complex(x, y + y)); + /* casinh(NaN + I*+-Inf) = opt(+-)Inf + I*NaN */ + if (isinf(y)) return (complex(y, x + x)); + /* casinh(NaN + I*0) = NaN + I*0 */ + if (y == 0) return (complex(x + x, y)); + /* + * All other cases involving NaN return NaN + I*NaN. + * C99 leaves it optional whether to raise invalid if one of + * the arguments is not NaN, so we opt not to raise it. + */ + return (complex(x + 0.0 + (y + 0.0), x + 0.0 + (y + 0.0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + /* clog...() will raise inexact unless x or y is infinite. */ + if (signbit(x) == 0) + w = clog_for_large_values(z) + m_ln2; + else + w = clog_for_large_values(-z) + m_ln2; + return (complex(copysign(w.real(), x), copysign(w.imag(), y))); + } + + /* Avoid spuriously raising inexact for z = 0. */ + if (x == 0 && y == 0) return (z); + + /* All remaining cases are inexact. 
*/ + raise_inexact(); + + const double SQRT_6_EPSILON = + 3.6500241499888571e-8; /* 0x13988e1409212e.0p-77 */ + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) return (z); + + do_hard_work(ax, ay, &rx, &B_is_usable, &B, &sqrt_A2my2, &new_y); + if (B_is_usable) + ry = asin(B); + else + ry = atan2(new_y, sqrt_A2my2); + return (complex(copysign(rx, x), copysign(ry, y))); +} + +/* + * casin(z) = reverse(casinh(reverse(z))) + * where reverse(x + I*y) = y + I*x = I*conj(z). + */ +__host__ __device__ inline complex casin(complex z) { + complex w = casinh(complex(z.imag(), z.real())); + + return (complex(w.imag(), w.real())); +} + +/* + * cacos(z) = PI/2 - casin(z) + * but do the computation carefully so cacos(z) is accurate when z is + * close to 1. + * + * cacos(z) = PI/2 - z + O(z^3) as z -> 0 + * + * cacos(z) = -sign(y)*I*clog(z) + O(1/z^2) as z -> infinity + * The above formula works for the real part as well, because + * Re(cacos(z)) = atan2(fabs(y), x) + O(y/z^3) + * as z -> infinity, uniformly in y + */ +__host__ __device__ inline complex cacos(complex z) { + double x, y, ax, ay, rx, ry, B, sqrt_A2mx2, new_x; + int sx, sy; + int B_is_usable; + complex w; + const double pio2_hi = 1.5707963267948966e0; /* 0x1921fb54442d18.0p-52 */ + const volatile double pio2_lo = + 6.1232339957367659e-17; /* 0x11a62633145c07.0p-106 */ + const double m_ln2 = 6.9314718055994531e-1; /* 0x162e42fefa39ef.0p-53 */ + + x = z.real(); + y = z.imag(); + sx = signbit(x); + sy = signbit(y); + ax = fabs(x); + ay = fabs(y); + + if (isnan(x) || isnan(y)) { + /* cacos(+-Inf + I*NaN) = NaN + I*opt(-)Inf */ + if (isinf(x)) return (complex(y + y, -infinity())); + /* cacos(NaN + I*+-Inf) = NaN + I*-+Inf */ + if (isinf(y)) return (complex(x + x, -y)); + /* cacos(0 + I*NaN) = PI/2 + I*NaN with inexact */ + if (x == 0) return (complex(pio2_hi + pio2_lo, y + y)); + /* + * All other cases involving NaN return NaN + I*NaN. 
+ * C99 leaves it optional whether to raise invalid if one of + * the arguments is not NaN, so we opt not to raise it. + */ + return (complex(x + 0.0 + (y + 0), x + 0.0 + (y + 0))); + } + + const double RECIP_EPSILON = 1.0 / DBL_EPSILON; + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + /* clog...() will raise inexact unless x or y is infinite. */ + w = clog_for_large_values(z); + rx = fabs(w.imag()); + ry = w.real() + m_ln2; + if (sy == 0) ry = -ry; + return (complex(rx, ry)); + } + + /* Avoid spuriously raising inexact for z = 1. */ + if (x == 1.0 && y == 0.0) return (complex(0, -y)); + + /* All remaining cases are inexact. */ + raise_inexact(); + + const double SQRT_6_EPSILON = + 3.6500241499888571e-8; /* 0x13988e1409212e.0p-77 */ + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) + return (complex(pio2_hi - (x - pio2_lo), -y)); + + do_hard_work(ay, ax, &ry, &B_is_usable, &B, &sqrt_A2mx2, &new_x); + if (B_is_usable) { + if (sx == 0) + rx = acos(B); + else + rx = acos(-B); + } else { + if (sx == 0) + rx = atan2(sqrt_A2mx2, new_x); + else + rx = atan2(sqrt_A2mx2, -new_x); + } + if (sy == 0) ry = -ry; + return (complex(rx, ry)); +} + +/* + * cacosh(z) = I*cacos(z) or -I*cacos(z) + * where the sign is chosen so Re(cacosh(z)) >= 0. + */ +__host__ __device__ inline complex cacosh(complex z) { + complex w; + double rx, ry; + + w = cacos(z); + rx = w.real(); + ry = w.imag(); + /* cacosh(NaN + I*NaN) = NaN + I*NaN */ + if (isnan(rx) && isnan(ry)) return (complex(ry, rx)); + /* cacosh(NaN + I*+-Inf) = +Inf + I*NaN */ + /* cacosh(+-Inf + I*NaN) = +Inf + I*NaN */ + if (isnan(rx)) return (complex(fabs(ry), rx)); + /* cacosh(0 + I*NaN) = NaN + I*NaN */ + if (isnan(ry)) return (complex(ry, ry)); + return (complex(fabs(ry), copysign(rx, z.imag()))); +} + +/* + * Optimized version of clog() for |z| finite and larger than ~RECIP_EPSILON. 
+ */ +__host__ __device__ inline complex clog_for_large_values(complex z) { + double x, y; + double ax, ay, t; + const double m_e = 2.7182818284590452e0; /* 0x15bf0a8b145769.0p-51 */ + + x = z.real(); + y = z.imag(); + ax = fabs(x); + ay = fabs(y); + if (ax < ay) { + t = ax; + ax = ay; + ay = t; + } + + /* + * Avoid overflow in hypot() when x and y are both very large. + * Divide x and y by E, and then add 1 to the logarithm. This depends + * on E being larger than sqrt(2). + * Dividing by E causes an insignificant loss of accuracy; however + * this method is still poor since it is unnecessarily slow. + */ + if (ax > DBL_MAX / 2) + return (complex(log(hypot(x / m_e, y / m_e)) + 1, atan2(y, x))); + + /* + * Avoid overflow when x or y is large. Avoid underflow when x or + * y is small. + */ + const double QUARTER_SQRT_MAX = + 5.966672584960165394632772e-154; /* = 0x1p509; <= sqrt(DBL_MAX) / 4 */ + const double SQRT_MIN = + 1.491668146240041348658193e-154; /* = 0x1p-511; >= sqrt(DBL_MIN) */ + if (ax > QUARTER_SQRT_MAX || ay < SQRT_MIN) + return (complex(log(hypot(x, y)), atan2(y, x))); + + return (complex(log(ax * ax + ay * ay) / 2, atan2(y, x))); +} + +/* + * ================= + * | catanh, catan | + * ================= + */ + +/* + * sum_squares(x,y) = x*x + y*y (or just x*x if y*y would underflow). + * Assumes x*x and y*y will not overflow. + * Assumes x and y are finite. + * Assumes y is non-negative. + * Assumes fabs(x) >= DBL_EPSILON. + */ +__host__ __device__ inline double sum_squares(double x, double y) { + const double SQRT_MIN = + 1.491668146240041348658193e-154; /* = 0x1p-511; >= sqrt(DBL_MIN) */ + /* Avoid underflow when y is small. */ + if (y < SQRT_MIN) return (x * x); + + return (x * x + y * y); +} + +/* + * real_part_reciprocal(x, y) = Re(1/(x+I*y)) = x/(x*x + y*y). + * Assumes x and y are not NaN, and one of x and y is larger than + * RECIP_EPSILON. We avoid unwarranted underflow. 
It is important to not use + * the code creal(1/z), because the imaginary part may produce an unwanted + * underflow. + * This is only called in a context where inexact is always raised before + * the call, so no effort is made to avoid or force inexact. + */ +__host__ __device__ inline double real_part_reciprocal(double x, double y) { + double scale; + uint32_t hx, hy; + int32_t ix, iy; + + /* + * This code is inspired by the C99 document n1124.pdf, Section G.5.1, + * example 2. + */ + get_high_word(hx, x); + ix = hx & 0x7ff00000; + get_high_word(hy, y); + iy = hy & 0x7ff00000; + //#define BIAS (DBL_MAX_EXP - 1) + const int BIAS = DBL_MAX_EXP - 1; + /* XXX more guard digits are useful iff there is extra precision. */ + //#define CUTOFF (DBL_MANT_DIG / 2 + 1) /* just half or 1 guard digit */ + const int CUTOFF = (DBL_MANT_DIG / 2 + 1); + if (ix - iy >= CUTOFF << 20 || isinf(x)) + return (1 / x); /* +-Inf -> +-0 is special */ + if (iy - ix >= CUTOFF << 20) + return (x / y / y); /* should avoid double div, but hard */ + if (ix <= (BIAS + DBL_MAX_EXP / 2 - CUTOFF) << 20) + return (x / (x * x + y * y)); + scale = 1; + set_high_word(scale, 0x7ff00000 - ix); /* 2**(1-ilogb(x)) */ + x *= scale; + y *= scale; + return (x / (x * x + y * y) * scale); +} + +/* + * catanh(z) = log((1+z)/(1-z)) / 2 + * = log1p(4*x / |z-1|^2) / 4 + * + I * atan2(2*y, (1-x)*(1+x)-y*y) / 2 + * + * catanh(z) = z + O(z^3) as z -> 0 + * + * catanh(z) = 1/z + sign(y)*I*PI/2 + O(1/z^3) as z -> infinity + * The above formula works for the real part as well, because + * Re(catanh(z)) = x/|z|^2 + O(x/z^4) + * as z -> infinity, uniformly in x + */ +#if __cplusplus >= 201103L || !defined _MSC_VER +__host__ __device__ inline complex catanh(complex z) { + double x, y, ax, ay, rx, ry; + const volatile double pio2_lo = + 6.1232339957367659e-17; /* 0x11a62633145c07.0p-106 */ + const double pio2_hi = 1.5707963267948966e0; /* 0x1921fb54442d18.0p-52 */ + + x = z.real(); + y = z.imag(); + ax = fabs(x); + ay = 
fabs(y); + + /* This helps handle many cases. */ + if (y == 0 && ax <= 1) return (complex(atanh(x), y)); + + /* To ensure the same accuracy as atan(), and to filter out z = 0. */ + if (x == 0) return (complex(x, atan(y))); + + if (isnan(x) || isnan(y)) { + /* catanh(+-Inf + I*NaN) = +-0 + I*NaN */ + if (isinf(x)) return (complex(copysign(0.0, x), y + y)); + /* catanh(NaN + I*+-Inf) = sign(NaN)0 + I*+-PI/2 */ + if (isinf(y)) + return ( + complex(copysign(0.0, x), copysign(pio2_hi + pio2_lo, y))); + /* + * All other cases involving NaN return NaN + I*NaN. + * C99 leaves it optional whether to raise invalid if one of + * the arguments is not NaN, so we opt not to raise it. + */ + return (complex(x + 0.0 + (y + 0), x + 0.0 + (y + 0))); + } + + const double RECIP_EPSILON = 1.0 / DBL_EPSILON; + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) + return (complex(real_part_reciprocal(x, y), + copysign(pio2_hi + pio2_lo, y))); + + const double SQRT_3_EPSILON = + 2.5809568279517849e-8; /* 0x1bb67ae8584caa.0p-78 */ + if (ax < SQRT_3_EPSILON / 2 && ay < SQRT_3_EPSILON / 2) { + /* + * z = 0 was filtered out above. All other cases must raise + * inexact, but this is the only only that needs to do it + * explicitly. + */ + raise_inexact(); + return (z); + } + + const double m_ln2 = 6.9314718055994531e-1; /* 0x162e42fefa39ef.0p-53 */ + if (ax == 1 && ay < DBL_EPSILON) + rx = (m_ln2 - log(ay)) / 2; + else + rx = log1p(4 * ax / sum_squares(ax - 1, ay)) / 4; + + if (ax == 1) + ry = atan2(2.0, -ay) / 2; + else if (ay < DBL_EPSILON) + ry = atan2(2 * ay, (1 - ax) * (1 + ax)) / 2; + else + ry = atan2(2 * ay, (1 - ax) * (1 + ax) - ay * ay) / 2; + + return (complex(copysign(rx, x), copysign(ry, y))); +} + +/* + * catan(z) = reverse(catanh(reverse(z))) + * where reverse(x + I*y) = y + I*x = I*conj(z). 
+ */ +__host__ __device__ inline complex catan(complex z) { + complex w = catanh(complex(z.imag(), z.real())); + return (complex(w.imag(), w.real())); +} + +#endif + +} // namespace complex + +} // namespace detail + +template +__host__ __device__ inline complex acos(const complex& z) { + const complex ret = thrust::asin(z); + const ValueType pi = ValueType(3.14159265358979323846); + return complex(pi / 2 - ret.real(), -ret.imag()); +} + +template +__host__ __device__ inline complex asin(const complex& z) { + const complex i(0, 1); + return -i * asinh(i * z); +} + +template +__host__ __device__ inline complex atan(const complex& z) { + const complex i(0, 1); + return -i * thrust::atanh(i * z); +} + +template +__host__ __device__ inline complex acosh(const complex& z) { + thrust::complex ret( + (z.real() - z.imag()) * (z.real() + z.imag()) - ValueType(1.0), + ValueType(2.0) * z.real() * z.imag()); + ret = thrust::sqrt(ret); + if (z.real() < ValueType(0.0)) { + ret = -ret; + } + ret += z; + ret = thrust::log(ret); + if (ret.real() < ValueType(0.0)) { + ret = -ret; + } + return ret; +} + +template +__host__ __device__ inline complex asinh(const complex& z) { + return thrust::log(thrust::sqrt(z * z + ValueType(1)) + z); +} + +template +__host__ __device__ inline complex atanh(const complex& z) { + ValueType imag2 = z.imag() * z.imag(); + ValueType n = ValueType(1.0) + z.real(); + n = imag2 + n * n; + + ValueType d = ValueType(1.0) - z.real(); + d = imag2 + d * d; + complex ret(ValueType(0.25) * (::log(n) - ::log(d)), 0); + + d = ValueType(1.0) - z.real() * z.real() - imag2; + + ret.imag(ValueType(0.5) * ::atan2(ValueType(2.0) * z.imag(), d)); + return ret; +} + +template <> +__host__ __device__ inline complex acos(const complex& z) { + return detail::complex::cacos(z); +} + +template <> +__host__ __device__ inline complex asin(const complex& z) { + return detail::complex::casin(z); +} + +#if __cplusplus >= 201103L || !defined _MSC_VER +template <> +__host__ __device__ 
inline complex atan(const complex& z) { + return detail::complex::catan(z); +} +#endif + +template <> +__host__ __device__ inline complex acosh(const complex& z) { + return detail::complex::cacosh(z); +} + +template <> +__host__ __device__ inline complex asinh(const complex& z) { + return detail::complex::casinh(z); +} + +#if __cplusplus >= 201103L || !defined _MSC_VER +template <> +__host__ __device__ inline complex atanh(const complex& z) { + return detail::complex::catanh(z); +} +#endif + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/catrigf.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/catrigf.h new file mode 100644 index 0000000000000000000000000000000000000000..bf834e5888110f7a025624102b43f821c60ee8f3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/catrigf.h @@ -0,0 +1,446 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2012 Stephen Montgomery-Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Adapted from FreeBSD by Filipe Maia : + * freebsd/lib/msun/src/catrig.c + */ + + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +__host__ __device__ inline complex clog_for_large_values(complex z); + +/* + * The algorithm is very close to that in "Implementing the complex arcsine + * and arccosine functions using exception handling" by T. E. Hull, Thomas F. + * Fairgrieve, and Ping Tak Peter Tang, published in ACM Transactions on + * Mathematical Software, Volume 23 Issue 3, 1997, Pages 299-335, + * http://dl.acm.org/citation.cfm?id=275324. + * + * See catrig.c for complete comments. + * + * XXX comments were removed automatically, and even short ones on the right + * of statements were removed (all of them), contrary to normal style. Only + * a few comments on the right of declarations remain. 
+ */ + +__host__ __device__ inline float f(float a, float b, float hypot_a_b) { + if (b < 0.0f) return ((hypot_a_b - b) / 2.0f); + if (b == 0.0f) return (a / 2.0f); + return (a * a / (hypot_a_b + b) / 2.0f); +} + +/* + * All the hard work is contained in this function. + * x and y are assumed positive or zero, and less than RECIP_EPSILON. + * Upon return: + * rx = Re(casinh(z)) = -Im(cacos(y + I*x)). + * B_is_usable is set to 1 if the value of B is usable. + * If B_is_usable is set to 0, sqrt_A2my2 = sqrt(A*A - y*y), and new_y = y. + * If returning sqrt_A2my2 has potential to result in an underflow, it is + * rescaled, and new_y is similarly rescaled. + */ +__host__ __device__ inline void do_hard_work(float x, float y, float* rx, + int* B_is_usable, float* B, + float* sqrt_A2my2, float* new_y) { + float R, S, A; /* A, B, R, and S are as in Hull et al. */ + float Am1, Amy; /* A-1, A-y. */ + const float A_crossover = + 10; /* Hull et al suggest 1.5, but 10 works better */ + const float FOUR_SQRT_MIN = 4.336808689942017736029811e-19f; + ; /* =0x1p-61; >= 4 * sqrt(FLT_MIN) */ + const float B_crossover = 0.6417f; /* suggested by Hull et al */ + R = hypotf(x, y + 1); + S = hypotf(x, y - 1); + + A = (R + S) / 2; + if (A < 1) A = 1; + + if (A < A_crossover) { + if (y == 1 && x < FLT_EPSILON * FLT_EPSILON / 128) { + *rx = sqrtf(x); + } else if (x >= FLT_EPSILON * fabsf(y - 1)) { + Am1 = f(x, 1 + y, R) + f(x, 1 - y, S); + *rx = log1pf(Am1 + sqrtf(Am1 * (A + 1))); + } else if (y < 1) { + *rx = x / sqrtf((1 - y) * (1 + y)); + } else { + *rx = log1pf((y - 1) + sqrtf((y - 1) * (y + 1))); + } + } else { + *rx = logf(A + sqrtf(A * A - 1)); + } + + *new_y = y; + + if (y < FOUR_SQRT_MIN) { + *B_is_usable = 0; + *sqrt_A2my2 = A * (2 / FLT_EPSILON); + *new_y = y * (2 / FLT_EPSILON); + return; + } + + *B = y / A; + *B_is_usable = 1; + + if (*B > B_crossover) { + *B_is_usable = 0; + if (y == 1 && x < FLT_EPSILON / 128) { + *sqrt_A2my2 = sqrtf(x) * sqrtf((A + y) / 2); + } else if (x >= 
FLT_EPSILON * fabsf(y - 1)) { + Amy = f(x, y + 1, R) + f(x, y - 1, S); + *sqrt_A2my2 = sqrtf(Amy * (A + y)); + } else if (y > 1) { + *sqrt_A2my2 = + x * (4 / FLT_EPSILON / FLT_EPSILON) * y / sqrtf((y + 1) * (y - 1)); + *new_y = y * (4 / FLT_EPSILON / FLT_EPSILON); + } else { + *sqrt_A2my2 = sqrtf((1 - y) * (1 + y)); + } + } +} + +__host__ __device__ inline complex casinhf(complex z) { + float x, y, ax, ay, rx, ry, B, sqrt_A2my2, new_y; + int B_is_usable; + complex w; + const float RECIP_EPSILON = 1.0 / FLT_EPSILON; + const float m_ln2 = 6.9314718055994531e-1f; /* 0x162e42fefa39ef.0p-53 */ + x = z.real(); + y = z.imag(); + ax = fabsf(x); + ay = fabsf(y); + + if (isnan(x) || isnan(y)) { + if (isinf(x)) return (complex(x, y + y)); + if (isinf(y)) return (complex(y, x + x)); + if (y == 0) return (complex(x + x, y)); + return (complex(x + 0.0f + (y + 0), x + 0.0f + (y + 0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + if (signbit(x) == 0) + w = clog_for_large_values(z) + m_ln2; + else + w = clog_for_large_values(-z) + m_ln2; + return (complex(copysignf(w.real(), x), copysignf(w.imag(), y))); + } + + if (x == 0 && y == 0) return (z); + + raise_inexact(); + + const float SQRT_6_EPSILON = 8.4572793338e-4f; /* 0xddb3d7.0p-34 */ + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) return (z); + + do_hard_work(ax, ay, &rx, &B_is_usable, &B, &sqrt_A2my2, &new_y); + if (B_is_usable) + ry = asinf(B); + else + ry = atan2f(new_y, sqrt_A2my2); + return (complex(copysignf(rx, x), copysignf(ry, y))); +} + +__host__ __device__ inline complex casinf(complex z) { + complex w = casinhf(complex(z.imag(), z.real())); + + return (complex(w.imag(), w.real())); +} + +__host__ __device__ inline complex cacosf(complex z) { + float x, y, ax, ay, rx, ry, B, sqrt_A2mx2, new_x; + int sx, sy; + int B_is_usable; + complex w; + const float pio2_hi = 1.5707963267948966e0f; /* 0x1921fb54442d18.0p-52 */ + const volatile float pio2_lo = + 6.1232339957367659e-17f; /* 
0x11a62633145c07.0p-106 */ + const float m_ln2 = 6.9314718055994531e-1f; /* 0x162e42fefa39ef.0p-53 */ + + x = z.real(); + y = z.imag(); + sx = signbit(x); + sy = signbit(y); + ax = fabsf(x); + ay = fabsf(y); + + if (isnan(x) || isnan(y)) { + if (isinf(x)) return (complex(y + y, -infinity())); + if (isinf(y)) return (complex(x + x, -y)); + if (x == 0) return (complex(pio2_hi + pio2_lo, y + y)); + return (complex(x + 0.0f + (y + 0), x + 0.0f + (y + 0))); + } + + const float RECIP_EPSILON = 1.0 / FLT_EPSILON; + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + w = clog_for_large_values(z); + rx = fabsf(w.imag()); + ry = w.real() + m_ln2; + if (sy == 0) ry = -ry; + return (complex(rx, ry)); + } + + if (x == 1 && y == 0) return (complex(0, -y)); + + raise_inexact(); + + const float SQRT_6_EPSILON = 8.4572793338e-4f; /* 0xddb3d7.0p-34 */ + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) + return (complex(pio2_hi - (x - pio2_lo), -y)); + + do_hard_work(ay, ax, &ry, &B_is_usable, &B, &sqrt_A2mx2, &new_x); + if (B_is_usable) { + if (sx == 0) + rx = acosf(B); + else + rx = acosf(-B); + } else { + if (sx == 0) + rx = atan2f(sqrt_A2mx2, new_x); + else + rx = atan2f(sqrt_A2mx2, -new_x); + } + if (sy == 0) ry = -ry; + return (complex(rx, ry)); +} + +__host__ __device__ inline complex cacoshf(complex z) { + complex w; + float rx, ry; + + w = cacosf(z); + rx = w.real(); + ry = w.imag(); + /* cacosh(NaN + I*NaN) = NaN + I*NaN */ + if (isnan(rx) && isnan(ry)) return (complex(ry, rx)); + /* cacosh(NaN + I*+-Inf) = +Inf + I*NaN */ + /* cacosh(+-Inf + I*NaN) = +Inf + I*NaN */ + if (isnan(rx)) return (complex(fabsf(ry), rx)); + /* cacosh(0 + I*NaN) = NaN + I*NaN */ + if (isnan(ry)) return (complex(ry, ry)); + return (complex(fabsf(ry), copysignf(rx, z.imag()))); +} + +/* + * Optimized version of clog() for |z| finite and larger than ~RECIP_EPSILON. 
+ */ +__host__ __device__ inline complex clog_for_large_values(complex z) { + float x, y; + float ax, ay, t; + const float m_e = 2.7182818284590452e0f; /* 0x15bf0a8b145769.0p-51 */ + + x = z.real(); + y = z.imag(); + ax = fabsf(x); + ay = fabsf(y); + if (ax < ay) { + t = ax; + ax = ay; + ay = t; + } + + if (ax > FLT_MAX / 2) + return (complex(logf(hypotf(x / m_e, y / m_e)) + 1, atan2f(y, x))); + + const float QUARTER_SQRT_MAX = + 2.3058430092136939520000000e+18f; /* = 0x1p61; <= sqrt(FLT_MAX) / 4 */ + const float SQRT_MIN = + 1.084202172485504434007453e-19f; /* 0x1p-63; >= sqrt(FLT_MIN) */ + if (ax > QUARTER_SQRT_MAX || ay < SQRT_MIN) + return (complex(logf(hypotf(x, y)), atan2f(y, x))); + + return (complex(logf(ax * ax + ay * ay) / 2, atan2f(y, x))); +} + +/* + * ================= + * | catanh, catan | + * ================= + */ + +/* + * sum_squares(x,y) = x*x + y*y (or just x*x if y*y would underflow). + * Assumes x*x and y*y will not overflow. + * Assumes x and y are finite. + * Assumes y is non-negative. + * Assumes fabsf(x) >= FLT_EPSILON. + */ +__host__ __device__ inline float sum_squares(float x, float y) { + const float SQRT_MIN = + 1.084202172485504434007453e-19f; /* 0x1p-63; >= sqrt(FLT_MIN) */ + /* Avoid underflow when y is small. 
*/ + if (y < SQRT_MIN) return (x * x); + + return (x * x + y * y); +} + +__host__ __device__ inline float real_part_reciprocal(float x, float y) { + float scale; + uint32_t hx, hy; + int32_t ix, iy; + + get_float_word(hx, x); + ix = hx & 0x7f800000; + get_float_word(hy, y); + iy = hy & 0x7f800000; + //#define BIAS (FLT_MAX_EXP - 1) + const int BIAS = FLT_MAX_EXP - 1; + //#define CUTOFF (FLT_MANT_DIG / 2 + 1) + const int CUTOFF = (FLT_MANT_DIG / 2 + 1); + if (ix - iy >= CUTOFF << 23 || isinf(x)) return (1 / x); + if (iy - ix >= CUTOFF << 23) return (x / y / y); + if (ix <= (BIAS + FLT_MAX_EXP / 2 - CUTOFF) << 23) + return (x / (x * x + y * y)); + set_float_word(scale, 0x7f800000 - ix); + x *= scale; + y *= scale; + return (x / (x * x + y * y) * scale); +} + +#if __cplusplus >= 201103L || !defined _MSC_VER +__host__ __device__ inline complex catanhf(complex z) { + float x, y, ax, ay, rx, ry; + const volatile float pio2_lo = + 6.1232339957367659e-17; /* 0x11a62633145c07.0p-106 */ + const float pio2_hi = 1.5707963267948966e0; /* 0x1921fb54442d18.0p-52 */ + + x = z.real(); + y = z.imag(); + ax = fabsf(x); + ay = fabsf(y); + + if (y == 0 && ax <= 1) return (complex(atanhf(x), y)); + + if (x == 0) return (complex(x, atanf(y))); + + if (isnan(x) || isnan(y)) { + if (isinf(x)) return (complex(copysignf(0, x), y + y)); + if (isinf(y)) + return (complex(copysignf(0, x), copysignf(pio2_hi + pio2_lo, y))); + return (complex(x + 0.0f + (y + 0.0f), x + 0.0f + (y + 0.0f))); + } + + const float RECIP_EPSILON = 1.0f / FLT_EPSILON; + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) + return (complex(real_part_reciprocal(x, y), + copysignf(pio2_hi + pio2_lo, y))); + + const float SQRT_3_EPSILON = 5.9801995673e-4; /* 0x9cc471.0p-34 */ + if (ax < SQRT_3_EPSILON / 2 && ay < SQRT_3_EPSILON / 2) { + raise_inexact(); + return (z); + } + + const float m_ln2 = 6.9314718056e-1f; /* 0xb17218.0p-24 */ + if (ax == 1 && ay < FLT_EPSILON) + rx = (m_ln2 - logf(ay)) / 2; + else + rx = log1pf(4 * ax / 
sum_squares(ax - 1, ay)) / 4; + + if (ax == 1) + ry = atan2f(2, -ay) / 2; + else if (ay < FLT_EPSILON) + ry = atan2f(2 * ay, (1 - ax) * (1 + ax)) / 2; + else + ry = atan2f(2 * ay, (1 - ax) * (1 + ax) - ay * ay) / 2; + + return (complex(copysignf(rx, x), copysignf(ry, y))); +} + +__host__ __device__ inline complex catanf(complex z) { + complex w = catanhf(complex(z.imag(), z.real())); + return (complex(w.imag(), w.real())); +} +#endif + +} // namespace complex + +} // namespace detail + +template <> +__host__ __device__ inline complex acos(const complex& z) { + return detail::complex::cacosf(z); +} + +template <> +__host__ __device__ inline complex asin(const complex& z) { + return detail::complex::casinf(z); +} + +#if __cplusplus >= 201103L || !defined _MSC_VER +template <> +__host__ __device__ inline complex atan(const complex& z) { + return detail::complex::catanf(z); +} +#endif + +template <> +__host__ __device__ inline complex acosh(const complex& z) { + return detail::complex::cacoshf(z); +} + +template <> +__host__ __device__ inline complex asinh(const complex& z) { + return detail::complex::casinhf(z); +} + +#if __cplusplus >= 201103L || !defined _MSC_VER +template <> +__host__ __device__ inline complex atanh(const complex& z) { + return detail::complex::catanhf(z); +} +#endif + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/cexp.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/cexp.h new file mode 100644 index 0000000000000000000000000000000000000000..852c14caa3eab951858a460f62901885efca44cd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/cexp.h @@ -0,0 +1,175 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2011 David Schultz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* adapted from FreeBSD: + * lib/msun/src/s_cexp.c + * lib/msun/src/k_exp.c + * + */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +/* + * Compute exp(x), scaled to avoid spurious overflow. An exponent is + * returned separately in 'expt'. + * + * Input: ln(DBL_MAX) <= x < ln(2 * DBL_MAX / DBL_MIN_DENORM) ~= 1454.91 + * Output: 2**1023 <= y < 2**1024 + */ +__host__ __device__ inline double frexp_exp(double x, int* expt) { + const uint32_t k = 1799; /* constant for reduction */ + const double kln2 = 1246.97177782734161156; /* k * ln2 */ + + double exp_x; + uint32_t hx; + + /* + * We use exp(x) = exp(x - kln2) * 2**k, carefully chosen to + * minimize |exp(kln2) - 2**k|. We also scale the exponent of + * exp_x to MAX_EXP so that the result can be multiplied by + * a tiny number without losing accuracy due to denormalization. + */ + exp_x = exp(x - kln2); + get_high_word(hx, exp_x); + *expt = (hx >> 20) - (0x3ff + 1023) + k; + set_high_word(exp_x, (hx & 0xfffff) | ((0x3ff + 1023) << 20)); + return (exp_x); +} + +__host__ __device__ inline complex ldexp_cexp(complex z, int expt) { + double x, y, exp_x, scale1, scale2; + int ex_expt, half_expt; + + x = z.real(); + y = z.imag(); + exp_x = frexp_exp(x, &ex_expt); + expt += ex_expt; + + /* + * Arrange so that scale1 * scale2 == 2**expt. We use this to + * compensate for scalbn being horrendously slow. 
+ */ + half_expt = expt / 2; + insert_words(scale1, (0x3ff + half_expt) << 20, 0); + half_expt = expt - half_expt; + insert_words(scale2, (0x3ff + half_expt) << 20, 0); + + return (complex(::cos(y) * exp_x * scale1 * scale2, + ::sin(y) * exp_x * scale1 * scale2)); +} + +__host__ __device__ inline complex cexp(const complex& z) { + double x, y, exp_x; + uint32_t hx, hy, lx, ly; + + const uint32_t exp_ovfl = 0x40862e42, /* high bits of MAX_EXP * ln2 ~= 710 */ + cexp_ovfl = 0x4096b8e4; /* (MAX_EXP - MIN_DENORM_EXP) * ln2 */ + + x = z.real(); + y = z.imag(); + + extract_words(hy, ly, y); + hy &= 0x7fffffff; + + /* cexp(x + I 0) = exp(x) + I 0 */ + if ((hy | ly) == 0) return (complex(exp(x), y)); + extract_words(hx, lx, x); + /* cexp(0 + I y) = cos(y) + I sin(y) */ + if (((hx & 0x7fffffff) | lx) == 0) return (complex(cos(y), sin(y))); + + if (hy >= 0x7ff00000) { + if (lx != 0 || (hx & 0x7fffffff) != 0x7ff00000) { + /* cexp(finite|NaN +- I Inf|NaN) = NaN + I NaN */ + return (complex(y - y, y - y)); + } else if (hx & 0x80000000) { + /* cexp(-Inf +- I Inf|NaN) = 0 + I 0 */ + return (complex(0.0, 0.0)); + } else { + /* cexp(+Inf +- I Inf|NaN) = Inf + I NaN */ + return (complex(x, y - y)); + } + } + + if (hx >= exp_ovfl && hx <= cexp_ovfl) { + /* + * x is between 709.7 and 1454.3, so we must scale to avoid + * overflow in exp(x). 
+ */ + return (ldexp_cexp(z, 0)); + } else { + /* + * Cases covered here: + * - x < exp_ovfl and exp(x) won't overflow (common case) + * - x > cexp_ovfl, so exp(x) * s overflows for all s > 0 + * - x = +-Inf (generated by exp()) + * - x = NaN (spurious inexact exception from y) + */ + exp_x = ::exp(x); + return (complex(exp_x * cos(y), exp_x * sin(y))); + } +} + +} // namespace complex + +} // namespace detail + +template +__host__ __device__ inline complex exp(const complex& z) { + return polar(::exp(z.real()), z.imag()); +} + +template <> +__host__ __device__ inline complex exp(const complex& z) { + return detail::complex::cexp(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/cexpf.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/cexpf.h new file mode 100644 index 0000000000000000000000000000000000000000..ec3a1a941d619de3101c011ec0f12aa4bd29729e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/cexpf.h @@ -0,0 +1,155 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2011 David Schultz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* adapted from FreeBSD: + * lib/msun/src/s_cexpf.c + * lib/msun/src/k_exp.c + * + */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +__host__ __device__ inline float frexp_expf(float x, int* expt) { + const uint32_t k = 235; /* constant for reduction */ + const float kln2 = 162.88958740F; /* k * ln2 */ + + // should this be a double instead? 
+ float exp_x; + uint32_t hx; + + exp_x = expf(x - kln2); + get_float_word(hx, exp_x); + *expt = (hx >> 23) - (0x7f + 127) + k; + set_float_word(exp_x, (hx & 0x7fffff) | ((0x7f + 127) << 23)); + return (exp_x); +} + +__host__ __device__ inline complex ldexp_cexpf(complex z, int expt) { + float x, y, exp_x, scale1, scale2; + int ex_expt, half_expt; + + x = z.real(); + y = z.imag(); + exp_x = frexp_expf(x, &ex_expt); + expt += ex_expt; + + half_expt = expt / 2; + set_float_word(scale1, (0x7f + half_expt) << 23); + half_expt = expt - half_expt; + set_float_word(scale2, (0x7f + half_expt) << 23); + + return (complex(cos(y) * exp_x * scale1 * scale2, + sin(y) * exp_x * scale1 * scale2)); +} + +__host__ __device__ inline complex cexpf(const complex& z) { + float x, y, exp_x; + uint32_t hx, hy; + + const uint32_t exp_ovfl = 0x42b17218, /* MAX_EXP * ln2 ~= 88.722839355 */ + cexp_ovfl = 0x43400074; /* (MAX_EXP - MIN_DENORM_EXP) * ln2 */ + + x = z.real(); + y = z.imag(); + + get_float_word(hy, y); + hy &= 0x7fffffff; + + /* cexp(x + I 0) = exp(x) + I 0 */ + if (hy == 0) return (complex(exp(x), y)); + get_float_word(hx, x); + /* cexp(0 + I y) = cos(y) + I sin(y) */ + if ((hx & 0x7fffffff) == 0) { + return (complex(cos(y), sin(y))); + } + if (hy >= 0x7f800000) { + if ((hx & 0x7fffffff) != 0x7f800000) { + /* cexp(finite|NaN +- I Inf|NaN) = NaN + I NaN */ + return (complex(y - y, y - y)); + } else if (hx & 0x80000000) { + /* cexp(-Inf +- I Inf|NaN) = 0 + I 0 */ + return (complex(0.0, 0.0)); + } else { + /* cexp(+Inf +- I Inf|NaN) = Inf + I NaN */ + return (complex(x, y - y)); + } + } + + if (hx >= exp_ovfl && hx <= cexp_ovfl) { + /* + * x is between 88.7 and 192, so we must scale to avoid + * overflow in expf(x). 
+ */ + return (ldexp_cexpf(z, 0)); + } else { + /* + * Cases covered here: + * - x < exp_ovfl and exp(x) won't overflow (common case) + * - x > cexp_ovfl, so exp(x) * s overflows for all s > 0 + * - x = +-Inf (generated by exp()) + * - x = NaN (spurious inexact exception from y) + */ + exp_x = ::exp(x); + return (complex(exp_x * ::cos(y), exp_x * ::sin(y))); + } +} + +} // namespace complex + +} // namespace detail + +template <> +__host__ __device__ inline complex exp(const complex& z) { + return detail::complex::cexpf(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/clog.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/clog.h new file mode 100644 index 0000000000000000000000000000000000000000..711262628b9f800c215bd73c9eeab0e7b5d826b3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/clog.h @@ -0,0 +1,205 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2012 Stephen Montgomery-Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* adapted from FreeBSDs msun:*/ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +/* round down to 18 = 54/3 bits */ +__host__ __device__ inline double trim(double x) { + uint32_t hi; + get_high_word(hi, x); + insert_words(x, hi & 0xfffffff8, 0); + return x; +} + +__host__ __device__ inline complex clog(const complex& z) { + // Adapted from FreeBSDs msun + double x, y; + double ax, ay; + double x0, y0, x1, y1, x2, y2, t, hm1; + double val[12]; + int i, sorted; + const double e = 2.7182818284590452354; + + x = z.real(); + y = z.imag(); + + /* Handle NaNs using the general formula to mix them right. 
*/ + if (x != x || y != y) { + return (complex(::log(norm(z)), ::atan2(y, x))); + } + + ax = ::abs(x); + ay = ::abs(y); + if (ax < ay) { + t = ax; + ax = ay; + ay = t; + } + + /* + * To avoid unnecessary overflow, if x and y are very large, divide x + * and y by M_E, and then add 1 to the logarithm. This depends on + * M_E being larger than sqrt(2). + * There is a potential loss of accuracy caused by dividing by M_E, + * but this case should happen extremely rarely. + */ + // if (ay > 5e307){ + // For high values of ay -> hypotf(DBL_MAX,ay) = inf + // We expect that for values at or below ay = 5e307 this should not happen + if (ay > 5e307) { + return (complex(::log(hypot(x / e, y / e)) + 1.0, ::atan2(y, x))); + } + if (ax == 1.) { + if (ay < 1e-150) { + return (complex((ay * 0.5) * ay, ::atan2(y, x))); + } + return (complex(log1p(ay * ay) * 0.5, ::atan2(y, x))); + } + + /* + * Because atan2 and hypot conform to C99, this also covers all the + * edge cases when x or y are 0 or infinite. + */ + if (ax < 1e-50 || ay < 1e-50 || ax > 1e50 || ay > 1e50) { + return (complex(::log(hypot(x, y)), ::atan2(y, x))); + } + + /* + * From this point on, we don't need to worry about underflow or + * overflow in calculating ax*ax or ay*ay. + */ + + /* Some easy cases. */ + + if (ax >= 1.0) { + return (complex(log1p((ax - 1) * (ax + 1) + ay * ay) * 0.5, + atan2(y, x))); + } + + if (ax * ax + ay * ay <= 0.7) { + return (complex(::log(ax * ax + ay * ay) * 0.5, ::atan2(y, x))); + } + + /* + * Take extra care so that ULP of real part is small if hypot(x,y) is + * moderately close to 1. 
+ */ + + x0 = trim(ax); + ax = ax - x0; + x1 = trim(ax); + x2 = ax - x1; + y0 = trim(ay); + ay = ay - y0; + y1 = trim(ay); + y2 = ay - y1; + + val[0] = x0 * x0; + val[1] = y0 * y0; + val[2] = 2 * x0 * x1; + val[3] = 2 * y0 * y1; + val[4] = x1 * x1; + val[5] = y1 * y1; + val[6] = 2 * x0 * x2; + val[7] = 2 * y0 * y2; + val[8] = 2 * x1 * x2; + val[9] = 2 * y1 * y2; + val[10] = x2 * x2; + val[11] = y2 * y2; + + /* Bubble sort. */ + + do { + sorted = 1; + for (i = 0; i < 11; i++) { + if (val[i] < val[i + 1]) { + sorted = 0; + t = val[i]; + val[i] = val[i + 1]; + val[i + 1] = t; + } + } + } while (!sorted); + + hm1 = -1; + for (i = 0; i < 12; i++) { + hm1 += val[i]; + } + return (complex(0.5 * log1p(hm1), atan2(y, x))); +} + +} // namespace complex + +} // namespace detail + +template +__host__ __device__ inline complex log(const complex& z) { + return complex(::log(thrust::abs(z)), thrust::arg(z)); +} + +template <> +__host__ __device__ inline complex log(const complex& z) { + return detail::complex::clog(z); +} + +template +__host__ __device__ inline complex log10(const complex& z) { + // Using the explicit literal prevents compile time warnings in + // devices that don't support doubles + return thrust::log(z) / ValueType(2.30258509299404568402); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/clogf.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/clogf.h new file mode 100644 index 0000000000000000000000000000000000000000..9fbd9950590bf6ba07592ee65fa677fabd58a599 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/clogf.h @@ -0,0 +1,194 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2012 Stephen Montgomery-Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* adapted from FreeBSDs msun:*/ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +/* round down to 8 = 24/3 bits */ +__host__ __device__ inline float trim(float x) { + uint32_t hx; + get_float_word(hx, x); + hx &= 0xffff0000; + float ret; + set_float_word(ret, hx); + return ret; +} + +__host__ __device__ inline complex clogf(const complex& z) { + // Adapted from FreeBSDs msun + float x, y; + float ax, ay; + float x0, y0, x1, y1, x2, y2, t, hm1; + float val[12]; + int i, sorted; + const float e = 2.7182818284590452354f; + + x = z.real(); + y = z.imag(); + + /* Handle NaNs using the general formula to mix them right. */ + if (x != x || y != y) { + return (complex(::log(norm(z)), ::atan2(y, x))); + } + + ax = ::abs(x); + ay = ::abs(y); + if (ax < ay) { + t = ax; + ax = ay; + ay = t; + } + + /* + * To avoid unnecessary overflow, if x and y are very large, divide x + * and y by M_E, and then add 1 to the logarithm. This depends on + * M_E being larger than sqrt(2). + * There is a potential loss of accuracy caused by dividing by M_E, + * but this case should happen extremely rarely. + */ + // For high values of ay -> hypotf(FLT_MAX,ay) = inf + // We expect that for values at or below ay = 1e34f this should not happen + if (ay > 1e34f) { + return (complex(::log(hypotf(x / e, y / e)) + 1.0f, ::atan2(y, x))); + } + if (ax == 1.f) { + if (ay < 1e-19f) { + return (complex((ay * 0.5f) * ay, ::atan2(y, x))); + } + return (complex(log1pf(ay * ay) * 0.5f, ::atan2(y, x))); + } + + /* + * Because atan2 and hypot conform to C99, this also covers all the + * edge cases when x or y are 0 or infinite. + */ + if (ax < 1e-6f || ay < 1e-6f || ax > 1e6f || ay > 1e6f) { + return (complex(::log(hypotf(x, y)), ::atan2(y, x))); + } + + /* + * From this point on, we don't need to worry about underflow or + * overflow in calculating ax*ax or ay*ay. + */ + + /* Some easy cases. 
*/ + + if (ax >= 1.0f) { + return (complex(log1pf((ax - 1.f) * (ax + 1.f) + ay * ay) * 0.5f, + atan2(y, x))); + } + + if (ax * ax + ay * ay <= 0.7f) { + return (complex(::log(ax * ax + ay * ay) * 0.5f, ::atan2(y, x))); + } + + /* + * Take extra care so that ULP of real part is small if hypot(x,y) is + * moderately close to 1. + */ + + x0 = trim(ax); + ax = ax - x0; + x1 = trim(ax); + x2 = ax - x1; + y0 = trim(ay); + ay = ay - y0; + y1 = trim(ay); + y2 = ay - y1; + + val[0] = x0 * x0; + val[1] = y0 * y0; + val[2] = 2 * x0 * x1; + val[3] = 2 * y0 * y1; + val[4] = x1 * x1; + val[5] = y1 * y1; + val[6] = 2 * x0 * x2; + val[7] = 2 * y0 * y2; + val[8] = 2 * x1 * x2; + val[9] = 2 * y1 * y2; + val[10] = x2 * x2; + val[11] = y2 * y2; + + /* Bubble sort. */ + + do { + sorted = 1; + for (i = 0; i < 11; i++) { + if (val[i] < val[i + 1]) { + sorted = 0; + t = val[i]; + val[i] = val[i + 1]; + val[i + 1] = t; + } + } + } while (!sorted); + + hm1 = -1; + for (i = 0; i < 12; i++) { + hm1 += val[i]; + } + return (complex(0.5f * log1pf(hm1), atan2(y, x))); +} + +} // namespace complex + +} // namespace detail + +template <> +__host__ __device__ inline complex log(const complex& z) { + return detail::complex::clogf(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/complex.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/complex.h new file mode 100644 index 0000000000000000000000000000000000000000..7f8e87ef2da55e443cccb48aa48e5b392abf62a3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/complex.h @@ -0,0 +1,676 @@ +/* Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file complex.h + * \brief Complex numbers + */ + +#pragma once + +#include + +THRUST_NAMESPACE_BEGIN + +template +struct _select_greater_type_impl { + typedef T type; +}; + +template +struct _select_greater_type_impl { + typedef U type; +}; + +template +struct _select_greater_type + : _select_greater_type_impl sizeof(U))> {}; + +/* + * Calls to the standard math library from inside the thrust namespace + * with real arguments require explicit scope otherwise they will fail + * to resolve as it will find the equivalent complex function but then + * fail to match the template, and give up looking for other scopes. + */ + +/*! \addtogroup numerics + * \{ + */ + +/*! \addtogroup complex_numbers Complex Numbers + * \{ + */ + +/*! \p complex is the Thrust equivalent to std::complex. It is + * functionally + * equivalent to it, but can also be used in device code which + * std::complex currently cannot. + * + * \tparam T The type used to hold the real and imaginary parts. Should be + * float + * or double. Others types are not supported. + * + */ +template +#if defined(__CUDACC__) +struct __align__(sizeof(T)*2) complex { +#else +// ROCm (hipcc) does not support `__align__` +struct complex { +#endif + public: + /*! \p value_type is the type of \p complex's real and imaginary parts. + */ + typedef T value_type; + + /* --- Constructors --- */ + + /*! Construct a complex number with an imaginary part of 0. + * + * \param re The real part of the number. + */ + inline __host__ __device__ complex(const T& re); + + /*! 
Construct a complex number from its real and imaginary parts. + * + * \param re The real part of the number. + * \param im The imaginary part of the number. + */ + inline __host__ __device__ complex(const T& re, const T& im); + +#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900) + /*! Default construct a complex number. + */ + inline complex() = default; + + /*! This copy constructor copies from a \p complex with a type that is + * convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + */ + inline complex(const complex& z) = default; +#else + /*! Default construct a complex number. + */ + inline __host__ __device__ complex(); + + /*! This copy constructor copies from a \p complex with a type that is + * convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + */ + inline __host__ __device__ complex(const complex& z); +#endif // c++11 + + /*! This copy constructor copies from a \p complex with a type that + * is convertible to this \p complex \c value_type. + * + * \param z The \p complex to copy from. + * + * \tparam X is convertible to \c value_type. + */ + template + inline __host__ __device__ complex(const complex& z); + + /* --- Assignment Operators --- */ + + /*! Assign `re` to the real part of this \p complex and set the imaginary part + * to 0. + * + * \param re The real part of the number. + */ + inline __host__ __device__ complex& operator=(const T& re); + + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + */ + inline __host__ __device__ complex& operator=(const complex& z); + + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + * + * \tparam U is convertible to \c value_type. 
+ */ + template + inline __host__ __device__ complex& operator=(const complex& z); + + /* --- Compound Assignment Operators --- */ + + /*! Adds a \p complex to this \p complex and + * assigns the result to this \p complex. + * + * \param z The \p complex to be Added. + */ + __host__ __device__ inline complex& operator+=(const complex z); + + /*! Subtracts a \p complex from this \p complex and + * assigns the result to this \p complex. + * + * \param z The \p complex to be subtracted. + */ + __host__ __device__ inline complex& operator-=(const complex z); + + /*! Multiplies this \p complex by another \p complex and + * assigns the result to this \p complex. + * + * \param z The \p complex to be multiplied. + */ + __host__ __device__ inline complex& operator*=(const complex z); + + /*! Divides this \p complex by another \p complex and + * assigns the result to this \p complex. + * + * \param z The \p complex to be divided. + */ + __host__ __device__ inline complex& operator/=(const complex z); + + /* --- Getter functions --- + * The volatile ones are there to help for example + * with certain reductions optimizations + */ + + /*! Returns the real part of this \p complex. + */ + __host__ __device__ inline T real() const volatile { return m_data[0]; } + + /*! Returns the imaginary part of this \p complex. + */ + __host__ __device__ inline T imag() const volatile { return m_data[1]; } + + /*! Returns the real part of this \p complex. + */ + __host__ __device__ inline T real() const { return m_data[0]; } + + /*! Returns the imaginary part of this \p complex. + */ + __host__ __device__ inline T imag() const { return m_data[1]; } + + /* --- Setter functions --- + * The volatile ones are there to help for example + * with certain reductions optimizations + */ + + /*! Sets the real part of this \p complex. + * + * \param re The new real part of this \p complex. + */ + __host__ __device__ inline void real(T re) volatile { m_data[0] = re; } + + /*! 
Sets the imaginary part of this \p complex. + * + * \param im The new imaginary part of this \p complex.e + */ + __host__ __device__ inline void imag(T im) volatile { m_data[1] = im; } + + /*! Sets the real part of this \p complex. + * + * \param re The new real part of this \p complex. + */ + __host__ __device__ inline void real(T re) { m_data[0] = re; } + + /*! Sets the imaginary part of this \p complex. + * + * \param im The new imaginary part of this \p complex. + */ + __host__ __device__ inline void imag(T im) { m_data[1] = im; } + + private: + T m_data[2]; +}; + +/* --- General Functions --- */ + +/*! Returns the magnitude (also known as absolute value) of a \p complex. + * + * \param z The \p complex from which to calculate the absolute value. + */ +template +__host__ __device__ inline T abs(const complex& z); + +/*! Returns the phase angle (also known as argument) in radians of a \p complex. + * + * \param z The \p complex from which to calculate the phase angle. + */ +template +__host__ __device__ inline T arg(const complex& z); + +/*! Returns the square of the magnitude of a \p complex. + * + * \param z The \p complex from which to calculate the norm. + */ +template +__host__ __device__ inline T norm(const complex& z); + +/*! Returns the complex conjugate of a \p complex. + * + * \param z The \p complex from which to calculate the complex conjugate. + */ +template +__host__ __device__ inline complex conj(const complex& z); + +/*! Returns the real part of a \p complex. + * + * \param z The \p complex from which to return the real part + */ +template +__host__ __device__ inline T real(const complex& z); + +/*! Returns the imaginary part of a \p complex. + * + * \param z The \p complex from which to return the imaginary part + */ +template +__host__ __device__ inline T imag(const complex& z); + +/*! Returns a \p complex with the specified magnitude and phase. + * + * \param m The magnitude of the returned \p complex. 
+ * \param theta The phase of the returned \p complex in radians. + */ +template +__host__ __device__ inline complex polar(const T& m, const T& theta = 0); + +/*! Returns the projection of a \p complex on the Riemann sphere. + * For all finite \p complex it returns the argument. For \p complexs + * with a non finite part returns (INFINITY,+/-0) where the sign of + * the zero matches the sign of the imaginary part of the argument. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ inline complex proj(const T& z); + +/* --- Binary Arithmetic operators --- */ + +/*! Multiplies two \p complex numbers. + * + * \param lhs The first \p complex. + * \param rhs The second \p complex. + */ +template +__host__ __device__ inline complex operator*(const complex& lhs, + const complex& rhs); + +/*! Multiplies a \p complex number by a scalar. + * + * \param lhs The \p complex. + * \param rhs The scalar. + */ +template +__host__ __device__ inline complex operator*(const complex& lhs, const T& rhs); + +/*! Multiplies a scalar by a \p complex number. + * + * \param lhs The scalar. + * \param rhs The \p complex. + */ +template +__host__ __device__ inline complex operator*(const T& lhs, const complex& rhs); + +/*! Divides two \p complex numbers. + * + * \param lhs The numerator (dividend). + * \param rhs The denomimator (divisor). + */ +template +__host__ __device__ inline complex operator/(const complex& lhs, + const complex& rhs); + +/*! Divides a \p complex number by a scalar. + * + * \param lhs The complex numerator (dividend). + * \param rhs The scalar denomimator (divisor). + */ +template +__host__ __device__ inline complex operator/(const complex& lhs, const T& rhs); + +/*! Divides a scalar by a \p complex number. + * + * \param lhs The scalar numerator (dividend). + * \param rhs The complex denomimator (divisor). + */ +template +__host__ __device__ inline complex operator/(const T& lhs, const complex& rhs); + +/*! Adds two \p complex numbers. 
+ * + * \param lhs The first \p complex. + * \param rhs The second \p complex. + */ +template +__host__ __device__ inline complex operator+(const complex& lhs, + const complex& rhs); + +/*! Adds a scalar to a \p complex number. + * + * \param lhs The \p complex. + * \param rhs The scalar. + */ +template +__host__ __device__ inline complex operator+(const complex& lhs, const T& rhs); + +/*! Adds a \p complex number to a scalar. + * + * \param lhs The scalar. + * \param rhs The \p complex. + */ +template +__host__ __device__ inline complex operator+(const T& lhs, const complex& rhs); + +/*! Subtracts two \p complex numbers. + * + * \param lhs The first \p complex (minuend). + * \param rhs The second \p complex (subtrahend). + */ +template +__host__ __device__ inline complex operator-(const complex& lhs, + const complex& rhs); + +/*! Subtracts a scalar from a \p complex number. + * + * \param lhs The \p complex (minuend). + * \param rhs The scalar (subtrahend). + */ +template +__host__ __device__ inline complex operator-(const complex& lhs, const T& rhs); + +/*! Subtracts a \p complex number from a scalar. + * + * \param lhs The scalar (minuend). + * \param rhs The \p complex (subtrahend). + */ +template +__host__ __device__ inline complex operator-(const T& lhs, const complex& rhs); + +/* --- Unary Arithmetic operators --- */ + +/*! Unary plus, returns its \p complex argument. + * + * \param rhs The \p complex argument. + */ +template +__host__ __device__ inline complex operator+(const complex& rhs); + +/*! Unary minus, returns the additive inverse (negation) of its \p complex + * argument. + * + * \param rhs The \p complex argument. + */ +template +__host__ __device__ inline complex operator-(const complex& rhs); + +/* --- Exponential Functions --- */ + +/*! Returns the complex exponential of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex exp(const complex& z); + +/*! 
Returns the complex natural logarithm of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex log(const complex& z); + +/*! Returns the complex base 10 logarithm of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ inline complex log10(const complex& z); + +/* --- Power Functions --- */ + +/*! Returns a \p complex number raised to another. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ complex pow(const complex& x, const complex& y); + +/*! Returns a \p complex number raised to a scalar. + * + * \param x The \p complex base. + * \param y The scalar exponent. + */ +template +__host__ __device__ complex pow(const complex& x, const T& y); + +/*! Returns a scalar raised to a \p complex number. + * + * \param x The scalar base. + * \param y The \p complex exponent. + */ +template +__host__ __device__ complex pow(const T& x, const complex& y); + +/*! Returns a \p complex number raised to another. The types of the two \p + * complex should be compatible + * and the type of the returned \p complex is the promoted type of the two + * arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ complex::type> pow( + const complex& x, const complex& y); + +/*! Returns a \p complex number raised to a scalar. The type of the \p complex + * should be compatible with the scalar + * and the type of the returned \p complex is the promoted type of the two + * arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ complex::type> pow( + const complex& x, const U& y); + +/*! Returns a scalar raised to a \p complex number. The type of the \p complex + * should be compatible with the scalar + * and the type of the returned \p complex is the promoted type of the two + * arguments. + * + * \param x The base. + * \param y The exponent. 
+ */ +template +__host__ __device__ complex::type> pow( + const T& x, const complex& y); + +/*! Returns the complex square root of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex sqrt(const complex& z); + +/* --- Trigonometric Functions --- */ + +/*! Returns the complex cosine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex cos(const complex& z); + +/*! Returns the complex sine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex sin(const complex& z); + +/*! Returns the complex tangent of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex tan(const complex& z); + +/* --- Hyperbolic Functions --- */ + +/*! Returns the complex hyperbolic cosine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex cosh(const complex& z); + +/*! Returns the complex hyperbolic sine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex sinh(const complex& z); + +/*! Returns the complex hyperbolic tangent of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex tanh(const complex& z); + +/* --- Inverse Trigonometric Functions --- */ + +/*! Returns the complex arc cosine of a \p complex number. + * + * The range of the real part of the result is [0, Pi] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex acos(const complex& z); + +/*! Returns the complex arc sine of a \p complex number. + * + * The range of the real part of the result is [-Pi/2, Pi/2] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. 
+ */ +template +__host__ __device__ complex asin(const complex& z); + +/*! Returns the complex arc tangent of a \p complex number. + * + * The range of the real part of the result is [-Pi/2, Pi/2] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex atan(const complex& z); + +/* --- Inverse Hyperbolic Functions --- */ + +/*! Returns the complex inverse hyperbolic cosine of a \p complex number. + * + * The range of the real part of the result is [0, +inf] and + * the range of the imaginary part is [-Pi, Pi] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex acosh(const complex& z); + +/*! Returns the complex inverse hyperbolic sine of a \p complex number. + * + * The range of the real part of the result is [-inf, +inf] and + * the range of the imaginary part is [-Pi/2, Pi/2] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex asinh(const complex& z); + +/*! Returns the complex inverse hyperbolic tangent of a \p complex number. + * + * The range of the real part of the result is [-inf, +inf] and + * the range of the imaginary part is [-Pi/2, Pi/2] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ complex atanh(const complex& z); + +/* --- Equality Operators --- */ + +/*! Returns true if two \p complex numbers are equal and false otherwise. + * + * \param lhs The first \p complex. + * \param rhs The second \p complex. + */ +template +__host__ __device__ inline bool operator==(const complex& lhs, const complex& rhs); + +/*! Returns true if the imaginary part of the \p complex number is zero and the + * real part is equal to the scalar. Returns false otherwise. + * + * \param lhs The scalar. + * \param rhs The \p complex. + */ +template +__host__ __device__ inline bool operator==(const T& lhs, const complex& rhs); + +/*! 
Returns true if the imaginary part of the \p complex number is zero and the + * real part is equal to the scalar. Returns false otherwise. + * + * \param lhs The \p complex. + * \param rhs The scalar. + */ +template +__host__ __device__ inline bool operator==(const complex& lhs, const T& rhs); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param lhs The first \p complex. + * \param rhs The second \p complex. + */ +template +__host__ __device__ inline bool operator!=(const complex& lhs, const complex& rhs); + +/*! Returns true if the imaginary part of the \p complex number is not zero or + * the real part is different from the scalar. Returns false otherwise. + * + * \param lhs The scalar. + * \param rhs The \p complex. + */ +template +__host__ __device__ inline bool operator!=(const T& lhs, const complex& rhs); + +/*! Returns true if the imaginary part of the \p complex number is not zero or + * the real part is different from the scalar. Returns false otherwise. + * + * \param lhs The \p complex. + * \param rhs The scalar. + */ +template +__host__ __device__ inline bool operator!=(const complex& lhs, const T& rhs); + +THRUST_NAMESPACE_END + +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/complex_inl.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/complex_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc18279a46f5b11409cce7a480b471843e507a9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/complex_inl.h @@ -0,0 +1,167 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +THRUST_NAMESPACE_BEGIN + +/* --- Constructors --- */ +template +inline __host__ __device__ complex::complex(const T& re) { + real(re); + imag(T()); +} + +template +inline __host__ __device__ complex::complex(const T& re, const T& im) { + real(re); + imag(im); +} + +#if ((!defined(_MSC_VER) && __cplusplus < 201103L) || \ + (defined(_MSC_VER) && _MSC_VER < 1900)) +template +inline __host__ __device__ complex::complex() { + real(T()); + imag(T()); +} + +template +inline __host__ __device__ complex::complex(const complex& z) { + real(z.real()); + imag(z.imag()); +} +#endif + +template +template +inline __host__ __device__ complex::complex(const complex& z) { + // The explicit T() is there no prevent Visual Studio from complaining + // about potential loss of precision + real(T(z.real())); + imag(T(z.imag())); +} + +/* --- Assignment Operators --- */ + +template +inline __host__ __device__ complex& complex::operator=(const T& re) { + real(re); + imag(T()); + return *this; +} + +template +inline __host__ __device__ complex& complex::operator=(const complex& z) { + real(z.real()); + imag(z.imag()); + return *this; +} + +template +template +inline __host__ __device__ complex& complex::operator=(const complex& z) { + real(T(z.real())); + imag(T(z.imag())); + return *this; +} + +/* --- Compound Assignment Operators --- */ +// TODO(leofang): support operators with argument of type T, see upstream + +template +__host__ __device__ inline complex& complex::operator+=(const complex z) { + *this = *this + z; + return 
*this; +} + +template +__host__ __device__ inline complex& complex::operator-=(const complex z) { + *this = *this - z; + return *this; +} + +template +__host__ __device__ inline complex& complex::operator*=(const complex z) { + *this = *this * z; + return *this; +} + +template +__host__ __device__ inline complex& complex::operator/=(const complex z) { + *this = *this / z; + return *this; +} + +/* --- Equality Operators --- */ + +template +__host__ __device__ inline bool operator==(const complex& lhs, + const complex& rhs) { + return lhs.real() == rhs.real() && lhs.imag() == rhs.imag(); +} + +template +__host__ __device__ inline bool operator==(const T& lhs, const complex& rhs) { + return lhs == rhs.real() && rhs.imag() == 0; +} + +template +__host__ __device__ inline bool operator==(const complex& lhs, const T& rhs) { + return lhs.real() == rhs && lhs.imag() == 0; +} + +template +__host__ __device__ inline bool operator!=(const complex& lhs, + const complex& rhs) { + return !(lhs == rhs); +} + +template +__host__ __device__ inline bool operator!=(const T& lhs, const complex& rhs) { + return !(lhs == rhs); +} + +template +__host__ __device__ inline bool operator!=(const complex& lhs, const T& rhs) { + return !(lhs == rhs); +} + +THRUST_NAMESPACE_END + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/csinh.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/csinh.h new file mode 100644 index 0000000000000000000000000000000000000000..c8b109198e23ae2f7a93208740d5299466664395 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/csinh.h @@ -0,0 +1,194 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2005 Bruce D. Evans and Steven G. Kargl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* adapted from FreeBSD: + * lib/msun/src/s_csinh.c + */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +__host__ __device__ inline complex csinh(const complex& z) { + double x, y, h; + uint32_t hx, hy, ix, iy, lx, ly; + const double huge = 8.98846567431157953864652595395e+307; // 0x1p1023; + + x = z.real(); + y = z.imag(); + + extract_words(hx, lx, x); + extract_words(hy, ly, y); + + ix = 0x7fffffff & hx; + iy = 0x7fffffff & hy; + + /* Handle the nearly-non-exceptional cases where x and y are finite. */ + if (ix < 0x7ff00000 && iy < 0x7ff00000) { + if ((iy | ly) == 0) return (complex(sinh(x), y)); + if (ix < 0x40360000) /* small x: normal case */ + return (complex(sinh(x) * cos(y), cosh(x) * sin(y))); + + /* |x| >= 22, so cosh(x) ~= exp(|x|) */ + if (ix < 0x40862e42) { + /* x < 710: exp(|x|) won't overflow */ + h = exp(fabs(x)) * 0.5; + return (complex(copysign(h, x) * cos(y), h * sin(y))); + } else if (ix < 0x4096bbaa) { + /* x < 1455: scale to avoid overflow */ + complex z_ = ldexp_cexp(complex(fabs(x), y), -1); + return (complex(z_.real() * copysign(1.0, x), z_.imag())); + } else { + /* x >= 1455: the result always overflows */ + h = huge * x; + return (complex(h * cos(y), h * h * sin(y))); + } + } + + /* + * sinh(+-0 +- I Inf) = sign(d(+-0, dNaN))0 + I dNaN. + * The sign of 0 in the result is unspecified. Choice = normally + * the same as dNaN. Raise the invalid floating-point exception. + * + * sinh(+-0 +- I NaN) = sign(d(+-0, NaN))0 + I d(NaN). + * The sign of 0 in the result is unspecified. Choice = normally + * the same as d(NaN). + */ + if ((ix | lx) == 0 && iy >= 0x7ff00000) + return (complex(copysign(0.0, x * (y - y)), y - y)); + + /* + * sinh(+-Inf +- I 0) = +-Inf + I +-0. + * + * sinh(NaN +- I 0) = d(NaN) + I +-0. 
+ */ + if ((iy | ly) == 0 && ix >= 0x7ff00000) { + if (((hx & 0xfffff) | lx) == 0) return (complex(x, y)); + return (complex(x, copysign(0.0, y))); + } + + /* + * sinh(x +- I Inf) = dNaN + I dNaN. + * Raise the invalid floating-point exception for finite nonzero x. + * + * sinh(x + I NaN) = d(NaN) + I d(NaN). + * Optionally raises the invalid floating-point exception for finite + * nonzero x. Choice = don't raise (except for signaling NaNs). + */ + if (ix < 0x7ff00000 && iy >= 0x7ff00000) + return (complex(y - y, x * (y - y))); + + /* + * sinh(+-Inf + I NaN) = +-Inf + I d(NaN). + * The sign of Inf in the result is unspecified. Choice = normally + * the same as d(NaN). + * + * sinh(+-Inf +- I Inf) = +Inf + I dNaN. + * The sign of Inf in the result is unspecified. Choice = always +. + * Raise the invalid floating-point exception. + * + * sinh(+-Inf + I y) = +-Inf cos(y) + I Inf sin(y) + */ + if (ix >= 0x7ff00000 && ((hx & 0xfffff) | lx) == 0) { + if (iy >= 0x7ff00000) return (complex(x * x, x * (y - y))); + return (complex(x * cos(y), infinity() * sin(y))); + } + + /* + * sinh(NaN + I NaN) = d(NaN) + I d(NaN). + * + * sinh(NaN +- I Inf) = d(NaN) + I d(NaN). + * Optionally raises the invalid floating-point exception. + * Choice = raise. + * + * sinh(NaN + I y) = d(NaN) + I d(NaN). + * Optionally raises the invalid floating-point exception for finite + * nonzero y. Choice = don't raise (except for signaling NaNs). 
+ */ + return (complex((x * x) * (y - y), (x + x) * (y - y))); +} + +__host__ __device__ inline complex csin(complex z) { + /* csin(z) = -I * csinh(I * z) */ + z = csinh(complex(-z.imag(), z.real())); + return (complex(z.imag(), -z.real())); +} + +} // namespace complex + +} // namespace detail + +template +__host__ __device__ inline complex sin(const complex& z) { + const ValueType re = z.real(); + const ValueType im = z.imag(); + return complex(::sin(re) * ::cosh(im), ::cos(re) * ::sinh(im)); +} + +template +__host__ __device__ inline complex sinh(const complex& z) { + const ValueType re = z.real(); + const ValueType im = z.imag(); + return complex(::sinh(re) * ::cos(im), ::cosh(re) * ::sin(im)); +} + +template <> +__host__ __device__ inline complex sin(const complex& z) { + return detail::complex::csin(z); +} + +template <> +__host__ __device__ inline complex sinh(const complex& z) { + return detail::complex::csinh(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/csinhf.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/csinhf.h new file mode 100644 index 0000000000000000000000000000000000000000..49f9b795de11ee5498784eb877b9ee5c30d66948 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/csinhf.h @@ -0,0 +1,135 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/*- + * Copyright (c) 2005 Bruce D. Evans and Steven G. Kargl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* adapted from FreeBSD: + * lib/msun/src/s_csinhf.c + */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +__host__ __device__ inline complex csinhf(const complex& z) { + float x, y, h; + uint32_t hx, hy, ix, iy; + + const float huge = 1.70141183460469231731687303716e+38; // 0x1p127; + + x = z.real(); + y = z.imag(); + + get_float_word(hx, x); + get_float_word(hy, y); + + ix = 0x7fffffff & hx; + iy = 0x7fffffff & hy; + + if (ix < 0x7f800000 && iy < 0x7f800000) { + if (iy == 0) return (complex(sinhf(x), y)); + if (ix < 0x41100000) /* small x: normal case */ + return (complex(sinhf(x) * cosf(y), coshf(x) * sinf(y))); + + /* |x| >= 9, so cosh(x) ~= exp(|x|) */ + if (ix < 0x42b17218) { + /* x < 88.7: expf(|x|) won't overflow */ + h = expf(fabsf(x)) * 0.5f; + return (complex(copysignf(h, x) * cosf(y), h * sinf(y))); + } else if (ix < 0x4340b1e7) { + /* x < 192.7: scale to avoid overflow */ + complex z_ = ldexp_cexpf(complex(fabsf(x), y), -1); + return (complex(z_.real() * copysignf(1.0f, x), z_.imag())); + } else { + /* x >= 192.7: the result always overflows */ + h = huge * x; + return (complex(h * cosf(y), h * h * sinf(y))); + } + } + + if (ix == 0 && iy >= 0x7f800000) + return (complex(copysignf(0, x * (y - y)), y - y)); + + if (iy == 0 && ix >= 0x7f800000) { + if ((hx & 0x7fffff) == 0) return (complex(x, y)); + return (complex(x, copysignf(0.0f, y))); + } + + if (ix < 0x7f800000 && iy >= 0x7f800000) + return (complex(y - y, x * (y - y))); + + if (ix >= 0x7f800000 && (hx & 0x7fffff) == 0) { + if (iy >= 0x7f800000) return (complex(x * x, x * (y - y))); + return (complex(x * cosf(y), infinity() * sinf(y))); + } + + return (complex((x * x) * (y - y), (x + x) * (y - y))); +} + +__host__ __device__ inline complex csinf(complex z) { + z = csinhf(complex(-z.imag(), z.real())); + return (complex(z.imag(), -z.real())); +} + +} // namespace complex + +} // namespace detail 
+ +template <> +__host__ __device__ inline complex sin(const complex& z) { + return detail::complex::csinf(z); +} + +template <> +__host__ __device__ inline complex sinh(const complex& z) { + return detail::complex::csinhf(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/ctanh.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/ctanh.h new file mode 100644 index 0000000000000000000000000000000000000000..48f7faff3cb73874dcdb167f9c606ba41d514fb6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/ctanh.h @@ -0,0 +1,193 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2011 David Schultz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Adapted from FreeBSD by Filipe Maia : + * freebsd/lib/msun/src/s_ctanh.c + */ + +/* + * Hyperbolic tangent of a complex argument z = x + i y. + * + * The algorithm is from: + * + * W. Kahan. Branch Cuts for Complex Elementary Functions or Much + * Ado About Nothing's Sign Bit. In The State of the Art in + * Numerical Analysis, pp. 165 ff. Iserles and Powell, eds., 1987. + * + * Method: + * + * Let t = tan(x) + * beta = 1/cos^2(y) + * s = sinh(x) + * rho = cosh(x) + * + * We have: + * + * tanh(z) = sinh(z) / cosh(z) + * + * sinh(x) cos(y) + i cosh(x) sin(y) + * = --------------------------------- + * cosh(x) cos(y) + i sinh(x) sin(y) + * + * cosh(x) sinh(x) / cos^2(y) + i tan(y) + * = ------------------------------------- + * 1 + sinh^2(x) / cos^2(y) + * + * beta rho s + i t + * = ---------------- + * 1 + beta s^2 + * + * Modifications: + * + * I omitted the original algorithm's handling of overflow in tan(x) after + * verifying with nearpi.c that this can't happen in IEEE single or double + * precision. I also handle large x differently. 
+ */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +__host__ __device__ inline complex ctanh(const complex& z) { + double x, y; + double t, beta, s, rho, denom; + uint32_t hx, ix, lx; + + x = z.real(); + y = z.imag(); + + extract_words(hx, lx, x); + ix = hx & 0x7fffffff; + + /* + * ctanh(NaN + i 0) = NaN + i 0 + * + * ctanh(NaN + i y) = NaN + i NaN for y != 0 + * + * The imaginary part has the sign of x*sin(2*y), but there's no + * special effort to get this right. + * + * ctanh(+-Inf +- i Inf) = +-1 +- 0 + * + * ctanh(+-Inf + i y) = +-1 + 0 sin(2y) for y finite + * + * The imaginary part of the sign is unspecified. This special + * case is only needed to avoid a spurious invalid exception when + * y is infinite. + */ + if (ix >= 0x7ff00000) { + if ((ix & 0xfffff) | lx) /* x is NaN */ + return (complex(x, (y == 0 ? y : x * y))); + set_high_word(x, hx - 0x40000000); /* x = copysign(1, x) */ + return (complex(x, copysign(0.0, isinf(y) ? y : sin(y) * cos(y)))); + } + + /* + * ctanh(x + i NAN) = NaN + i NaN + * ctanh(x +- i Inf) = NaN + i NaN + */ + if (!isfinite(y)) return (complex(y - y, y - y)); + + /* + * ctanh(+-huge + i +-y) ~= +-1 +- i 2sin(2y)/exp(2x), using the + * approximation sinh^2(huge) ~= exp(2*huge) / 4. + * We use a modified formula to avoid spurious overflow. 
+ */ + if (ix >= 0x40360000) { /* x >= 22 */ + double exp_mx = exp(-fabs(x)); + return (complex(copysign(1.0, x), + 4.0 * sin(y) * cos(y) * exp_mx * exp_mx)); + } + + /* Kahan's algorithm */ + t = tan(y); + beta = 1.0 + t * t; /* = 1 / cos^2(y) */ + s = sinh(x); + rho = sqrt(1.0 + s * s); /* = cosh(x) */ + denom = 1.0 + beta * s * s; + return (complex((beta * rho * s) / denom, t / denom)); +} + +__host__ __device__ inline complex ctan(complex z) { + /* ctan(z) = -I * ctanh(I * z) */ + z = ctanh(complex(-z.imag(), z.real())); + return (complex(z.imag(), -z.real())); +} + +} // namespace complex + +} // namespace detail + +template +__host__ __device__ inline complex tan(const complex& z) { + return sin(z) / cos(z); +} + +template +__host__ __device__ inline complex tanh(const complex& z) { + // This implementation seems better than the simple sin/cos + return (thrust::exp(ValueType(2) * z) - ValueType(1)) / + (thrust::exp(ValueType(2) * z) + ValueType(1)); +} + +template <> +__host__ __device__ inline complex tan(const complex& z) { + return detail::complex::ctan(z); +} + +template <> +__host__ __device__ inline complex tanh(const complex& z) { + return detail::complex::ctanh(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/ctanhf.h b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/ctanhf.h new file mode 100644 index 0000000000000000000000000000000000000000..c5de7b94ec5b581b9f044793a62c20e6c1720996 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/complex/ctanhf.h @@ -0,0 +1,118 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*- + * Copyright (c) 2011 David Schultz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Adapted from FreeBSD by Filipe Maia, filipe.c.maia@gmail.com: + * freebsd/lib/msun/src/s_ctanhf.c + */ + +/* + * Hyperbolic tangent of a complex argument z. See ctanh.c for details. 
+ */ + +#pragma once + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN +namespace detail { +namespace complex { + +using thrust::complex; + +__host__ __device__ inline complex ctanhf(const complex& z) { + float x, y; + float t, beta, s, rho, denom; + uint32_t hx, ix; + + x = z.real(); + y = z.imag(); + + get_float_word(hx, x); + ix = hx & 0x7fffffff; + + if (ix >= 0x7f800000) { + if (ix & 0x7fffff) return (complex(x, (y == 0.0f ? y : x * y))); + set_float_word(x, hx - 0x40000000); + return (complex(x, copysignf(0, isinf(y) ? y : sinf(y) * cosf(y)))); + } + + if (!isfinite(y)) return (complex(y - y, y - y)); + + if (ix >= 0x41300000) { /* x >= 11 */ + float exp_mx = expf(-fabsf(x)); + return (complex(copysignf(1.0f, x), + 4.0f * sinf(y) * cosf(y) * exp_mx * exp_mx)); + } + + t = tanf(y); + beta = 1.0f + t * t; + s = sinhf(x); + rho = sqrtf(1.0f + s * s); + denom = 1.0f + beta * s * s; + return (complex((beta * rho * s) / denom, t / denom)); +} + +__host__ __device__ inline complex ctanf(complex z) { + z = ctanhf(complex(-z.imag(), z.real())); + return (complex(z.imag(), -z.real())); +} + +} // namespace complex + +} // namespace detail + +template <> +__host__ __device__ inline complex tan(const complex& z) { + return detail::complex::ctanf(z); +} + +template <> +__host__ __device__ inline complex tanh(const complex& z) { + return detail::complex::ctanhf(z); +} + +THRUST_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/__init__.py b/vllm/lib/python3.10/site-packages/cupy/_indexing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..637a20f18cc32b5ddedc10f13587b0d8bfc4f93e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_indexing/__init__.py @@ -0,0 +1,2 @@ +# Functions from the following NumPy document +# https://numpy.org/doc/stable/reference/routines.indexing.html diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/__init__.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10d83fbe2bf767a8d7d69ac46501502ff21834b5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/generate.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/generate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32e6750469a5a022c696501108736a0ea3032b5c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/generate.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/indexing.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6656b9f2433d5221bb5cb820284ab82cb742dac Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/indexing.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/insert.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/insert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d40c9232b8a5a677484d9cb4e93ae3b0df90556 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/insert.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/iterate.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/iterate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27191a1f3930225b3fa34f6736cc3f23aa923c5b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/_indexing/__pycache__/iterate.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_indexing/generate.py b/vllm/lib/python3.10/site-packages/cupy/_indexing/generate.py new file mode 100644 index 0000000000000000000000000000000000000000..c54e680fb4c316adba466536a4919d9d50a8ff0d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_indexing/generate.py @@ -0,0 +1,588 @@ +# class s_(object): + +import functools +import numbers +import operator + +import numpy + +import cupy +from cupy._creation import from_data +from cupy._manipulation import join + + +class AxisConcatenator(object): + """Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see :func:`cupy.r_`. + This implementation is partially borrowed from NumPy's one. + + """ + + def _output_obj(self, obj, ndim, ndmin, trans1d): + k2 = ndmin - ndim + if trans1d < 0: + trans1d += k2 + 1 + defaxes = list(range(ndmin)) + k1 = trans1d + axes = defaxes[:k1] + defaxes[k2:] + \ + defaxes[k1:k2] + return obj.transpose(axes) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.trans1d = trans1d + self.matrix = matrix + self.ndmin = ndmin + + def __getitem__(self, key): + trans1d = self.trans1d + ndmin = self.ndmin + objs = [] + arrays = [] + scalars = [] + if isinstance(key, str): + raise NotImplementedError + if not isinstance(key, tuple): + key = (key,) + + for i, k in enumerate(key): + if isinstance(k, slice): + raise NotImplementedError + elif isinstance(k, str): + if i != 0: + raise ValueError( + 'special directives must be the first entry.') + raise NotImplementedError + elif type(k) in numpy.ScalarType: + newobj = from_data.array(k, ndmin=ndmin) + scalars.append(i) + else: + newobj = from_data.array(k, copy=False, ndmin=ndmin) + if ndmin > 1: + ndim = from_data.array(k, copy=False).ndim + if trans1d != -1 and ndim < ndmin: + newobj = self._output_obj(newobj, ndim, ndmin, trans1d) + arrays.append(newobj) + + objs.append(newobj) + + final_dtype = 
numpy.result_type(*arrays, *[key[k] for k in scalars]) + if final_dtype is not None: + for k in scalars: + objs[k] = objs[k].astype(final_dtype) + + return join.concatenate(tuple(objs), axis=self.axis) + + def __len__(self): + return 0 + + +class CClass(AxisConcatenator): + + def __init__(self): + super(CClass, self).__init__(-1, ndmin=2, trans1d=0) + + +c_ = CClass() +"""Translates slice objects to concatenation along the second axis. + +This is a CuPy object that corresponds to :obj:`cupy.r_`, which is +useful because of its common occurrence. In particular, arrays will be +stacked along their last axis after being upgraded to at least 2-D with +1's post-pended to the shape (column vectors made out of 1-D arrays). + +For detailed documentation, see :obj:`r_`. + +This implementation is partially borrowed from NumPy's one. + +Returns: + cupy.ndarray: Joined array. + +.. seealso:: :obj:`numpy.c_` + +Examples +-------- +>>> a = cupy.array([[1, 2, 3]], dtype=np.int32) +>>> b = cupy.array([[4, 5, 6]], dtype=np.int32) +>>> cupy.c_[a, 0, 0, b] +array([[1, 2, 3, 0, 0, 4, 5, 6]], dtype=int32) + +""" + + +class RClass(AxisConcatenator): + + def __init__(self): + super(RClass, self).__init__() + + +r_ = RClass() +"""Translates slice objects to concatenation along the first axis. + +This is a simple way to build up arrays quickly. +If the index expression contains comma separated arrays, then stack +them along their first axis. + +This object can build up from normal CuPy arrays. +Therefore, the other objects (e.g. writing strings like '2,3,4', +or using imaginary numbers like [1,2,3j], +or using string integers like '-1') are not implemented yet +compared with NumPy. + +This implementation is partially borrowed from NumPy's one. + +Returns: + cupy.ndarray: Joined array. + +.. 
seealso:: :obj:`numpy.r_` + +Examples +-------- +>>> a = cupy.array([1, 2, 3], dtype=np.int32) +>>> b = cupy.array([4, 5, 6], dtype=np.int32) +>>> cupy.r_[a, 0, 0, b] +array([1, 2, 3, 0, 0, 4, 5, 6], dtype=int32) + +""" + + +def indices(dimensions, dtype=int): + """Returns an array representing the indices of a grid. + + Computes an array where the subarrays contain index values 0,1,... + varying only along the corresponding axis. + + Args: + dimensions: The shape of the grid. + dtype: Data type specifier. It is int by default. + + Returns: + ndarray: + The array of grid indices, + ``grid.shape = (len(dimensions),) + tuple(dimensions)``. + + Examples + -------- + >>> grid = cupy.indices((2, 3)) + >>> grid.shape + (2, 2, 3) + >>> grid[0] # row indices + array([[0, 0, 0], + [1, 1, 1]]) + >>> grid[1] # column indices + array([[0, 1, 2], + [0, 1, 2]]) + + .. seealso:: :func:`numpy.indices` + + """ + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,) * N + res = cupy.empty((N,) + dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + res[i] = cupy.arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i + 1:] + ) + return res + + +def ix_(*args): + """Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[cupy.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Args: + *args: 1-D sequences + + Returns: + tuple of ndarrays: + N arrays with N dimensions each, with N the number of input sequences. + Together these arrays form an open mesh. 
+ + Examples + -------- + >>> a = cupy.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = cupy.ix_([0,1], [2,4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`numpy.ix_` + + """ + # TODO(niboshi): Avoid nonzero which may synchronize the device. + out = [] + nd = len(args) + for k, new in enumerate(args): + new = from_data.asarray(new) + if new.ndim != 1: + raise ValueError('Cross index must be 1 dimensional') + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(numpy.intp) + if cupy.issubdtype(new.dtype, cupy.bool_): + new, = new.nonzero() # may synchronize + new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1)) + out.append(new) + return tuple(out) + + +def ravel_multi_index(multi_index, dims, mode='wrap', order='C'): + """ + Converts a tuple of index arrays into an array of flat indices, applying + boundary modes to the multi-index. + + Args: + multi_index (tuple of cupy.ndarray) : A tuple of integer arrays, one + array for each dimension. + dims (tuple of ints): The shape of array into which the indices from + ``multi_index`` apply. + mode ('raise', 'wrap' or 'clip'), optional: Specifies how out-of-bounds + indices are handled. Can specify either one mode or a tuple of + modes, one mode per index: + + - *'raise'* -- raise an error + - *'wrap'* -- wrap around (default) + - *'clip'* -- clip to the range + + In 'clip' mode, a negative index which would normally wrap will + clip to 0 instead. + order ('C' or 'F'), optional: Determines whether the multi-index should + be viewed as indexing in row-major (C-style) or column-major + (Fortran-style) order. + + Returns: + raveled_indices (cupy.ndarray): An array of indices into the flattened + version of an array of dimensions ``dims``. + + .. warning:: + + This function may synchronize the device when ``mode == 'raise'``. 
+ + Notes + ----- + Note that the default `mode` (``'wrap'``) is different than in NumPy. This + is done to avoid potential device synchronization. + + Examples + -------- + >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6)) + array([22, 41, 37]) + >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6), + ... order='F') + array([31, 41, 13]) + >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,6), + ... mode='clip') + array([22, 23, 19]) + >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,4), + ... mode=('clip', 'wrap')) + array([12, 13, 13]) + >>> cupy.ravel_multi_index(cupy.asarray((3,1,4,1)), (6,7,8,9)) + array(1621) + + .. seealso:: :func:`numpy.ravel_multi_index`, :func:`unravel_index` + """ + + ndim = len(dims) + if len(multi_index) != ndim: + raise ValueError( + "parameter multi_index must be a sequence of " + "length {}".format(ndim)) + + for d in dims: + if not isinstance(d, numbers.Integral): + raise TypeError( + "{} object cannot be interpreted as an integer".format( + type(d))) + + if isinstance(mode, str): + mode = (mode, ) * ndim + + if functools.reduce(operator.mul, dims) > cupy.iinfo(cupy.int64).max: + raise ValueError("invalid dims: array size defined by dims is larger " + "than the maximum possible size") + + s = 1 + ravel_strides = [1] * ndim + + order = 'C' if order is None else order.upper() + if order == 'C': + for i in range(ndim - 2, -1, -1): + s = s * dims[i + 1] + ravel_strides[i] = s + elif order == 'F': + for i in range(1, ndim): + s = s * dims[i - 1] + ravel_strides[i] = s + else: + raise ValueError('order not understood') + + multi_index = cupy.broadcast_arrays(*multi_index) + raveled_indices = cupy.zeros(multi_index[0].shape, dtype=cupy.int64) + for d, stride, idx, _mode in zip(dims, ravel_strides, multi_index, mode): + + if not isinstance(idx, cupy.ndarray): + raise TypeError("elements of multi_index must be cupy arrays") + if not cupy.can_cast(idx, cupy.int64, 'same_kind'): + raise 
TypeError( + 'multi_index entries could not be cast from dtype(\'{}\') to ' + 'dtype(\'{}\') according to the rule \'same_kind\''.format( + idx.dtype, cupy.int64().dtype)) + idx = idx.astype(cupy.int64, copy=False) + + if _mode == "raise": + if cupy.any(cupy.logical_or(idx >= d, idx < 0)): + raise ValueError("invalid entry in coordinates array") + elif _mode == "clip": + idx = cupy.clip(idx, 0, d - 1) + elif _mode == 'wrap': + idx = idx % d + else: + raise ValueError('Unrecognized mode: {}'.format(_mode)) + raveled_indices += stride * idx + return raveled_indices + + +def unravel_index(indices, dims, order='C'): + """Converts array of flat indices into a tuple of coordinate arrays. + + Args: + indices (cupy.ndarray): An integer array whose elements are indices + into the flattened version of an array of dimensions :obj:`dims`. + dims (tuple of ints): The shape of the array to use for unraveling + indices. + order ('C' or 'F'): Determines whether the indices should be viewed as + indexing in row-major (C-style) or column-major (Fortran-style) + order. + + Returns: + tuple of ndarrays: + Each array in the tuple has the same shape as the indices array. + + Examples + -------- + >>> cupy.unravel_index(cupy.array([22, 41, 37]), (7, 6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> cupy.unravel_index(cupy.array([31, 41, 13]), (7, 6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + .. warning:: + + This function may synchronize the device. + + .. 
seealso:: :func:`numpy.unravel_index`, :func:`ravel_multi_index` + + """ + order = 'C' if order is None else order.upper() + if order == 'C': + dims = reversed(dims) + elif order == 'F': + pass + else: + raise ValueError('order not understood') + + if not cupy.can_cast(indices, cupy.int64, 'same_kind'): + raise TypeError( + 'Iterator operand 0 dtype could not be cast ' + 'from dtype(\'{}\') to dtype(\'{}\') ' + 'according to the rule \'same_kind\''.format( + indices.dtype, cupy.int64().dtype)) + + if (indices < 0).any(): # synchronize! + raise ValueError('invalid entry in index array') + + unraveled_coords = [] + for dim in dims: + unraveled_coords.append(indices % dim) + indices = indices // dim + + if (indices > 0).any(): # synchronize! + raise ValueError('invalid entry in index array') + + if order == 'C': + unraveled_coords = reversed(unraveled_coords) + return tuple(unraveled_coords) + + +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of + size ``(n, n)`` with a possible offset argument `k`, when called + as ``mask_func(a, k)`` returns a new array with zeros in certain + locations (functions like :func:`~cupy.triu` or :func:`~cupy.tril` do + precisely this). Then this function returns the indices where the non-zero + values would be located. + + Args: + n (int): The returned indices will be valid to access arrays + of shape (n, n). + mask_func (callable): A function whose call signature is + similar to that of :func:`~cupy.triu`, :func:`~tril`. That is, + ``mask_func(x, k)`` returns a boolean array, shaped like + `x`. `k` is an optional argument to the function. + k (scalar): An optional argument which is passed through to + `mask_func`. Functions like :func:`~cupy.triu`, :func:`~cupy.tril` + take a second argument that is interpreted as an offset. 
+ + Returns: + tuple of arrays: The `n` arrays of indices corresponding to + the locations where ``mask_func(np.ones((n, n)), k)`` is + True. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`numpy.mask_indices` + """ + a = cupy.ones((n, n), dtype=cupy.int8) + return mask_func(a, k).nonzero() + + +# TODO(okuta): Implement diag_indices + + +# TODO(okuta): Implement diag_indices_from + + +def tril_indices(n, k=0, m=None): + """Returns the indices of the lower triangular matrix. + Here, the first group of elements contains row coordinates + of all indices and the second group of elements + contains column coordinates. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal above which to zero elements. `k = 0` + (the default) is the main diagonal, `k < 0` is + below it and `k > 0` is above. + m : int, optional + The column dimension of the arrays for which the + returned arrays will be valid. By default, `m = n`. + + Returns + ------- + y : tuple of ndarrays + The indices for the triangle. The returned tuple + contains two arrays, each with the indices along + one dimension of the array. + + See Also + -------- + numpy.tril_indices + + """ + + tri_ = cupy.tri(n, m, k=k, dtype=bool) + + return tuple(cupy.broadcast_to(inds, tri_.shape)[tri_] + for inds in cupy.indices(tri_.shape, dtype=int)) + + +def tril_indices_from(arr, k=0): + """Returns the indices for the lower-triangle of arr. + + Parameters + ---------- + arr : cupy.ndarray + The indices are valid for square arrays + whose dimensions are the same as arr. + k : int, optional + Diagonal offset. + + See Also + -------- + numpy.tril_indices_from + + """ + + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +def triu_indices(n, k=0, m=None): + """Returns the indices of the upper triangular matrix. 
+ Here, the first group of elements contains row coordinates + of all indices and the second group of elements + contains column coordinates. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Refers to the diagonal offset. By default, `k = 0` i.e. + the main dialogal. The positive value of `k` + denotes the diagonals above the main diagonal, while the negative + value includes the diagonals below the main diagonal. + m : int, optional + The column dimension of the arrays for which the + returned arrays will be valid. By default, `m = n`. + + Returns + ------- + y : tuple of ndarrays + The indices for the triangle. The returned tuple + contains two arrays, each with the indices along + one dimension of the array. + + See Also + -------- + numpy.triu_indices + + """ + + tri_ = ~cupy.tri(n, m, k=k - 1, dtype=bool) + + return tuple(cupy.broadcast_to(inds, tri_.shape)[tri_] + for inds in cupy.indices(tri_.shape, dtype=int)) + + +def triu_indices_from(arr, k=0): + """Returns indices for the upper-triangle of arr. + + Parameters + ---------- + arr : cupy.ndarray + The indices are valid for square arrays. + k : int, optional + Diagonal offset (see 'triu_indices` for details). + + Returns + ------- + triu_indices_from : tuple of ndarrays + Indices for the upper-triangle of `arr`. 
+ + See Also + -------- + numpy.triu_indices_from + + """ + + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/indexing.py b/vllm/lib/python3.10/site-packages/cupy/_indexing/indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..37314afe73bf9aff3cf0feceb8ada312950d2428 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_indexing/indexing.py @@ -0,0 +1,223 @@ +import cupy +from cupy._core import internal + + +def take(a, indices, axis=None, out=None): + """Takes elements of an array at specified indices along an axis. + + This is an implementation of "fancy indexing" at single axis. + + This function does not support ``mode`` option. + + Args: + a (cupy.ndarray): Array to extract elements. + indices (int or array-like): Indices of elements that this function + takes. + axis (int): The axis along which to select indices. The flattened input + is used by default. + out (cupy.ndarray): Output array. If provided, it should be of + appropriate shape and dtype. + + Returns: + cupy.ndarray: The result of fancy indexing. + + .. seealso:: :func:`numpy.take` + + """ + # TODO(okuta): check type + return a.take(indices, axis, out) + + +def take_along_axis(a, indices, axis): + """Take values from the input array by matching 1d index and data slices. + + Args: + a (cupy.ndarray): Array to extract elements. + indices (cupy.ndarray): Indices to take along each 1d slice of ``a``. + axis (int): The axis to take 1d slices along. + + Returns: + cupy.ndarray: The indexed result. + + .. 
seealso:: :func:`numpy.take_along_axis` + """ + + if indices.dtype.kind not in ('i', 'u'): + raise IndexError('`indices` must be an integer array') + + if axis is None: + a = a.ravel() + axis = 0 + + ndim = a.ndim + + axis = internal._normalize_axis_index(axis, ndim) + + if ndim != indices.ndim: + raise ValueError( + '`indices` and `a` must have the same number of dimensions') + + fancy_index = [] + for i, n in enumerate(a.shape): + if i == axis: + fancy_index.append(indices) + else: + ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1) + fancy_index.append(cupy.arange(n).reshape(ind_shape)) + + return a[tuple(fancy_index)] + + +def choose(a, choices, out=None, mode='raise'): + return a.choose(choices, out, mode) + + +def compress(condition, a, axis=None, out=None): + """Returns selected slices of an array along given axis. + + Args: + condition (1-D array of bools): Array that selects which entries to + return. If len(condition) is less than the size of a along the + given axis, then output is truncated to the length of the condition + array. + a (cupy.ndarray): Array from which to extract a part. + axis (int): Axis along which to take slices. If None (default), work + on the flattened array. + out (cupy.ndarray): Output array. If provided, it should be of + appropriate shape and dtype. + + Returns: + cupy.ndarray: A copy of a without the slices along axis for which + condition is false. + + .. warning:: + + This function may synchronize the device. + + + .. seealso:: :func:`numpy.compress` + + """ + return a.compress(condition, axis, out) + + +def diagonal(a, offset=0, axis1=0, axis2=1): + """Returns specified diagonals. + + This function extracts the diagonals along two specified axes. The other + axes are not changed. This function returns a writable view of this array + as NumPy 1.10 will do. + + Args: + a (cupy.ndarray): Array from which the diagonals are taken. + offset (int): Index of the diagonals. 
Zero indicates the main + diagonals, a positive value upper diagonals, and a negative value + lower diagonals. + axis1 (int): The first axis to take diagonals from. + axis2 (int): The second axis to take diagonals from. + + Returns: + cupy.ndarray: A view of the diagonals of ``a``. + + .. seealso:: :func:`numpy.diagonal` + + """ + # TODO(okuta): check type + return a.diagonal(offset, axis1, axis2) + + +def extract(condition, a): + """Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. + If ``condition`` is boolean, ``np.extract`` is equivalent to + ``arr[condition]``. + + Args: + condition (int or array_like): An array whose nonzero or True entries + indicate the elements of array to extract. + a (cupy.ndarray): Input array of the same size as condition. + + Returns: + cupy.ndarray: Rank 1 array of values from arr where condition is True. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`numpy.extract` + """ + + if not isinstance(a, cupy.ndarray): + raise TypeError('extract requires input array to be cupy.ndarray') + + if not isinstance(condition, cupy.ndarray): + condition = cupy.array(condition) + + a = a.ravel() + condition = condition.ravel() + + return a.take(condition.nonzero()[0]) + + +def select(condlist, choicelist, default=0): + """Return an array drawn from elements in choicelist, depending on conditions. + + Args: + condlist (list of bool arrays): The list of conditions which determine + from which array in `choicelist` the output elements are taken. + When multiple conditions are satisfied, the first one encountered + in `condlist` is used. + choicelist (list of cupy.ndarray): The list of arrays from which the + output elements are taken. It has to be of the same length + as `condlist`. + default (scalar) : If provided, will fill element inserted in `output` + when all conditions evaluate to False. default value is 0. 
+ + Returns: + cupy.ndarray: The output at position m is the m-th element of the + array in `choicelist` where the m-th element of the corresponding + array in `condlist` is True. + + .. seealso:: :func:`numpy.select` + """ # NOQA + + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + if not cupy.isscalar(default): + raise TypeError("default only accepts scalar values") + + for i in range(len(choicelist)): + if not isinstance(choicelist[i], cupy.ndarray): + raise TypeError("choicelist only accepts lists of cupy ndarrays") + cond = condlist[i] + if cond.dtype.type is not cupy.bool_: + raise ValueError( + 'invalid entry {} in condlist: should be boolean ndarray' + .format(i)) + + dtype = cupy.result_type(*choicelist) + + condlist = cupy.broadcast_arrays(*condlist) + choicelist = cupy.broadcast_arrays(*choicelist, default) + + if choicelist[0].ndim == 0: + result_shape = condlist[0].shape + else: + result_shape = cupy.broadcast_arrays(condlist[0], + choicelist[0])[0].shape + + result = cupy.empty(result_shape, dtype) + cupy.copyto(result, default) + + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + cupy.copyto(result, choice, where=cond) + + return result diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/insert.py b/vllm/lib/python3.10/site-packages/cupy/_indexing/insert.py new file mode 100644 index 0000000000000000000000000000000000000000..b1f7627093bbb867336b782ea9c57f4896d724fd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_indexing/insert.py @@ -0,0 +1,260 @@ +import numpy + +import cupy +from cupy import _core + + +def place(arr, mask, vals): + """Change elements of an array based on conditional and input values. 
+ + This function uses the first N elements of `vals`, where N is the number + of true values in `mask`. + + Args: + arr (cupy.ndarray): Array to put data into. + mask (array-like): Boolean mask array. Must have the same size as `a`. + vals (array-like): Values to put into `a`. Only the first + N elements are used, where N is the number of True values in + `mask`. If `vals` is smaller than N, it will be repeated, and if + elements of `a` are to be masked, this sequence must be non-empty. + + Examples + -------- + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`numpy.place` + """ + # TODO(niboshi): Avoid nonzero which may synchronize the device. + mask = cupy.asarray(mask) + if arr.size != mask.size: + raise ValueError('Mask and data must be the same size.') + vals = cupy.asarray(vals) + + mask_indices = mask.ravel().nonzero()[0] # may synchronize + if mask_indices.size == 0: + return + if vals.size == 0: + raise ValueError('Cannot insert from an empty array.') + arr.put(mask_indices, vals, mode='wrap') + + +def put(a, ind, v, mode='wrap'): + """Replaces specified elements of an array with given values. + + Args: + a (cupy.ndarray): Target array. + ind (array-like): Target indices, interpreted as integers. + v (array-like): Values to place in `a` at target indices. + If `v` is shorter than `ind` it will be repeated as necessary. + mode (str): How out-of-bounds indices will behave. Its value must be + either `'raise'`, `'wrap'` or `'clip'`. Otherwise, + :class:`TypeError` is raised. + + .. note:: + Default `mode` is set to `'wrap'` to avoid unintended performance drop. + If you need NumPy's behavior, please pass `mode='raise'` manually. + + .. 
seealso:: :func:`numpy.put` + """ + a.put(ind, v, mode=mode) + + +_putmask_kernel = _core.ElementwiseKernel( + 'Q mask, raw S values, uint64 len_vals', 'T out', + ''' + if (mask) out = (T) values[i % len_vals]; + ''', + 'cupy_putmask_kernel' +) + + +def putmask(a, mask, values): + """ + Changes elements of an array inplace, based on a conditional mask and + input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + If `values` is not the same size as `a` and `mask` then it will repeat. + + Args: + a (cupy.ndarray): Target array. + mask (cupy.ndarray): Boolean mask array. It has to be + the same shape as `a`. + values (cupy.ndarray or scalar): Values to put into `a` where `mask` + is True. If `values` is smaller than `a`, then it will be + repeated. + + Examples + -------- + >>> x = cupy.arange(6).reshape(2, 3) + >>> cupy.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = cupy.arange(6) + >>> cupy.putmask(x, x>2, cupy.array([-33, -44])) + >>> x + array([ 0, 1, 2, -44, -33, -44]) + + .. 
seealso:: :func:`numpy.putmask` + + """ + + if not isinstance(a, cupy.ndarray): + raise TypeError('`a` should be of type cupy.ndarray') + if not isinstance(mask, cupy.ndarray): + raise TypeError('`mask` should be of type cupy.ndarray') + if not (cupy.isscalar(values) or isinstance(values, cupy.ndarray)): + raise TypeError('`values` should be of type cupy.ndarray') + + if not a.shape == mask.shape: + raise ValueError('mask and data must be the same size') + + mask = mask.astype(numpy.bool_) + + if cupy.isscalar(values): + a[mask] = values + + elif not numpy.can_cast(values.dtype, a.dtype): + raise TypeError('Cannot cast array data from' + ' {} to {} according to the rule \'safe\'' + .format(values.dtype, a.dtype)) + + elif a.shape == values.shape: + a[mask] = values[mask] + + else: + values = values.ravel() + _putmask_kernel(mask, values, len(values), a) + + +def fill_diagonal(a, val, wrap=False): + """Fills the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim > 2``, the diagonal is the list of + locations with indices ``a[i, i, ..., i]`` all identical. This function + modifies the input array in-place, it does not return a value. + + Args: + a (cupy.ndarray): The array, at least 2-D. + val (scalar): The value to be written on the diagonal. + Its type must be compatible with that of the array a. + wrap (bool): If specified, the diagonal is "wrapped" after N columns. + This affects only tall matrices. + + Examples + -------- + >>> a = cupy.zeros((3, 3), int) + >>> cupy.fill_diagonal(a, 5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + .. 
seealso:: :func:`numpy.fill_diagonal` + """ + # The following are imported from the original numpy + if a.ndim < 2: + raise ValueError('array must be at least 2-d') + end = None + if a.ndim == 2: + step = a.shape[1] + 1 + if not wrap: + end = a.shape[1] * a.shape[1] + else: + if not numpy.all(numpy.diff(a.shape) == 0): + raise ValueError('All dimensions of input must be of equal length') + step = 1 + numpy.cumprod(a.shape[:-1]).sum() + + a.flat[:end:step] = val + + +def diag_indices(n, ndim=2): + """Return the indices to access the main diagonal of an array. + + Returns a tuple of indices that can be used to access the main + diagonal of an array with ``ndim >= 2`` dimensions and shape + (n, n, ..., n). + + Args: + n (int): The size, along each dimension of the arrays for which + the indices are to be returned. + ndim (int): The number of dimensions. default `2`. + + Examples + -------- + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = cupy.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = cupy.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Create indices to manipulate a 3-D array: + + >>> d3 = cupy.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = cupy.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + + [[0, 0], + [0, 1]]]) + + .. seealso:: :func:`numpy.diag_indices` + + """ + idx = cupy.arange(n) + return (idx,) * ndim + + +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + See `diag_indices` for full details. + + Args: + arr (cupy.ndarray): At least 2-D. + + .. 
seealso:: :func:`numpy.diag_indices_from` + + """ + if not isinstance(arr, cupy.ndarray): + raise TypeError("Argument must be cupy.ndarray") + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not cupy.all(cupy.diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/vllm/lib/python3.10/site-packages/cupy/_indexing/iterate.py b/vllm/lib/python3.10/site-packages/cupy/_indexing/iterate.py new file mode 100644 index 0000000000000000000000000000000000000000..d8fbddc8921c8232136c0359773c1dcddbe5ab5d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_indexing/iterate.py @@ -0,0 +1,155 @@ +import numpy + +import cupy +from cupy import _core +from cupy._core import internal + + +class flatiter: + """Flat iterator object to iterate over arrays. + + A flatiter iterator is returned by ``x.flat`` for any array ``x``. It + allows iterating over the array as if it were a 1-D array, either in a + for-loop or by calling its ``next`` method. + + Iteration is done in row-major, C-style order (the last index varying the + fastest). + + Attributes: + base (cupy.ndarray): A reference to the array that is iterated over. + + .. note:: + Restricted support of basic slicing is currently supplied. Advanced + indexing is not supported yet. + + .. 
seealso:: :func:`numpy.flatiter` + + """ + + def __init__(self, a): + self._base = a + self._index = 0 + + def __setitem__(self, ind, value): + if ind is Ellipsis: + self[:] = value + return + + if isinstance(ind, tuple): + raise IndexError('unsupported iterator index') + + if isinstance(ind, bool): + raise IndexError('unsupported iterator index') + + if numpy.isscalar(ind): + ind = int(ind) + base = self._base + size = base.size + indices = [] + for s in base.shape: + size = size // s + indices.append(ind // size) + ind %= size + base[tuple(indices)] = value + return + + if isinstance(ind, slice): + base = self._base + s = internal.complete_slice(ind, base.size) + s_start = s.start + s_step = s.step + size = s.stop - s.start + if s_step > 0: + size = (size - 1) // s_step + 1 + else: + size = (size + 1) // s_step + 1 + value = cupy.asarray(value, dtype=base.dtype) + _flatiter_setitem_slice(value, s_start, s_step, base, size=size) + return + + raise IndexError('unsupported iterator index') + + def __getitem__(self, ind): + if ind is Ellipsis: + return self[:] + + if isinstance(ind, tuple): + raise IndexError('unsupported iterator index') + + if isinstance(ind, bool): + raise IndexError('unsupported iterator index') + + if numpy.isscalar(ind): + ind = int(ind) + base = self._base + size = base.size + indices = [] + for s in base.shape: + size = size // s + indices.append(ind // size) + ind %= size + return base[tuple(indices)].copy() + + if isinstance(ind, slice): + base = self._base + s = internal.complete_slice(ind, base.size) + s_start = s.start + s_step = s.step + size = s.stop - s.start + if s_step > 0: + size = (size - 1) // s_step + 1 + else: + size = (size + 1) // s_step + 1 + return _flatiter_getitem_slice(base, s_start, s_step, size=size) + + raise IndexError('unsupported iterator index') + + def __iter__(self): + return self + + def __next__(self): + index = self._index + if index == len(self): + raise StopIteration() + self._index += 1 + return 
self[index] + + def copy(self): + """Get a copy of the iterator as a 1-D array.""" + return self.base.flatten() + + @property + def base(self): + """A reference to the array that is iterated over.""" + return self._base + + # TODO(Takagi): Implement coords + + # TODO(Takagi): Implement index + + # TODO(Takagi): Implement __lt__ + + # TODO(Takagi): Implement __le__ + + # TODO(Takagi): Implement __eq__ + + # TODO(Takagi): Implement __ne__ + + # TODO(Takagi): Implement __ge__ + + # TODO(Takagi): Implement __gt__ + + def __len__(self): + return self.base.size + + +_flatiter_setitem_slice = _core.ElementwiseKernel( + 'raw T val, int64 start, int64 step', 'raw T a', + 'a[start + i * step] = val[i % val.size()]', + 'cupy_flatiter_setitem_slice') + + +_flatiter_getitem_slice = _core.ElementwiseKernel( + 'raw T a, int64 start, int64 step', 'T o', + 'o = a[start + i * step]', + 'cupy_flatiter_getitem_slice') diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/__init__.py b/vllm/lib/python3.10/site-packages/cupy/array_api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c5d6896d65f93facb64df72a54d01a2ab8f94b4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/__init__.py @@ -0,0 +1,388 @@ +""" +A NumPy sub-namespace that conforms to the Python array API standard. + +This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It +is still considered experimental, and will issue a warning when imported. + +This is a proof-of-concept namespace that wraps the corresponding NumPy +functions to give a conforming implementation of the Python array API standard +(https://data-apis.github.io/array-api/latest/). The standard is currently in +an RFC phase and comments on it are both welcome and encouraged. Comments +should be made either at https://github.com/data-apis/array-api or at +https://github.com/data-apis/consortium-feedback/discussions. 
+ +NumPy already follows the proposed spec for the most part, so this module +serves mostly as a thin wrapper around it. However, NumPy also implements a +lot of behavior that is not included in the spec, so this serves as a +restricted subset of the API. Only those functions that are part of the spec +are included in this namespace, and all functions are given with the exact +signature given in the spec, including the use of position-only arguments, and +omitting any extra keyword arguments implemented by NumPy but not part of the +spec. The behavior of some functions is also modified from the NumPy behavior +to conform to the standard. Note that the underlying array object itself is +wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule +is implemented in pure Python with no C extensions. + +The array API spec is designed as a "minimal API subset" and explicitly allows +libraries to include behaviors not specified by it. But users of this module +that intend to write portable code should be aware that only those behaviors +that are listed in the spec are guaranteed to be implemented across libraries. +Consequently, the NumPy implementation was chosen to be both conforming and +minimal, so that users can use this implementation of the array API namespace +and be sure that behaviors that it defines will be available in conforming +namespaces from other libraries. + +A few notes about the current state of this submodule: + +- There is a test suite that tests modules against the array API standard at + https://github.com/data-apis/array-api-tests. The test suite is still a work + in progress, but the existing tests pass on this module, with a few + exceptions: + + - DLPack support (see https://github.com/data-apis/array-api/pull/106) is + not included here, as it requires a full implementation in NumPy proper + first. 
+ + The test suite is not yet complete, and even the tests that exist are not + guaranteed to give a comprehensive coverage of the spec. Therefore, when + reviewing and using this submodule, you should refer to the standard + documents themselves. There are some tests in numpy.array_api.tests, but + they primarily focus on things that are not tested by the official array API + test suite. + +- There is a custom array object, numpy.array_api.Array, which is returned by + all functions in this module. All functions in the array API namespace + implicitly assume that they will only receive this object as input. The only + way to create instances of this object is to use one of the array creation + functions. It does not have a public constructor on the object itself. The + object is a small wrapper class around numpy.ndarray. The main purpose of it + is to restrict the namespace of the array object to only those dtypes and + only those methods that are required by the spec, as well as to limit/change + certain behavior that differs in the spec. In particular: + + - The array API namespace does not have scalar objects, only 0-D arrays. + Operations on Array that would create a scalar in NumPy create a 0-D + array. + + - Indexing: Only a subset of indices supported by NumPy are required by the + spec. The Array object restricts indexing to only allow those types of + indices that are required by the spec. See the docstring of the + numpy.array_api.Array._validate_indices helper function for more + information. + + - Type promotion: Some type promotion rules are different in the spec. In + particular, the spec does not have any value-based casting. The spec also + does not require cross-kind casting, like integer -> floating-point. Only + those promotions that are explicitly required by the array API + specification are allowed in this module. See NEP 47 for more info. 
+ + - Functions do not automatically call asarray() on their input, and will not + work if the input type is not Array. The exception is array creation + functions, and Python operators on the Array object, which accept Python + scalars of the same type as the array dtype. + +- All functions include type annotations, corresponding to those given in the + spec (see _typing.py for definitions of some custom types). These do not + currently fully pass mypy due to some limitations in mypy. + +- Dtype objects are just the NumPy dtype objects, e.g., float64 = + np.dtype('float64'). The spec does not require any behavior on these dtype + objects other than that they be accessible by name and be comparable by + equality, but it was considered too much extra complexity to create custom + objects to represent dtypes. + +- All places where the implementations in this submodule are known to deviate + from their corresponding functions in NumPy are marked with "# Note:" + comments. + +Still TODO in this module are: + +- DLPack support for numpy.ndarray is still in progress. See + https://github.com/numpy/numpy/pull/19083. + +- The copy=False keyword argument to asarray() is not yet implemented. This + requires support in numpy.asarray() first. + +- Some functions are not yet fully tested in the array API test suite, and may + require updates that are not yet known until the tests are written. + +- The spec is still in an RFC phase and may still have minor updates, which + will need to be reflected here. + +- Complex number support in array API spec is planned but not yet finalized, + as are the fft extension and certain linear algebra functions such as eig + that require complex dtypes. + +""" + +# CuPy-specific: still need to support Python 3.7. +import sys + +if sys.version_info < (3, 8): + raise RuntimeError('cupy.array_api requires Python 3.8+') + + +import warnings + +warnings.warn( + "The cupy.array_api submodule is still experimental. 
See NEP 47.", stacklevel=2 +) + +__array_api_version__ = "2021.12" + +__all__ = ["__array_api_version__"] + +from ._constants import e, inf, nan, pi + +__all__ += ["e", "inf", "nan", "pi"] + +from ._creation_functions import ( + asarray, + arange, + empty, + empty_like, + eye, + from_dlpack, + full, + full_like, + linspace, + meshgrid, + ones, + ones_like, + tril, + triu, + zeros, + zeros_like, +) + +__all__ += [ + "asarray", + "arange", + "empty", + "empty_like", + "eye", + "from_dlpack", + "full", + "full_like", + "linspace", + "meshgrid", + "ones", + "ones_like", + "tril", + "triu", + "zeros", + "zeros_like", +] + +from ._data_type_functions import ( + astype, + broadcast_arrays, + broadcast_to, + can_cast, + finfo, + iinfo, + result_type, +) + +__all__ += [ + "astype", + "broadcast_arrays", + "broadcast_to", + "can_cast", + "finfo", + "iinfo", + "result_type", +] + +from ._dtypes import ( + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + bool, +) + +__all__ += [ + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "float32", + "float64", + "bool", +] + +from ._elementwise_functions import ( + abs, + acos, + acosh, + add, + asin, + asinh, + atan, + atan2, + atanh, + bitwise_and, + bitwise_left_shift, + bitwise_invert, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + ceil, + cos, + cosh, + divide, + equal, + exp, + expm1, + floor, + floor_divide, + greater, + greater_equal, + isfinite, + isinf, + isnan, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logical_and, + logical_not, + logical_or, + logical_xor, + multiply, + negative, + not_equal, + positive, + pow, + remainder, + round, + sign, + sin, + sinh, + square, + sqrt, + subtract, + tan, + tanh, + trunc, +) + +__all__ += [ + "abs", + "acos", + "acosh", + "add", + "asin", + "asinh", + "atan", + "atan2", + "atanh", + "bitwise_and", + "bitwise_left_shift", + "bitwise_invert", + "bitwise_or", + 
"bitwise_right_shift", + "bitwise_xor", + "ceil", + "cos", + "cosh", + "divide", + "equal", + "exp", + "expm1", + "floor", + "floor_divide", + "greater", + "greater_equal", + "isfinite", + "isinf", + "isnan", + "less", + "less_equal", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "multiply", + "negative", + "not_equal", + "positive", + "pow", + "remainder", + "round", + "sign", + "sin", + "sinh", + "square", + "sqrt", + "subtract", + "tan", + "tanh", + "trunc", +] + +from ._indexing_functions import take + +__all__ += ["take"] + +# linalg is an extension in the array API spec, which is a sub-namespace. Only +# a subset of functions in it are imported into the top-level namespace. +from . import linalg + +__all__ += ["linalg"] + +from .linalg import matmul, tensordot, matrix_transpose, vecdot + +__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"] + +from ._manipulation_functions import ( + concat, + expand_dims, + flip, + permute_dims, + reshape, + roll, + squeeze, + stack, +) + +__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"] + +from ._searching_functions import argmax, argmin, nonzero, where + +__all__ += ["argmax", "argmin", "nonzero", "where"] + +from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values + +__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"] + +from ._sorting_functions import argsort, sort + +__all__ += ["argsort", "sort"] + +from ._statistical_functions import max, mean, min, prod, std, sum, var + +__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"] + +from ._utility_functions import all, any + +__all__ += ["all", "any"] diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/__pycache__/_set_functions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/array_api/__pycache__/_set_functions.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..f0c342cd61b926a6f668845d0c5aada90996f20e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/array_api/__pycache__/_set_functions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/__pycache__/_typing.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/array_api/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e107121419e2d31e62394e52ecb7c2e4d6af40b0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/array_api/__pycache__/_typing.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_array_object.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_array_object.py new file mode 100644 index 0000000000000000000000000000000000000000..25a12bf47ff34685c1dd12c761bd8f7aae8b094d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_array_object.py @@ -0,0 +1,1138 @@ +""" +Wrapper class around the ndarray object for the array API standard. + +The array API standard defines some behaviors differently than ndarray, in +particular, type promotion rules are different (the standard has no +value-based casting). The standard also specifies a more limited subset of +array methods and functionalities than are implemented on ndarray. Since the +goal of the array_api namespace is to be a minimal implementation of the array +API standard, we need to define a separate wrapper class for the array_api +namespace. + +The standard compliant class is only a wrapper class. It is *not* a subclass +of ndarray. 
+""" + +from __future__ import annotations + +import operator +from enum import IntEnum +from ._creation_functions import asarray +from ._dtypes import ( + _all_dtypes, + _boolean_dtypes, + _integer_dtypes, + _integer_or_boolean_dtypes, + _floating_dtypes, + _numeric_dtypes, + _result_type, + _dtype_categories, +) + +from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex +import types + +if TYPE_CHECKING: + from ._typing import Any, PyCapsule, Device, Dtype + import numpy.typing as npt + +import cupy as np +from cupy.cuda import Device as _Device +from cupy.cuda import stream as stream_module +from cupy_backends.cuda.api import runtime + +from cupy import array_api + + +class Array: + """ + n-d array object for the array API namespace. + + See the docstring of :py:obj:`np.ndarray ` for more + information. + + This is a wrapper around numpy.ndarray that restricts the usage to only + those things that are required by the array API namespace. Note, + attributes on this object that start with a single underscore are not part + of the API specification and should only be used internally. This object + should not be constructed directly. Rather, use one of the creation + functions, such as asarray(). + + """ + _array: np.ndarray + + # Use a custom constructor instead of __init__, as manually initializing + # this class is not supported API. + @classmethod + def _new(cls, x: Union[np.ndarray, np.generic], /) -> Array: + """ + This is a private method for initializing the array API Array + object. + + Functions outside of the array_api submodule should not use this + method. Use one of the creation functions instead, such as + ``asarray``. + + """ + obj = super().__new__(cls) + # Note: The spec does not have array scalars, only 0-D arrays. 
+ if isinstance(x, np.generic): + # Convert the array scalar to a 0-D array + x = np.asarray(x) + if x.dtype not in _all_dtypes: + raise TypeError( + f"The array_api namespace does not support the dtype '{x.dtype}'" + ) + obj._array = x + return obj + + # Prevent Array() from working + def __new__(cls, *args, **kwargs): + raise TypeError( + "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead." + ) + + # These functions are not required by the spec, but are implemented for + # the sake of usability. + + def __str__(self: Array, /) -> str: + """ + Performs the operation __str__. + """ + return self._array.__str__().replace("array", "Array") + + def __repr__(self: Array, /) -> str: + """ + Performs the operation __repr__. + """ + suffix = f", dtype={self.dtype.name})" + if 0 in self.shape: + prefix = "empty(" + mid = str(self.shape) + else: + prefix = "Array(" + mid = np.array2string(np.asnumpy(self._array), separator=', ', prefix=prefix, suffix=suffix) + return prefix + mid + suffix + + def __cupy_get_ndarray__(self): + return self._array + + # These are various helper functions to make the array behavior match the + # spec in places where it either deviates from or is more strict than + # NumPy behavior + + def _check_allowed_dtypes(self, other: Union[bool, int, float, Array], dtype_category: str, op: str) -> Array: + """ + Helper function for operators to only allow specific input dtypes + + Use like + + other = self._check_allowed_dtypes(other, 'numeric', '__add__') + if other is NotImplemented: + return other + """ + + if self.dtype not in _dtype_categories[dtype_category]: + raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}") + if isinstance(other, (int, float, bool)): + other = self._promote_scalar(other) + elif isinstance(other, Array): + if other.dtype not in _dtype_categories[dtype_category]: + raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}") + else: 
+ return NotImplemented + + # This will raise TypeError for type combinations that are not allowed + # to promote in the spec (even if the NumPy array operator would + # promote them). + res_dtype = _result_type(self.dtype, other.dtype) + if op.startswith("__i"): + # Note: NumPy will allow in-place operators in some cases where + # the type promoted operator does not match the left-hand side + # operand. For example, + + # >>> a = np.array(1, dtype=np.int8) + # >>> a += np.array(1, dtype=np.int16) + + # The spec explicitly disallows this. + if res_dtype != self.dtype: + raise TypeError( + f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}" + ) + + return other + + # Helper function to match the type promotion rules in the spec + def _promote_scalar(self, scalar: Union[bool, int, float]) -> Array: + """ + Returns a promoted version of a Python scalar appropriate for use with + operations on self. + + This may raise an OverflowError in cases where the scalar is an + integer that is too large to fit in a NumPy integer dtype, or + TypeError when the scalar type is incompatible with the dtype of self. + """ + # Note: Only Python scalar types that match the array dtype are + # allowed. + if isinstance(scalar, bool): + if self.dtype not in _boolean_dtypes: + raise TypeError( + "Python bool scalars can only be promoted with bool arrays" + ) + elif isinstance(scalar, int): + if self.dtype in _boolean_dtypes: + raise TypeError( + "Python int scalars cannot be promoted with bool arrays" + ) + elif isinstance(scalar, float): + if self.dtype not in _floating_dtypes: + raise TypeError( + "Python float scalars can only be promoted with floating-point arrays." + ) + else: + raise TypeError("'scalar' must be a Python scalar") + + # Note: scalars are unconditionally cast to the same dtype as the + # array. + + # Note: the spec only specifies integer-dtype/int promotion + # behavior for integers within the bounds of the integer dtype. 
+ # Outside of those bounds we use the default NumPy behavior (either + # cast or raise OverflowError). + return Array._new(np.array(scalar, self.dtype)) + + @staticmethod + def _normalize_two_args(x1, x2) -> Tuple[Array, Array]: + """ + Normalize inputs to two arg functions to fix type promotion rules + + NumPy deviates from the spec type promotion rules in cases where one + argument is 0-dimensional and the other is not. For example: + + >>> import numpy as np + >>> a = np.array([1.0], dtype=np.float32) + >>> b = np.array(1.0, dtype=np.float64) + >>> np.add(a, b) # The spec says this should be float64 + array([2.], dtype=float32) + + To fix this, we add a dimension to the 0-dimension array before passing it + through. This works because a dimension would be added anyway from + broadcasting, so the resulting shape is the same, but this prevents NumPy + from not promoting the dtype. + """ + # Another option would be to use signature=(x1.dtype, x2.dtype, None), + # but that only works for ufuncs, so we would have to call the ufuncs + # directly in the operator methods. One should also note that this + # sort of trick wouldn't work for functions like searchsorted, which + # don't do normal broadcasting, but there aren't any functions like + # that in the array API namespace. + if x1.ndim == 0 and x2.ndim != 0: + # The _array[None] workaround was chosen because it is relatively + # performant. broadcast_to(x1._array, x2.shape) is much slower. We + # could also manually type promote x2, but that is more complicated + # and about the same performance as this. + x1 = Array._new(x1._array[None]) + elif x2.ndim == 0 and x1.ndim != 0: + x2 = Array._new(x2._array[None]) + return (x1, x2) + + # Note: A large fraction of allowed indices are disallowed here (see the + # docstring below) + def _validate_index(self, key): + """ + Validate an index according to the array API. + + The array API specification only requires a subset of indices that are + supported by NumPy. 
This function will reject any index that is + allowed by NumPy but not required by the array API specification. We + always raise ``IndexError`` on such indices (the spec does not require + any specific behavior on them, but this makes the NumPy array API + namespace a minimal implementation of the spec). See + https://data-apis.org/array-api/latest/API_specification/indexing.html + for the full list of required indexing behavior + + This function raises IndexError if the index ``key`` is invalid. It + only raises ``IndexError`` on indices that are not already rejected by + NumPy, as NumPy will already raise the appropriate error on such + indices. ``shape`` may be None, in which case, only cases that are + independent of the array shape are checked. + + The following cases are allowed by NumPy, but not specified by the array + API specification: + + - Indices to not include an implicit ellipsis at the end. That is, + every axis of an array must be explicitly indexed or an ellipsis + included. This behaviour is sometimes referred to as flat indexing. + + - The start and stop of a slice may not be out of bounds. In + particular, for a slice ``i:j:k`` on an axis of size ``n``, only the + following are allowed: + + - ``i`` or ``j`` omitted (``None``). + - ``-n <= i <= max(0, n - 1)``. + - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``. + - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``. + + - Boolean array indices are not allowed as part of a larger tuple + index. + + - Integer array indices are not allowed (with the exception of 0-D + arrays, which are treated the same as scalars). + + Additionally, it should be noted that indices that would return a + scalar in NumPy will return a 0-D array. Array scalars are not allowed + in the specification, only 0-D arrays. This is done in the + ``Array._new`` constructor, not this function. 
+ + """ + _key = key if isinstance(key, tuple) else (key,) + for i in _key: + if isinstance(i, bool) or not ( + isinstance(i, SupportsIndex) # i.e. ints + or isinstance(i, Array) + or isinstance(i, np.ndarray) + or isinstance(i, slice) + or i == Ellipsis + or i is None + ): + raise IndexError( + f"Single-axes index {i} has {type(i)=}, but only " + "integers, slices (:), ellipsis (...), newaxis (None), " + "zero-dimensional integer arrays and boolean arrays " + "are specified in the Array API." + ) + + nonexpanding_key = [] + single_axes = [] + n_ellipsis = 0 + key_has_mask = False + for i in _key: + if i is not None: + nonexpanding_key.append(i) + if isinstance(i, Array) or isinstance(i, np.ndarray): + if i.dtype in _boolean_dtypes: + key_has_mask = True + single_axes.append(i) + else: + # i must not be an array here, to avoid elementwise equals + if i == Ellipsis: + n_ellipsis += 1 + else: + single_axes.append(i) + + n_single_axes = len(single_axes) + if n_ellipsis > 1: + return # handled by ndarray + elif n_ellipsis == 0: + # Note boolean masks must be the sole index, which we check for + # later on. + if not key_has_mask and n_single_axes < self.ndim: + raise IndexError( + f"{self.ndim=}, but the multi-axes index only specifies " + f"{n_single_axes} dimensions. If this was intentional, " + "add a trailing ellipsis (...) which expands into as many " + "slices (:) as necessary - this is what np.ndarray arrays " + "implicitly do, but such flat indexing behaviour is not " + "specified in the Array API." 
+ ) + + if n_ellipsis == 0: + indexed_shape = self.shape + else: + ellipsis_start = None + for pos, i in enumerate(nonexpanding_key): + if not (isinstance(i, Array) or isinstance(i, np.ndarray)): + if i == Ellipsis: + ellipsis_start = pos + break + assert ellipsis_start is not None # sanity check + ellipsis_end = self.ndim - (n_single_axes - ellipsis_start) + indexed_shape = ( + self.shape[:ellipsis_start] + self.shape[ellipsis_end:] + ) + for i, side in zip(single_axes, indexed_shape): + if isinstance(i, slice): + if side == 0: + f_range = "0 (or None)" + else: + f_range = f"between -{side} and {side - 1} (or None)" + if i.start is not None: + try: + start = operator.index(i.start) + except TypeError: + pass # handled by ndarray + else: + if not (-side <= start <= side): + raise IndexError( + f"Slice {i} contains {start=}, but should be " + f"{f_range} for an axis of size {side} " + "(out-of-bounds starts are not specified in " + "the Array API)" + ) + if i.stop is not None: + try: + stop = operator.index(i.stop) + except TypeError: + pass # handled by ndarray + else: + if not (-side <= stop <= side): + raise IndexError( + f"Slice {i} contains {stop=}, but should be " + f"{f_range} for an axis of size {side} " + "(out-of-bounds stops are not specified in " + "the Array API)" + ) + elif isinstance(i, Array): + if i.dtype in _boolean_dtypes and len(_key) != 1: + assert isinstance(key, tuple) # sanity check + raise IndexError( + f"Single-axes index {i} is a boolean array and " + f"{len(key)=}, but masking is only specified in the " + "Array API when the array is the sole index." + ) + elif i.dtype in _integer_dtypes and i.ndim != 0: + raise IndexError( + f"Single-axes index {i} is a non-zero-dimensional " + "integer array, but advanced integer indexing is not " + "specified in the Array API." + ) + elif isinstance(i, tuple): + raise IndexError( + f"Single-axes index {i} is a tuple, but nested tuple " + "indices are not specified in the Array API." 
+ ) + + # Everything below this line is required by the spec. + + def __abs__(self: Array, /) -> Array: + """ + Performs the operation __abs__. + """ + if self.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in __abs__") + res = self._array.__abs__() + return self.__class__._new(res) + + def __add__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __add__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__add__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__add__(other._array) + return self.__class__._new(res) + + def __and__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __and__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__and__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__and__(other._array) + return self.__class__._new(res) + + def __array_namespace__( + self: Array, /, *, api_version: Optional[str] = None + ) -> types.ModuleType: + if api_version is not None and not api_version.startswith("2021."): + raise ValueError(f"Unrecognized array API version: {api_version!r}") + return array_api + + def __bool__(self: Array, /) -> bool: + """ + Performs the operation __bool__. + """ + # Note: This is an error here. + if self._array.ndim != 0: + raise TypeError("bool is only allowed on arrays with 0 dimensions") + if self.dtype not in _boolean_dtypes: + raise ValueError("bool is only allowed on boolean arrays") + res = self._array.__bool__() + return res + + def __dlpack__(self: Array, /, *, stream=None) -> PyCapsule: + """ + Performs the operation __dlpack__. + """ + return self._array.__dlpack__(stream=stream) + + def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]: + """ + Performs the operation __dlpack_device__. 
+ """ + return self._array.__dlpack_device__() + + def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array: # type: ignore + """ + Performs the operation __eq__. + """ + # Even though "all" dtypes are allowed, we still require them to be + # promotable with each other. + other = self._check_allowed_dtypes(other, "all", "__eq__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__eq__(other._array) + return self.__class__._new(res) + + def __float__(self: Array, /) -> float: + """ + Performs the operation __float__. + """ + # Note: This is an error here. + if self._array.ndim != 0: + raise TypeError("float is only allowed on arrays with 0 dimensions") + if self.dtype not in _floating_dtypes: + raise ValueError("float is only allowed on floating-point arrays") + res = self._array.__float__() + return res + + def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __floordiv__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__floordiv__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__floordiv__(other._array) + return self.__class__._new(res) + + def __ge__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __ge__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__ge__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__ge__(other._array) + return self.__class__._new(res) + + def __getitem__( + self: Array, + key: Union[ + int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array + ], + /, + ) -> Array: + """ + Performs the operation __getitem__. + """ + # Note: Only indices required by the spec are allowed. 
See the + # docstring of _validate_index + self._validate_index(key) + if isinstance(key, Array): + # Indexing self._array with array_api arrays can be erroneous + key = key._array + res = self._array.__getitem__(key) + return self._new(res) + + def __gt__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __gt__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__gt__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__gt__(other._array) + return self.__class__._new(res) + + def __int__(self: Array, /) -> int: + """ + Performs the operation __int__. + """ + # Note: This is an error here. + if self._array.ndim != 0: + raise TypeError("int is only allowed on arrays with 0 dimensions") + if self.dtype not in _integer_dtypes: + raise ValueError("int is only allowed on integer arrays") + res = self._array.__int__() + return res + + def __index__(self: Array, /) -> int: + """ + Performs the operation __index__. + """ + # TODO(leofang): just do this when CuPy is ready: + # res = self._array.__index__() + if self.ndim != 0 or self.dtype not in _integer_dtypes: + raise TypeError("only integer scalar arrays can be converted to a scalar index") + return int(self._array) + + def __invert__(self: Array, /) -> Array: + """ + Performs the operation __invert__. + """ + if self.dtype not in _integer_or_boolean_dtypes: + raise TypeError("Only integer or boolean dtypes are allowed in __invert__") + res = self._array.__invert__() + return self.__class__._new(res) + + def __le__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __le__. 
+ """ + other = self._check_allowed_dtypes(other, "numeric", "__le__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__le__(other._array) + return self.__class__._new(res) + + def __lshift__(self: Array, other: Union[int, Array], /) -> Array: + """ + Performs the operation __lshift__. + """ + other = self._check_allowed_dtypes(other, "integer", "__lshift__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__lshift__(other._array) + return self.__class__._new(res) + + def __lt__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __lt__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__lt__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__lt__(other._array) + return self.__class__._new(res) + + def __matmul__(self: Array, other: Array, /) -> Array: + """ + Performs the operation __matmul__. + """ + # matmul is not defined for scalars, but without this, we may get + # the wrong error message from asarray. + other = self._check_allowed_dtypes(other, "numeric", "__matmul__") + if other is NotImplemented: + return other + res = self._array.__matmul__(other._array) + return self.__class__._new(res) + + def __mod__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __mod__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__mod__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__mod__(other._array) + return self.__class__._new(res) + + def __mul__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __mul__. 
+ """ + other = self._check_allowed_dtypes(other, "numeric", "__mul__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__mul__(other._array) + return self.__class__._new(res) + + def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array: # type: ignore + """ + Performs the operation __ne__. + """ + other = self._check_allowed_dtypes(other, "all", "__ne__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__ne__(other._array) + return self.__class__._new(res) + + def __neg__(self: Array, /) -> Array: + """ + Performs the operation __neg__. + """ + if self.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in __neg__") + res = self._array.__neg__() + return self.__class__._new(res) + + def __or__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __or__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__or__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__or__(other._array) + return self.__class__._new(res) + + def __pos__(self: Array, /) -> Array: + """ + Performs the operation __pos__. + """ + if self.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in __pos__") + res = self._array.__pos__() + return self.__class__._new(res) + + # PEP 484 requires int to be a subtype of float, but __pow__ should not + # accept int. + def __pow__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __pow__. + """ + from ._elementwise_functions import pow + + other = self._check_allowed_dtypes(other, "numeric", "__pow__") + if other is NotImplemented: + return other + # Note: NumPy's __pow__ does not follow type promotion rules for 0-d + # arrays, so we use pow() here instead. 
+ return pow(self, other) + + def __rshift__(self: Array, other: Union[int, Array], /) -> Array: + """ + Performs the operation __rshift__. + """ + other = self._check_allowed_dtypes(other, "integer", "__rshift__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rshift__(other._array) + return self.__class__._new(res) + + def __setitem__( + self, + key: Union[ + int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array + ], + value: Union[int, float, bool, Array], + /, + ) -> None: + """ + Performs the operation __setitem__. + """ + # Note: Only indices required by the spec are allowed. See the + # docstring of _validate_index + self._validate_index(key) + if isinstance(key, Array): + # Indexing self._array with array_api arrays can be erroneous + key = key._array + self._array.__setitem__(key, asarray(value)._array) + + def __sub__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __sub__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__sub__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__sub__(other._array) + return self.__class__._new(res) + + # PEP 484 requires int to be a subtype of float, but __truediv__ should + # not accept int. + def __truediv__(self: Array, other: Union[float, Array], /) -> Array: + """ + Performs the operation __truediv__. + """ + other = self._check_allowed_dtypes(other, "floating-point", "__truediv__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__truediv__(other._array) + return self.__class__._new(res) + + def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __xor__. 
+ """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__xor__(other._array) + return self.__class__._new(res) + + def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __iadd__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__iadd__") + if other is NotImplemented: + return other + self._array.__iadd__(other._array) + return self + + def __radd__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __radd__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__radd__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__radd__(other._array) + return self.__class__._new(res) + + def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __iand__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__") + if other is NotImplemented: + return other + self._array.__iand__(other._array) + return self + + def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __rand__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rand__(other._array) + return self.__class__._new(res) + + def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __ifloordiv__. 
+ """ + other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__") + if other is NotImplemented: + return other + self._array.__ifloordiv__(other._array) + return self + + def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __rfloordiv__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rfloordiv__(other._array) + return self.__class__._new(res) + + def __ilshift__(self: Array, other: Union[int, Array], /) -> Array: + """ + Performs the operation __ilshift__. + """ + other = self._check_allowed_dtypes(other, "integer", "__ilshift__") + if other is NotImplemented: + return other + self._array.__ilshift__(other._array) + return self + + def __rlshift__(self: Array, other: Union[int, Array], /) -> Array: + """ + Performs the operation __rlshift__. + """ + other = self._check_allowed_dtypes(other, "integer", "__rlshift__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rlshift__(other._array) + return self.__class__._new(res) + + def __imatmul__(self: Array, other: Array, /) -> Array: + """ + Performs the operation __imatmul__. + """ + # Note: NumPy does not implement __imatmul__. + + # matmul is not defined for scalars, but without this, we may get + # the wrong error message from asarray. + other = self._check_allowed_dtypes(other, "numeric", "__imatmul__") + if other is NotImplemented: + return other + + # __imatmul__ can only be allowed when it would not change the shape + # of self. 
+ other_shape = other.shape + if self.shape == () or other_shape == (): + raise ValueError("@= requires at least one dimension") + if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]: + raise ValueError("@= cannot change the shape of the input array") + self._array[:] = self._array.__matmul__(other._array) + return self + + def __rmatmul__(self: Array, other: Array, /) -> Array: + """ + Performs the operation __rmatmul__. + """ + # matmul is not defined for scalars, but without this, we may get + # the wrong error message from asarray. + other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__") + if other is NotImplemented: + return other + res = self._array.__rmatmul__(other._array) + return self.__class__._new(res) + + def __imod__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __imod__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__imod__") + if other is NotImplemented: + return other + self._array.__imod__(other._array) + return self + + def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __rmod__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__rmod__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rmod__(other._array) + return self.__class__._new(res) + + def __imul__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __imul__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__imul__") + if other is NotImplemented: + return other + self._array.__imul__(other._array) + return self + + def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __rmul__. 
+ """ + other = self._check_allowed_dtypes(other, "numeric", "__rmul__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rmul__(other._array) + return self.__class__._new(res) + + def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __ior__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__") + if other is NotImplemented: + return other + self._array.__ior__(other._array) + return self + + def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __ror__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__ror__(other._array) + return self.__class__._new(res) + + def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __ipow__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__ipow__") + if other is NotImplemented: + return other + self._array.__ipow__(other._array) + return self + + def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __rpow__. + """ + from ._elementwise_functions import pow + + other = self._check_allowed_dtypes(other, "numeric", "__rpow__") + if other is NotImplemented: + return other + # Note: NumPy's __pow__ does not follow the spec type promotion rules + # for 0-d arrays, so we use pow() here instead. + return pow(other, self) + + def __irshift__(self: Array, other: Union[int, Array], /) -> Array: + """ + Performs the operation __irshift__. 
+ """ + other = self._check_allowed_dtypes(other, "integer", "__irshift__") + if other is NotImplemented: + return other + self._array.__irshift__(other._array) + return self + + def __rrshift__(self: Array, other: Union[int, Array], /) -> Array: + """ + Performs the operation __rrshift__. + """ + other = self._check_allowed_dtypes(other, "integer", "__rrshift__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rrshift__(other._array) + return self.__class__._new(res) + + def __isub__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __isub__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__isub__") + if other is NotImplemented: + return other + self._array.__isub__(other._array) + return self + + def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array: + """ + Performs the operation __rsub__. + """ + other = self._check_allowed_dtypes(other, "numeric", "__rsub__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rsub__(other._array) + return self.__class__._new(res) + + def __itruediv__(self: Array, other: Union[float, Array], /) -> Array: + """ + Performs the operation __itruediv__. + """ + other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__") + if other is NotImplemented: + return other + self._array.__itruediv__(other._array) + return self + + def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array: + """ + Performs the operation __rtruediv__. 
+ """ + other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rtruediv__(other._array) + return self.__class__._new(res) + + def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __ixor__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__") + if other is NotImplemented: + return other + self._array.__ixor__(other._array) + return self + + def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array: + """ + Performs the operation __rxor__. + """ + other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__") + if other is NotImplemented: + return other + self, other = self._normalize_two_args(self, other) + res = self._array.__rxor__(other._array) + return self.__class__._new(res) + + def to_device(self: Array, device: Device, /, stream=None) -> Array: + if device == self.device: + return self + elif not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + else: + # see cupy/cupy#5985 for the reason how we handle device/stream here + prev_device = runtime.getDevice() + prev_stream: stream_module.Stream = None + if stream is not None: + prev_stream = stream_module.get_current_stream() + # stream can be an int as specified in __dlpack__, or a CuPy stream + if isinstance(stream, int): + stream = np.cuda.ExternalStream(stream) + elif isinstance(stream, np.cuda.Stream): + pass + else: + raise ValueError('the input stream is not recognized') + stream.use() + try: + runtime.setDevice(device.id) + arr = self._array.copy() + finally: + runtime.setDevice(prev_device) + if stream is not None: + prev_stream.use() + return Array._new(arr) + + @property + def dtype(self) -> Dtype: + """ + Array API compatible wrapper for :py:meth:`np.ndarray.dtype `. + + See its docstring for more information. 
+ """ + return self._array.dtype + + @property + def device(self) -> Device: + return self._array.device + + # Note: mT is new in array API spec (see matrix_transpose) + @property + def mT(self) -> Array: + from .linalg import matrix_transpose + return matrix_transpose(self) + + @property + def ndim(self) -> int: + """ + Array API compatible wrapper for :py:meth:`np.ndarray.ndim `. + + See its docstring for more information. + """ + return self._array.ndim + + @property + def shape(self) -> Tuple[int, ...]: + """ + Array API compatible wrapper for :py:meth:`np.ndarray.shape `. + + See its docstring for more information. + """ + return self._array.shape + + @property + def size(self) -> int: + """ + Array API compatible wrapper for :py:meth:`np.ndarray.size `. + + See its docstring for more information. + """ + return self._array.size + + @property + def T(self) -> Array: + """ + Array API compatible wrapper for :py:meth:`np.ndarray.T `. + + See its docstring for more information. + """ + # Note: T only works on 2-dimensional arrays. See the corresponding + # note in the specification: + # https://data-apis.org/array-api/latest/API_specification/array_object.html#t + if self.ndim != 2: + raise ValueError("x.T requires x to have 2 dimensions. 
Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.") + return self.__class__._new(self._array.T) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_constants.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..c3ada2726615f1c79d48bee5e1322b35dda7468f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_constants.py @@ -0,0 +1,6 @@ +import cupy as np + +e = np.e +inf = np.inf +nan = np.nan +pi = np.pi diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_creation_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_creation_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7369a92643c2fe533164baaa7f028ed43b82a4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_creation_functions.py @@ -0,0 +1,448 @@ +from __future__ import annotations + + +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +if TYPE_CHECKING: + from ._typing import ( + Array, + Device, + Dtype, + NestedSequence, + SupportsBufferProtocol, + ) + from collections.abc import Sequence +from ._dtypes import _all_dtypes + +import cupy as np +from cupy.cuda import Device as _Device +from cupy_backends.cuda.api import runtime + + +def _check_valid_dtype(dtype): + # Note: Only spelling dtypes as the dtype objects is supported. + + # We use this instead of "dtype in _all_dtypes" because the dtype objects + # define equality with the sorts of things we want to disallow. 
+ for d in (None,) + _all_dtypes: + if dtype is d: + return + raise ValueError("dtype must be one of the supported dtypes") + + +def asarray( + obj: Union[ + Array, + bool, + int, + float, + NestedSequence[bool | int | float], + SupportsBufferProtocol, + ], + /, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + copy: Optional[bool] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.asarray `. + + See its docstring for more information. + """ + # _array_object imports in this file are inside the functions to avoid + # circular imports + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + if copy is False: + # Note: copy=False is not yet implemented in np.asarray + raise NotImplementedError("copy=False is not yet implemented") + if isinstance(obj, Array): + if dtype is not None and obj.dtype != dtype: + copy = True + if copy is True: + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + obj = Array._new(np.array(obj._array, copy=True, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + return obj + if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 or obj < -(2 ** 63)): + # Give a better error message in this case. NumPy would convert this + # to an object array. TODO: This won't handle large integers in lists. 
+ raise OverflowError("Integer out of bounds for array dtypes") + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + res = np.asarray(obj, dtype=dtype) + finally: + runtime.setDevice(prev_device) + return Array._new(res) + + +def arange( + start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arange `. + + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def empty( + shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.empty `. + + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.empty(shape, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def empty_like( + x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.empty_like `. + + See its docstring for more information. 
+ """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.empty_like(x._array, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def eye( + n_rows: int, + n_cols: Optional[int] = None, + /, + *, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.eye `. + + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def from_dlpack(x: object, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.from_dlpack `. + + See its docstring for more information. + """ + from ._array_object import Array + return Array._new(np.from_dlpack(x)) + + +def full( + shape: Union[int, Tuple[int, ...]], + fill_value: Union[int, float], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.full `. + + See its docstring for more information. 
+ """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + if isinstance(fill_value, Array) and fill_value.ndim == 0: + fill_value = fill_value._array + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + res = np.full(shape, fill_value, dtype=dtype) + finally: + runtime.setDevice(prev_device) + if res.dtype not in _all_dtypes: + # This will happen if the fill value is not something that NumPy + # coerces to one of the acceptable dtypes. + raise TypeError("Invalid input to full") + return Array._new(res) + + +def full_like( + x: Array, + /, + fill_value: Union[int, float], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.full_like `. + + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + if isinstance(fill_value, Array) and fill_value.ndim == 0: + fill_value = fill_value._array + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + res = np.full_like(x._array, fill_value, dtype=dtype) + finally: + runtime.setDevice(prev_device) + if res.dtype not in _all_dtypes: + # This will happen if the fill value is not something that NumPy + # coerces to one of the acceptable dtypes. + raise TypeError("Invalid input to full_like") + return Array._new(res) + + +def linspace( + start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + endpoint: bool = True, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linspace `. 
+ + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is None: + device = _Device() # current device + elif not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint)) + finally: + runtime.setDevice(prev_device) + + +def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]: + """ + Array API compatible wrapper for :py:func:`np.meshgrid `. + + See its docstring for more information. + """ + from ._array_object import Array + + # Note: unlike np.meshgrid, only inputs with all the same dtype are + # allowed + + if len({a.dtype for a in arrays}) > 1: + raise ValueError("meshgrid inputs must all have the same dtype") + + return [ + Array._new(array) + for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing) + ] + + +def ones( + shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.ones `. + + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.ones(shape, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def ones_like( + x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.ones_like `. + + See its docstring for more information. 
+ """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.ones_like(x._array, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def tril(x: Array, /, *, k: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.tril `. + + See its docstring for more information. + """ + from ._array_object import Array + + if x.ndim < 2: + # Note: Unlike np.tril, x must be at least 2-D + raise ValueError("x must be at least 2-dimensional for tril") + return Array._new(np.tril(x._array, k=k)) + + +def triu(x: Array, /, *, k: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.triu `. + + See its docstring for more information. + """ + from ._array_object import Array + + if x.ndim < 2: + # Note: Unlike np.triu, x must be at least 2-D + raise ValueError("x must be at least 2-dimensional for triu") + return Array._new(np.triu(x._array, k=k)) + + +def zeros( + shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.zeros `. + + See its docstring for more information. 
+ """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.zeros(shape, dtype=dtype)) + finally: + runtime.setDevice(prev_device) + + +def zeros_like( + x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.zeros_like `. + + See its docstring for more information. + """ + from ._array_object import Array + + _check_valid_dtype(dtype) + if device is not None and not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + if device is None: + device = _Device() # current device + prev_device = runtime.getDevice() + try: + runtime.setDevice(device.id) + return Array._new(np.zeros_like(x._array, dtype=dtype)) + finally: + runtime.setDevice(prev_device) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_data_type_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_data_type_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..954e84a5c44ef36fc587a14359f20c4ff73d5ee1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_data_type_functions.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +from ._array_object import Array +from ._dtypes import _all_dtypes, _result_type + +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Tuple, Union + +if TYPE_CHECKING: + from ._typing import Dtype + from collections.abc import Sequence + +import cupy as np + + +# Note: astype is a function, not an array method as in NumPy. 
def astype(x: Array, dtype: Dtype, /, *, copy: bool = True) -> Array:
    """
    Cast ``x`` to ``dtype``.

    Unlike NumPy, the array API exposes ``astype`` as a free function
    rather than an array method.  When ``copy`` is False and the array
    already has the requested dtype, the input is returned as-is.
    """
    if dtype == x.dtype and not copy:
        return x
    return Array._new(x._array.astype(dtype=dtype, copy=copy))


def broadcast_arrays(*arrays: Array) -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_arrays `.

    See its docstring for more information.
    """
    from ._array_object import Array

    # Unwrap to the underlying ndarrays, broadcast, then re-wrap each result.
    raw = [a._array for a in arrays]
    return [Array._new(b) for b in np.broadcast_arrays(*raw)]


def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_to `.

    See its docstring for more information.
    """
    from ._array_object import Array

    broadcasted = np.broadcast_to(x._array, shape)
    return Array._new(broadcasted)


def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
    """
    Array API compatible wrapper for :py:func:`np.can_cast `.

    See its docstring for more information.
    """
    # Accept either an array (use its dtype) or a dtype for ``from_``.
    if isinstance(from_, Array):
        from_ = from_.dtype
    elif from_ not in _all_dtypes:
        raise TypeError(f"{from_=}, but should be an array_api array or dtype")
    if to not in _all_dtypes:
        raise TypeError(f"{to=}, but should be a dtype")
    # Note: We avoid np.can_cast() as it has discrepancies with the array API,
    # since NumPy allows cross-kind casting (e.g., NumPy allows bool -> int8).
    # See https://github.com/numpy/numpy/issues/20870
    try:
        # ``from_`` can (up)cast to ``to`` exactly when promoting the two
        # dtypes together yields ``to`` itself.
        return _result_type(from_, to) == to
    except TypeError:
        # _result_type() raises if the dtypes don't promote together
        return False


# These are internal objects for the return types of finfo and iinfo, since
# the NumPy versions contain extra data that isn't part of the spec.
+@dataclass +class finfo_object: + bits: int + # Note: The types of the float data here are float, whereas in NumPy they + # are scalars of the corresponding float dtype. + eps: float + max: float + min: float + smallest_normal: float + + +@dataclass +class iinfo_object: + bits: int + max: int + min: int + + +def finfo(type: Union[Dtype, Array], /) -> finfo_object: + """ + Array API compatible wrapper for :py:func:`np.finfo `. + + See its docstring for more information. + """ + fi = np.finfo(type) # type: ignore + # Note: The types of the float data here are float, whereas in NumPy they + # are scalars of the corresponding float dtype. + try: + tiny = fi.smallest_normal + except AttributeError: # for backward compatibility + tiny = fi.tiny + return finfo_object( + fi.bits, + float(fi.eps), + float(fi.max), + float(fi.min), + float(tiny), + ) + + +def iinfo(type: Union[Dtype, Array], /) -> iinfo_object: + """ + Array API compatible wrapper for :py:func:`np.iinfo `. + + See its docstring for more information. + """ + ii = np.iinfo(type) # type: ignore + return iinfo_object(ii.bits, ii.max, ii.min) + + +def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype: + """ + Array API compatible wrapper for :py:func:`np.result_type `. + + See its docstring for more information. + """ + # Note: we use a custom implementation that gives only the type promotions + # required by the spec rather than using np.result_type. NumPy implements + # too many extra type promotions like int64 + uint64 -> float64, and does + # value-based casting on scalar arrays. 
+ A = [] + for a in arrays_and_dtypes: + if isinstance(a, Array): + a = a.dtype + elif isinstance(a, np.ndarray) or a not in _all_dtypes: + raise TypeError("result_type() inputs must be array_api arrays or dtypes") + A.append(a) + + if len(A) == 0: + raise ValueError("at least one array or dtype is required") + elif len(A) == 1: + return A[0] + else: + t = A[0] + for t2 in A[1:]: + t = _result_type(t, t2) + return t diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_dtypes.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..8853510662a98746c8b392add5b11e9d5131f106 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_dtypes.py @@ -0,0 +1,143 @@ +import cupy as np + +# Note: we use dtype objects instead of dtype classes. The spec does not +# require any behavior on dtypes other than equality. +int8 = np.dtype("int8") +int16 = np.dtype("int16") +int32 = np.dtype("int32") +int64 = np.dtype("int64") +uint8 = np.dtype("uint8") +uint16 = np.dtype("uint16") +uint32 = np.dtype("uint32") +uint64 = np.dtype("uint64") +float32 = np.dtype("float32") +float64 = np.dtype("float64") +# Note: This name is changed +bool = np.dtype("bool") + +_all_dtypes = ( + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + bool, +) +_boolean_dtypes = (bool,) +_floating_dtypes = (float32, float64) +_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64) +_integer_or_boolean_dtypes = ( + bool, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, +) +_numeric_dtypes = ( + float32, + float64, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, +) + +_dtype_categories = { + "all": _all_dtypes, + "numeric": _numeric_dtypes, + "integer": _integer_dtypes, + "integer or boolean": _integer_or_boolean_dtypes, + "boolean": _boolean_dtypes, + "floating-point": _floating_dtypes, +} + + +# Note: 
the spec defines a restricted type promotion table compared to NumPy. +# In particular, cross-kind promotions like integer + float or boolean + +# integer are not allowed, even for functions that accept both kinds. +# Additionally, NumPy promotes signed integer + uint64 to float64, but this +# promotion is not allowed here. To be clear, Python scalar int objects are +# allowed to promote to floating-point dtypes, but only in array operators +# (see Array._promote_scalar) method in _array_object.py. +_promotion_table = { + (int8, int8): int8, + (int8, int16): int16, + (int8, int32): int32, + (int8, int64): int64, + (int16, int8): int16, + (int16, int16): int16, + (int16, int32): int32, + (int16, int64): int64, + (int32, int8): int32, + (int32, int16): int32, + (int32, int32): int32, + (int32, int64): int64, + (int64, int8): int64, + (int64, int16): int64, + (int64, int32): int64, + (int64, int64): int64, + (uint8, uint8): uint8, + (uint8, uint16): uint16, + (uint8, uint32): uint32, + (uint8, uint64): uint64, + (uint16, uint8): uint16, + (uint16, uint16): uint16, + (uint16, uint32): uint32, + (uint16, uint64): uint64, + (uint32, uint8): uint32, + (uint32, uint16): uint32, + (uint32, uint32): uint32, + (uint32, uint64): uint64, + (uint64, uint8): uint64, + (uint64, uint16): uint64, + (uint64, uint32): uint64, + (uint64, uint64): uint64, + (int8, uint8): int16, + (int8, uint16): int32, + (int8, uint32): int64, + (int16, uint8): int16, + (int16, uint16): int32, + (int16, uint32): int64, + (int32, uint8): int32, + (int32, uint16): int32, + (int32, uint32): int64, + (int64, uint8): int64, + (int64, uint16): int64, + (int64, uint32): int64, + (uint8, int8): int16, + (uint16, int8): int32, + (uint32, int8): int64, + (uint8, int16): int16, + (uint16, int16): int32, + (uint32, int16): int64, + (uint8, int32): int32, + (uint16, int32): int32, + (uint32, int32): int64, + (uint8, int64): int64, + (uint16, int64): int64, + (uint32, int64): int64, + (float32, float32): float32, + 
(float32, float64): float64, + (float64, float32): float64, + (float64, float64): float64, + (bool, bool): bool, +} + + +def _result_type(type1, type2): + if (type1, type2) in _promotion_table: + return _promotion_table[type1, type2] + raise TypeError(f"{type1} and {type2} cannot be type promoted together") diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_elementwise_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_elementwise_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..063fa3354d3c3f06caf4dabbf32764286230b42d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_elementwise_functions.py @@ -0,0 +1,729 @@ +from __future__ import annotations + +from ._dtypes import ( + _boolean_dtypes, + _floating_dtypes, + _integer_dtypes, + _integer_or_boolean_dtypes, + _numeric_dtypes, + _result_type, +) +from ._array_object import Array + +import cupy as np + + +def abs(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.abs `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in abs") + return Array._new(np.abs(x._array)) + + +# Note: the function name is different here +def acos(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arccos `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in acos") + return Array._new(np.arccos(x._array)) + + +# Note: the function name is different here +def acosh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arccosh `. + + See its docstring for more information. 
+ """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in acosh") + return Array._new(np.arccosh(x._array)) + + +def add(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.add `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in add") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.add(x1._array, x2._array)) + + +# Note: the function name is different here +def asin(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arcsin `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in asin") + return Array._new(np.arcsin(x._array)) + + +# Note: the function name is different here +def asinh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arcsinh `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in asinh") + return Array._new(np.arcsinh(x._array)) + + +# Note: the function name is different here +def atan(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arctan `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in atan") + return Array._new(np.arctan(x._array)) + + +# Note: the function name is different here +def atan2(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arctan2 `. + + See its docstring for more information. 
+ """ + if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in atan2") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.arctan2(x1._array, x2._array)) + + +# Note: the function name is different here +def atanh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.arctanh `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in atanh") + return Array._new(np.arctanh(x._array)) + + +def bitwise_and(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.bitwise_and `. + + See its docstring for more information. + """ + if ( + x1.dtype not in _integer_or_boolean_dtypes + or x2.dtype not in _integer_or_boolean_dtypes + ): + raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.bitwise_and(x1._array, x2._array)) + + +# Note: the function name is different here +def bitwise_left_shift(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.left_shift `. + + See its docstring for more information. + """ + if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes: + raise TypeError("Only integer dtypes are allowed in bitwise_left_shift") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + # Note: bitwise_left_shift is only defined for x2 nonnegative. 
+ if np.any(x2._array < 0): + raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0") + return Array._new(np.left_shift(x1._array, x2._array)) + + +# Note: the function name is different here +def bitwise_invert(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.invert `. + + See its docstring for more information. + """ + if x.dtype not in _integer_or_boolean_dtypes: + raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert") + return Array._new(np.invert(x._array)) + + +def bitwise_or(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.bitwise_or `. + + See its docstring for more information. + """ + if ( + x1.dtype not in _integer_or_boolean_dtypes + or x2.dtype not in _integer_or_boolean_dtypes + ): + raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.bitwise_or(x1._array, x2._array)) + + +# Note: the function name is different here +def bitwise_right_shift(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.right_shift `. + + See its docstring for more information. + """ + if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes: + raise TypeError("Only integer dtypes are allowed in bitwise_right_shift") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + # Note: bitwise_right_shift is only defined for x2 nonnegative. + if np.any(x2._array < 0): + raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0") + return Array._new(np.right_shift(x1._array, x2._array)) + + +def bitwise_xor(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.bitwise_xor `. 
+ + See its docstring for more information. + """ + if ( + x1.dtype not in _integer_or_boolean_dtypes + or x2.dtype not in _integer_or_boolean_dtypes + ): + raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.bitwise_xor(x1._array, x2._array)) + + +def ceil(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.ceil `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in ceil") + if x.dtype in _integer_dtypes: + # Note: The return dtype of ceil is the same as the input + return x + return Array._new(np.ceil(x._array)) + + +def cos(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.cos `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in cos") + return Array._new(np.cos(x._array)) + + +def cosh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.cosh `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in cosh") + return Array._new(np.cosh(x._array)) + + +def divide(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.divide `. + + See its docstring for more information. 
+ """ + if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in divide") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.divide(x1._array, x2._array)) + + +def equal(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.equal `. + + See its docstring for more information. + """ + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.equal(x1._array, x2._array)) + + +def exp(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.exp `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in exp") + return Array._new(np.exp(x._array)) + + +def expm1(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.expm1 `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in expm1") + return Array._new(np.expm1(x._array)) + + +def floor(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.floor `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in floor") + if x.dtype in _integer_dtypes: + # Note: The return dtype of floor is the same as the input + return x + return Array._new(np.floor(x._array)) + + +def floor_divide(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.floor_divide `. + + See its docstring for more information. 
+ """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in floor_divide") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.floor_divide(x1._array, x2._array)) + + +def greater(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.greater `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in greater") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.greater(x1._array, x2._array)) + + +def greater_equal(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.greater_equal `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in greater_equal") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.greater_equal(x1._array, x2._array)) + + +def isfinite(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.isfinite `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in isfinite") + return Array._new(np.isfinite(x._array)) + + +def isinf(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.isinf `. + + See its docstring for more information. 
+ """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in isinf") + return Array._new(np.isinf(x._array)) + + +def isnan(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.isnan `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in isnan") + return Array._new(np.isnan(x._array)) + + +def less(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.less `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in less") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.less(x1._array, x2._array)) + + +def less_equal(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.less_equal `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in less_equal") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.less_equal(x1._array, x2._array)) + + +def log(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.log `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in log") + return Array._new(np.log(x._array)) + + +def log1p(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.log1p `. + + See its docstring for more information. 
+ """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in log1p") + return Array._new(np.log1p(x._array)) + + +def log2(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.log2 `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in log2") + return Array._new(np.log2(x._array)) + + +def log10(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.log10 `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in log10") + return Array._new(np.log10(x._array)) + + +def logaddexp(x1: Array, x2: Array) -> Array: + """ + Array API compatible wrapper for :py:func:`np.logaddexp `. + + See its docstring for more information. + """ + if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in logaddexp") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.logaddexp(x1._array, x2._array)) + + +def logical_and(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.logical_and `. + + See its docstring for more information. + """ + if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes: + raise TypeError("Only boolean dtypes are allowed in logical_and") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.logical_and(x1._array, x2._array)) + + +def logical_not(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.logical_not `. + + See its docstring for more information. 
+ """ + if x.dtype not in _boolean_dtypes: + raise TypeError("Only boolean dtypes are allowed in logical_not") + return Array._new(np.logical_not(x._array)) + + +def logical_or(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.logical_or `. + + See its docstring for more information. + """ + if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes: + raise TypeError("Only boolean dtypes are allowed in logical_or") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.logical_or(x1._array, x2._array)) + + +def logical_xor(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.logical_xor `. + + See its docstring for more information. + """ + if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes: + raise TypeError("Only boolean dtypes are allowed in logical_xor") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.logical_xor(x1._array, x2._array)) + + +def multiply(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.multiply `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in multiply") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.multiply(x1._array, x2._array)) + + +def negative(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.negative `. + + See its docstring for more information. 
+ """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in negative") + return Array._new(np.negative(x._array)) + + +def not_equal(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.not_equal `. + + See its docstring for more information. + """ + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.not_equal(x1._array, x2._array)) + + +def positive(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.positive `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in positive") + return Array._new(np.positive(x._array)) + + +# Note: the function name is different here +def pow(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.power `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in pow") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.power(x1._array, x2._array)) + + +def remainder(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.remainder `. + + See its docstring for more information. + """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in remainder") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.remainder(x1._array, x2._array)) + + +def round(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.round `. 
+ + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in round") + return Array._new(np.round(x._array)) + + +def sign(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.sign `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in sign") + return Array._new(np.sign(x._array)) + + +def sin(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.sin `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in sin") + return Array._new(np.sin(x._array)) + + +def sinh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.sinh `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in sinh") + return Array._new(np.sinh(x._array)) + + +def square(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.square `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in square") + return Array._new(np.square(x._array)) + + +def sqrt(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.sqrt `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in sqrt") + return Array._new(np.sqrt(x._array)) + + +def subtract(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.subtract `. + + See its docstring for more information. 
+ """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in subtract") + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.subtract(x1._array, x2._array)) + + +def tan(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.tan `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in tan") + return Array._new(np.tan(x._array)) + + +def tanh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.tanh `. + + See its docstring for more information. + """ + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in tanh") + return Array._new(np.tanh(x._array)) + + +def trunc(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.trunc `. + + See its docstring for more information. + """ + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in trunc") + if x.dtype in _integer_dtypes: + # Note: The return dtype of trunc is the same as the input + return x + return Array._new(np.trunc(x._array)) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_indexing_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_indexing_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..be38fcc28c97eb5d5c8240b7f7b819d2856a6a9c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_indexing_functions.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +from ._array_object import Array +from ._dtypes import _integer_dtypes + +import cupy as np + +def take(x: Array, indices: Array, /, *, axis: int) -> Array: + """ + Array API compatible wrapper for :py:func:`np.take `. 
+ See its docstring for more information. + """ + if indices.dtype not in _integer_dtypes: + raise TypeError("Only integer dtypes are allowed in indexing") + if indices.ndim != 1: + raise ValueError("Only 1-dim indices array is supported") + return Array._new(np.take(x._array, indices._array, axis=axis)) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_manipulation_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_manipulation_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..bbe38505b3b86b43956a982c393e81c3b3a52c3c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_manipulation_functions.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from ._array_object import Array +from ._data_type_functions import result_type + +from typing import List, Optional, Tuple, Union + +import cupy as np + +# Note: the function name is different here +def concat( + arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0 +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.concatenate `. + + See its docstring for more information. + """ + # Note: Casting rules here are different from the np.concatenate default + # (no for scalars with axis=None, no cross-kind casting) + dtype = result_type(*arrays) + arrays = tuple(a._array for a in arrays) + return Array._new(np.concatenate(arrays, axis=axis, dtype=dtype)) + + +def expand_dims(x: Array, /, *, axis: int) -> Array: + """ + Array API compatible wrapper for :py:func:`np.expand_dims `. + + See its docstring for more information. + """ + return Array._new(np.expand_dims(x._array, axis)) + + +def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array: + """ + Array API compatible wrapper for :py:func:`np.flip `. + + See its docstring for more information. + """ + return Array._new(np.flip(x._array, axis=axis)) + + +# Note: The function name is different here (see also matrix_transpose). 
+# Unlike transpose(), the axes argument is required. +def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array: + """ + Array API compatible wrapper for :py:func:`np.transpose `. + + See its docstring for more information. + """ + return Array._new(np.transpose(x._array, axes)) + + +# Note: the optional argument is called 'shape', not 'newshape' +def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array: + """ + Array API compatible wrapper for :py:func:`np.reshape `. + + See its docstring for more information. + """ + return Array._new(np.reshape(x._array, shape)) + + +def roll( + x: Array, + /, + shift: Union[int, Tuple[int, ...]], + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.roll `. + + See its docstring for more information. + """ + return Array._new(np.roll(x._array, shift, axis=axis)) + + +def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array: + """ + Array API compatible wrapper for :py:func:`np.squeeze `. + + See its docstring for more information. + """ + return Array._new(np.squeeze(x._array, axis=axis)) + + +def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.stack `. + + See its docstring for more information. 
+ """ + # Call result type here just to raise on disallowed type combinations + result_type(*arrays) + arrays = tuple(a._array for a in arrays) + return Array._new(np.stack(arrays, axis=axis)) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_searching_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_searching_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..6657ae89ed7b277acc3c908454b246a766d5ff2e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_searching_functions.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +from ._array_object import Array +from ._dtypes import _result_type + +from typing import Optional, Tuple + +import cupy as np + + +def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array: + """ + Array API compatible wrapper for :py:func:`np.argmax `. + + See its docstring for more information. + """ + return Array._new(np.asarray(np.argmax(x._array, axis=axis, keepdims=keepdims))) + + +def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array: + """ + Array API compatible wrapper for :py:func:`np.argmin `. + + See its docstring for more information. + """ + return Array._new(np.asarray(np.argmin(x._array, axis=axis, keepdims=keepdims))) + + +def nonzero(x: Array, /) -> Tuple[Array, ...]: + """ + Array API compatible wrapper for :py:func:`np.nonzero `. + + See its docstring for more information. + """ + return tuple(Array._new(i) for i in np.nonzero(x._array)) + + +def where(condition: Array, x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.where `. + + See its docstring for more information. 
+ """ + # Call result type here just to raise on disallowed type combinations + _result_type(x1.dtype, x2.dtype) + x1, x2 = Array._normalize_two_args(x1, x2) + return Array._new(np.where(condition._array, x1._array, x2._array)) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_set_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_set_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..faf122c00bd7849ef2e08d50d82cbb23f87ca637 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_set_functions.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +from ._array_object import Array + +from typing import NamedTuple + +import cupy as np + +# Note: np.unique() is split into four functions in the array API: +# unique_all, unique_counts, unique_inverse, and unique_values (this is done +# to remove polymorphic return types). + +# Note: The various unique() functions are supposed to return multiple NaNs. +# This does not match the NumPy behavior, however, this is currently left as a +# TODO in this implementation as this behavior may be reverted in np.unique(). +# See https://github.com/numpy/numpy/issues/20326. + +# Note: The functions here return a namedtuple (np.unique() returns a normal +# tuple). + +class UniqueAllResult(NamedTuple): + values: Array + indices: Array + inverse_indices: Array + counts: Array + + +class UniqueCountsResult(NamedTuple): + values: Array + counts: Array + + +class UniqueInverseResult(NamedTuple): + values: Array + inverse_indices: Array + + +def unique_all(x: Array, /) -> UniqueAllResult: + """ + Array API compatible wrapper for :py:func:`np.unique `. + + See its docstring for more information. 
+ """ + values, indices, inverse_indices, counts = np.unique( + x._array, + return_counts=True, + return_index=True, + return_inverse=True, + equal_nan=False, + ) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueAllResult( + Array._new(values), + Array._new(indices), + Array._new(inverse_indices), + Array._new(counts), + ) + + +def unique_counts(x: Array, /) -> UniqueCountsResult: + res = np.unique( + x._array, + return_counts=True, + return_index=False, + return_inverse=False, + equal_nan=False, + ) + + return UniqueCountsResult(*[Array._new(i) for i in res]) + + +def unique_inverse(x: Array, /) -> UniqueInverseResult: + """ + Array API compatible wrapper for :py:func:`np.unique `. + + See its docstring for more information. + """ + values, inverse_indices = np.unique( + x._array, + return_counts=False, + return_index=False, + return_inverse=True, + equal_nan=False, + ) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueInverseResult(Array._new(values), Array._new(inverse_indices)) + + +def unique_values(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.unique `. + + See its docstring for more information. 
+ """ + res = np.unique( + x._array, + return_counts=False, + return_index=False, + return_inverse=False, + equal_nan=False, + ) + return Array._new(res) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_sorting_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_sorting_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..ce62a2addac607e281989dc1aa0c7def26cb89b6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_sorting_functions.py @@ -0,0 +1,53 @@ +# mypy: ignore-errors + +from __future__ import annotations + +from ._array_object import Array + +import cupy as np + + +# Note: the descending keyword argument is new in this function +def argsort( + x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.argsort `. + + See its docstring for more information. + """ + # Note: Unlike in NumPy we only support kind={None, 'stable'}, but the standard + # does *not* require we need to support unstable sort. + kind = None + if not descending: + res = np.argsort(x._array, axis=axis, kind=kind) + else: + # As NumPy has no native descending sort, we imitate it here. Note that + # simply flipping the results of np.argsort(x._array, ...) would not + # respect the relative order like it would in native descending sorts. + res = np.flip( + np.argsort(np.flip(x._array, axis=axis), axis=axis, kind=kind), + axis=axis, + ) + # Rely on flip()/argsort() to validate axis + normalised_axis = axis if axis >= 0 else x.ndim + axis + max_i = x.shape[normalised_axis] - 1 + res = max_i - res + return Array._new(res) + +# Note: the descending keyword argument is new in this function +def sort( + x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.sort `. + + See its docstring for more information. 
+ """ + # Note: Unlike in NumPy we only support kind={None, 'stable'}, but the standard + # does *not* require we need to support unstable sort. + kind = None + res = np.sort(x._array, axis=axis, kind=kind) + if descending: + res = np.flip(res, axis=axis) + return Array._new(res) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_statistical_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_statistical_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..6f9c4b779e1600f4b2084e2a774c3d5d3fe5f24c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_statistical_functions.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +from ._dtypes import ( + _floating_dtypes, + _numeric_dtypes, +) +from ._array_object import Array +from ._creation_functions import asarray +from ._dtypes import float32, float64 + +from typing import TYPE_CHECKING, Optional, Tuple, Union + +if TYPE_CHECKING: + from ._typing import Dtype + +import cupy as np + + +def max( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, +) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in max") + return Array._new(np.max(x._array, axis=axis, keepdims=keepdims)) + + +def mean( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, +) -> Array: + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in mean") + return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims)) + + +def min( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, +) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in min") + return Array._new(np.min(x._array, axis=axis, keepdims=keepdims)) + + +def prod( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = 
None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, +) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in prod") + # Note: sum() and prod() always upcast float32 to float64 for dtype=None + # We need to do so here before computing the product to avoid overflow + if dtype is None and x.dtype == float32: + dtype = float64 + return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims)) + + +def std( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, +) -> Array: + # Note: the keyword argument correction is different here + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in std") + return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims)) + + +def sum( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, +) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in sum") + # Note: sum() and prod() always upcast integers to (u)int64 and float32 to + # float64 for dtype=None. 
`np.sum` does that too for integers, but not for + # float32, so we need to special-case it here + if dtype is None and x.dtype == float32: + dtype = float64 + return Array._new(np.sum(x._array, axis=axis, dtype=dtype, keepdims=keepdims)) + + +def var( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, +) -> Array: + # Note: the keyword argument correction is different here + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in var") + return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims)) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_typing.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..e5289b7a0f10a6f534df8d72bfbcb9388169f432 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_typing.py @@ -0,0 +1,75 @@ +""" +This file defines the types for type annotations. + +These names aren't part of the module namespace, but they are used in the +annotations in the function signatures. The functions in the module are only +valid for inputs that match the given type annotations. +""" + +from __future__ import annotations +from cupy.cuda import Device + + +__all__ = [ + "Array", + "Device", + "Dtype", + "SupportsDLPack", + "SupportsBufferProtocol", + "PyCapsule", +] + +import sys +from typing import ( + Any, + Literal, + Sequence, + Type, + Union, + TYPE_CHECKING, + TypeVar, + Protocol, +) + +from ._array_object import Array +from numpy import ( + dtype, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, +) + +_T_co = TypeVar("_T_co", covariant=True) + +class NestedSequence(Protocol[_T_co]): + def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ... + def __len__(self, /) -> int: ... 
+ +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype + +SupportsBufferProtocol = Any +PyCapsule = Any + +class SupportsDLPack(Protocol): + def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule: ... diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/_utility_functions.py b/vllm/lib/python3.10/site-packages/cupy/array_api/_utility_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..cdcbbc83e70368c157c1a3eb1ff6acefd0b15d8e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/_utility_functions.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from ._array_object import Array + +from typing import Optional, Tuple, Union + +import cupy as np + + +def all( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.all `. + + See its docstring for more information. + """ + return Array._new(np.asarray(np.all(x._array, axis=axis, keepdims=keepdims))) + + +def any( + x: Array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, +) -> Array: + """ + Array API compatible wrapper for :py:func:`np.any `. + + See its docstring for more information. 
+ """ + return Array._new(np.asarray(np.any(x._array, axis=axis, keepdims=keepdims))) diff --git a/vllm/lib/python3.10/site-packages/cupy/array_api/linalg.py b/vllm/lib/python3.10/site-packages/cupy/array_api/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..9eaacd1baa2ea6d3c37504ce6aca9e5fa3face54 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/array_api/linalg.py @@ -0,0 +1,445 @@ +from __future__ import annotations + +import functools + +from ._dtypes import _floating_dtypes, _numeric_dtypes +from ._manipulation_functions import reshape +from ._array_object import Array + +from .._core.internal import _normalize_axis_indices as normalize_axis_tuple + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal, Optional, Sequence, Tuple, Union + +from typing import NamedTuple + +import cupy as np + +class EighResult(NamedTuple): + eigenvalues: Array + eigenvectors: Array + +class QRResult(NamedTuple): + Q: Array + R: Array + +class SlogdetResult(NamedTuple): + sign: Array + logabsdet: Array + +class SVDResult(NamedTuple): + U: Array + S: Array + Vh: Array + +# Note: the inclusion of the upper keyword is different from +# np.linalg.cholesky, which does not have it. +def cholesky(x: Array, /, *, upper: bool = False) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.cholesky `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.cholesky. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in cholesky') + L = np.linalg.cholesky(x._array) + if upper: + return Array._new(L).mT + return Array._new(L) + +# Note: cross is the numpy top-level namespace, not np.linalg +def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array: + """ + Array API compatible wrapper for :py:func:`np.cross `. + + See its docstring for more information. 
+ """ + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError('Only numeric dtypes are allowed in cross') + # Note: this is different from np.cross(), which broadcasts + if x1.shape != x2.shape: + raise ValueError('x1 and x2 must have the same shape') + if x1.ndim == 0: + raise ValueError('cross() requires arrays of dimension at least 1') + # Note: this is different from np.cross(), which allows dimension 2 + if x1.shape[axis] != 3: + raise ValueError('cross() dimension must equal 3') + return Array._new(np.cross(x1._array, x2._array, axis=axis)) + +def det(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.det `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.det. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in det') + return Array._new(np.linalg.det(x._array)) + +# Note: diagonal is the numpy top-level namespace, not np.linalg +def diagonal(x: Array, /, *, offset: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.diagonal `. + + See its docstring for more information. + """ + # Note: diagonal always operates on the last two axes, whereas np.diagonal + # operates on the first two axes by default + return Array._new(np.diagonal(x._array, offset=offset, axis1=-2, axis2=-1)) + + +def eigh(x: Array, /) -> EighResult: + """ + Array API compatible wrapper for :py:func:`np.linalg.eigh `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.eigh. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in eigh') + + # Note: the return type here is a namedtuple, which is different from + # np.eigh, which only returns a tuple. 
+ return EighResult(*map(Array._new, np.linalg.eigh(x._array))) + + +def eigvalsh(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.eigvalsh `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.eigvalsh. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in eigvalsh') + + return Array._new(np.linalg.eigvalsh(x._array)) + +def inv(x: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.inv `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.inv. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in inv') + + return Array._new(np.linalg.inv(x._array)) + + +# Note: matmul is the numpy top-level namespace but not in np.linalg +def matmul(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.matmul `. + + See its docstring for more information. + """ + # Note: the restriction to numeric dtypes only is different from + # np.matmul. + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError('Only numeric dtypes are allowed in matmul') + + return Array._new(np.matmul(x1._array, x2._array)) + + +# Note: the name here is different from norm(). The array API norm is split +# into matrix_norm and vector_norm(). + +# The type for ord should be Optional[Union[int, float, Literal[np.inf, +# -np.inf, 'fro', 'nuc']]], but Literal does not support floating-point +# literals. +def matrix_norm(x: Array, /, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.norm `. + + See its docstring for more information. 
+ """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.norm. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in matrix_norm') + + return Array._new(np.linalg.norm(x._array, axis=(-2, -1), keepdims=keepdims, ord=ord)) + + +def matrix_power(x: Array, n: int, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.matrix_power `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.matrix_power. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed for the first argument of matrix_power') + + # np.matrix_power already checks if n is an integer + return Array._new(np.linalg.matrix_power(x._array, n)) + +# Note: the keyword argument name rtol is different from np.linalg.matrix_rank +def matrix_rank(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array: + """ + Array API compatible wrapper for :py:func:`np.matrix_rank `. + + See its docstring for more information. + """ + # Note: this is different from np.linalg.matrix_rank, which supports 1 + # dimensional arrays. + if x.ndim < 2: + raise np.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional") + S = np.linalg.svd(x._array, compute_uv=False) + if rtol is None: + tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * np.finfo(S.dtype).eps + else: + if isinstance(rtol, Array): + rtol = rtol._array + # Note: this is different from np.linalg.matrix_rank, which does not multiply + # the tolerance by the largest singular value. + tol = S.max(axis=-1, keepdims=True)*np.asarray(rtol)[..., np.newaxis] + return Array._new(np.count_nonzero(S > tol, axis=-1)) + + +# Note: this function is new in the array API spec. Unlike transpose, it only +# transposes the last two axes. 
+def matrix_transpose(x: Array, /) -> Array: + if x.ndim < 2: + raise ValueError("x must be at least 2-dimensional for matrix_transpose") + return Array._new(np.swapaxes(x._array, -1, -2)) + +# Note: outer is the numpy top-level namespace, not np.linalg +def outer(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.outer `. + + See its docstring for more information. + """ + # Note: the restriction to numeric dtypes only is different from + # np.outer. + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError('Only numeric dtypes are allowed in outer') + + # Note: the restriction to only 1-dim arrays is different from np.outer + if x1.ndim != 1 or x2.ndim != 1: + raise ValueError('The input arrays to outer must be 1-dimensional') + + return Array._new(np.outer(x1._array, x2._array)) + +# Note: the keyword argument name rtol is different from np.linalg.pinv +def pinv(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.pinv `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.pinv. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in pinv') + + # Note: this is different from np.linalg.pinv, which does not multiply the + # default tolerance by max(M, N). + if rtol is None: + rtol = max(x.shape[-2:]) * np.finfo(x.dtype).eps # type: ignore + return Array._new(np.linalg.pinv(x._array, rcond=rtol)) + +def qr(x: Array, /, *, mode: Literal['reduced', 'complete'] = 'reduced') -> QRResult: + """ + Array API compatible wrapper for :py:func:`np.linalg.qr `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.qr. 
+ if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in qr') + + # Note: the return type here is a namedtuple, which is different from + # np.linalg.qr, which only returns a tuple. + return QRResult(*map(Array._new, np.linalg.qr(x._array, mode=mode))) + +def slogdet(x: Array, /) -> SlogdetResult: + """ + Array API compatible wrapper for :py:func:`np.linalg.slogdet `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.slogdet. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in slogdet') + + # Note: the return type here is a namedtuple, which is different from + # np.linalg.slogdet, which only returns a tuple. + return SlogdetResult(*map(Array._new, np.linalg.slogdet(x._array))) + +# Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a +# vector when it is exactly 1-dimensional. All other cases treat x2 as a stack +# of matrices. The np.linalg.solve behavior of allowing stacks of both +# matrices and vectors is ambiguous c.f. +# https://github.com/numpy/numpy/issues/15349 and +# https://github.com/data-apis/array-api/issues/285. + +# Note: The impl below is deviated from numpy.array_api's. 
+def _solve(a, b): + from cupy.linalg._util import ( + _assert_stacked_2d, _assert_stacked_square, linalg_common_type) + + _commonType = functools.partial(linalg_common_type, reject_float16=False) + + _assert_stacked_2d(a) + _assert_stacked_square(a) + _, result_t = _commonType(a, b) + + if b.ndim == 1: + # (M,) -> (M, 1) + old_shape = b.shape + b = b.reshape(-1, 1) + elif b.ndim == a.ndim - 1: + # x1 has shape (M, M) or (..., M, M) + raise ValueError('x2 must have shape (M,) or (..., M, K); ' + '(..., M) is not allowed') + else: + # (..., M, K) => no change + old_shape = None + + r = np.linalg.solve(a, b).astype(result_t, copy=False) + r = r.reshape(old_shape) if old_shape else r + return r + +def solve(x1: Array, x2: Array, /) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.solve `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.solve. + if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in solve') + + return Array._new(_solve(x1._array, x2._array)) + +def svd(x: Array, /, *, full_matrices: bool = True) -> SVDResult: + """ + Array API compatible wrapper for :py:func:`np.linalg.svd `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.svd. + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in svd') + + # Note: the return type here is a namedtuple, which is different from + # np.svd, which only returns a tuple. + return SVDResult(*map(Array._new, np.linalg.svd(x._array, full_matrices=full_matrices))) + +# Note: svdvals is not in NumPy (but it is in SciPy). It is equivalent to +# np.linalg.svd(compute_uv=False). 
+def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]: + if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in svdvals') + return Array._new(np.linalg.svd(x._array, compute_uv=False)) + +# Note: tensordot is the numpy top-level namespace but not in np.linalg + +# Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like. +def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2) -> Array: + # Note: the restriction to numeric dtypes only is different from + # np.tensordot. + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError('Only numeric dtypes are allowed in tensordot') + + return Array._new(np.tensordot(x1._array, x2._array, axes=axes)) + +# Note: trace is the numpy top-level namespace, not np.linalg +def trace(x: Array, /, *, offset: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.trace `. + + See its docstring for more information. 
+ """ + if x.dtype not in _numeric_dtypes: + raise TypeError('Only numeric dtypes are allowed in trace') + # Note: trace always operates on the last two axes, whereas np.trace + # operates on the first two axes by default + return Array._new(np.asarray(np.trace(x._array, offset=offset, axis1=-2, axis2=-1))) + +# Note: vecdot is not in NumPy +def vecdot(x1: Array, x2: Array, /, *, axis: int = -1) -> Array: + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError('Only numeric dtypes are allowed in vecdot') + ndim = max(x1.ndim, x2.ndim) + x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) + x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) + if x1_shape[axis] != x2_shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + x1_, x2_ = np.broadcast_arrays(x1._array, x2._array) + x1_ = np.moveaxis(x1_, axis, -1) + x2_ = np.moveaxis(x2_, axis, -1) + + res = x1_[..., None, :] @ x2_[..., None] + return Array._new(res[..., 0, 0]) + + +# Note: the name here is different from norm(). The array API norm is split +# into matrix_norm and vector_norm(). + +# The type for ord should be Optional[Union[int, float, Literal[np.inf, +# -np.inf]]] but Literal does not support floating-point literals. +def vector_norm(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> Array: + """ + Array API compatible wrapper for :py:func:`np.linalg.norm `. + + See its docstring for more information. + """ + # Note: the restriction to floating-point dtypes only is different from + # np.linalg.norm. 
+ if x.dtype not in _floating_dtypes: + raise TypeError('Only floating-point dtypes are allowed in norm') + + # np.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or + # when axis=None and the input is 2-D, so to force a vector norm, we make + # it so the input is 1-D (for axis=None), or reshape so that norm is done + # on a single dimension. + a = x._array + if axis is None: + # Note: np.linalg.norm() doesn't handle 0-D arrays + a = a.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # np.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(a.ndim) if i not in normalized_axis) + newshape = axis + rest + a = np.transpose(a, newshape).reshape( + (np.prod([a.shape[i] for i in axis], dtype=int), *[a.shape[i] for i in rest])) + _axis = 0 + else: + _axis = axis + + res = Array._new(np.linalg.norm(a, axis=_axis, ord=ord)) + + if keepdims: + # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. 
+ shape = list(x.shape) + _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim) + for i in _axis: + shape[i] = 1 + res = reshape(res, tuple(shape)) + + return res + +__all__ = ['cholesky', 'cross', 'det', 'diagonal', 'eigh', 'eigvalsh', 'inv', 'matmul', 'matrix_norm', 'matrix_power', 'matrix_rank', 'matrix_transpose', 'outer', 'pinv', 'qr', 'slogdet', 'solve', 'svd', 'svdvals', 'tensordot', 'trace', 'vecdot', 'vector_norm'] diff --git a/vllm/lib/python3.10/site-packages/cupy/exceptions/__init__.py b/vllm/lib/python3.10/site-packages/cupy/exceptions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff18a2933c90e4739d4eedcb11fc7ecc15ce7852 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/exceptions/__init__.py @@ -0,0 +1,17 @@ +# mypy: ignore-errors +import numpy + +if numpy.__version__ < '2': + from numpy import AxisError # NOQA + from numpy import ComplexWarning # NOQA + from numpy import ModuleDeprecationWarning # NOQA + from numpy import TooHardError # NOQA + from numpy import VisibleDeprecationWarning # NOQA + from numpy import RankWarning # NOQA +else: + from numpy.exceptions import AxisError # NOQA + from numpy.exceptions import ComplexWarning # NOQA + from numpy.exceptions import ModuleDeprecationWarning # NOQA + from numpy.exceptions import TooHardError # NOQA + from numpy.exceptions import VisibleDeprecationWarning # NOQA + from numpy.exceptions import RankWarning # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupy/exceptions/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/exceptions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c48b47c3313581edb333cbdf53d6fd8ba8c84ad6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/exceptions/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/polynomial/__init__.py 
b/vllm/lib/python3.10/site-packages/cupy/polynomial/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..271f72dfafa0b614251be326e2446fa83f4ab981 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/polynomial/__init__.py @@ -0,0 +1,6 @@ +# Functions from the following NumPy document +# https://numpy.org/doc/stable/reference/routines.polynomials.html + +# "NOQA" to suppress flake8 warning +from cupy.polynomial import polynomial # NOQA +from cupy.polynomial import polyutils # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ae64660b3d32669e28b65c200cdf8814b399277 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/polynomial.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dfdf2b98f095bd59798efb0b0193b1953e2502f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/polynomial.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/polyutils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/polyutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf17795c90ea18f3f97ce23b4731ec1d017eafce Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupy/polynomial/__pycache__/polyutils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupy/polynomial/polynomial.py b/vllm/lib/python3.10/site-packages/cupy/polynomial/polynomial.py new file mode 100644 index 
0000000000000000000000000000000000000000..a701bcf9a39f38439376ba3feceb7976b5f9ec04 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/polynomial/polynomial.py @@ -0,0 +1,181 @@ +import cupy + + +def polyvander(x, deg): + """Computes the Vandermonde matrix of given degree. + + Args: + x (cupy.ndarray): array of points + deg (int): degree of the resulting matrix. + + Returns: + cupy.ndarray: The Vandermonde matrix + + .. seealso:: :func:`numpy.polynomial.polynomial.polyvander` + + """ + deg = cupy.polynomial.polyutils._deprecate_as_int(deg, 'deg') + if deg < 0: + raise ValueError('degree must be non-negative') + if x.ndim == 0: + x = x.ravel() + dtype = cupy.float64 if x.dtype.kind in 'biu' else x.dtype + out = x ** cupy.arange(deg + 1, dtype=dtype).reshape((-1,) + (1,) * x.ndim) + return cupy.moveaxis(out, 0, -1) + + +def polycompanion(c): + """Computes the companion matrix of c. + + Args: + c (cupy.ndarray): 1-D array of polynomial coefficients + ordered from low to high degree. + + Returns: + cupy.ndarray: Companion matrix of dimensions (deg, deg). + + .. seealso:: :func:`numpy.polynomial.polynomial.polycompanion` + + """ + [c] = cupy.polynomial.polyutils.as_series([c]) + deg = c.size - 1 + if deg == 0: + raise ValueError('Series must have maximum degree of at least 1.') + matrix = cupy.eye(deg, k=-1, dtype=c.dtype) + matrix[:, -1] -= c[:-1] / c[-1] + return matrix + + +def polyval(x, c, tensor=True): + """ + Evaluate a polynomial at points x. + + If `c` is of length `n + 1`, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. 
If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + numpy.polynomial.polynomial.polyval + + Notes + ----- + The evaluation uses Horner's method. 
+ + """ + c = cupy.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = cupy.asarray(x) + if isinstance(x, cupy.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + c0 = c[-1] + x*0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0*x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length `N`, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. Note that + scalars have shape (,). + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. 
+ tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + numpy.polynomial.polynomial.polyvalfroomroots + """ + r = cupy.array(r, ndmin=1, copy=False) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(cupy.double) + if isinstance(x, (tuple, list)): + x = cupy.asarray(x) + if isinstance(x, cupy.ndarray): + if tensor: + r = r.reshape(r.shape + (1,)*x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return cupy.prod(x - r, axis=0) diff --git a/vllm/lib/python3.10/site-packages/cupy/polynomial/polyutils.py b/vllm/lib/python3.10/site-packages/cupy/polynomial/polyutils.py new file mode 100644 index 0000000000000000000000000000000000000000..a61775882e8ea3487cab06db210be240b479f6e8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/polynomial/polyutils.py @@ -0,0 +1,110 @@ +import cupy + +import operator +import warnings + + +def _deprecate_as_int(x, desc): + try: + return operator.index(x) + except TypeError as e: + try: + ix = int(x) + except TypeError: + pass + else: + if ix == x: + warnings.warn( + 'In future, this will raise TypeError, as {} will ' + 'need to be an integer not just an integral float.' + .format(desc), + DeprecationWarning, + stacklevel=3 + ) + return ix + + raise TypeError('{} must be an integer'.format(desc)) from e + + +def trimseq(seq): + """Removes small polynomial series coefficients. + + Args: + seq (cupy.ndarray): input array. 
+ + Returns: + cupy.ndarray: input array with trailing zeros removed. If the + resulting output is empty, it returns the first element. + + .. seealso:: :func:`numpy.polynomial.polyutils.trimseq` + + """ + if seq.size == 0: + return seq + ret = cupy.trim_zeros(seq, trim='b') + if ret.size > 0: + return ret + return seq[:1] + + +def as_series(alist, trim=True): + """Returns argument as a list of 1-d arrays. + + Args: + alist (cupy.ndarray or list of cupy.ndarray): 1-D or 2-D input array. + trim (bool, optional): trim trailing zeros. + + Returns: + list of cupy.ndarray: list of 1-D arrays. + + .. seealso:: :func:`numpy.polynomial.polyutils.as_series` + + """ + arrays = [] + for a in alist: + if a.size == 0: + raise ValueError('Coefficient array is empty') + if a.ndim > 1: + raise ValueError('Coefficient array is not 1-d') + if a.dtype.kind == 'b': + raise ValueError('Coefficient arrays have no common type') + a = a.ravel() + if trim: + a = trimseq(a) + arrays.append(a) + dtype = cupy.common_type(*arrays) + ret = [a.astype(dtype, copy=False) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """Removes small trailing coefficients from a polynomial. + + Args: + c(cupy.ndarray): 1d array of coefficients from lowest to highest order. + tol(number, optional): trailing coefficients whose absolute value are + less than or equal to ``tol`` are trimmed. + + Returns: + cupy.ndarray: trimmed 1d array. + + .. 
seealso:: :func:`numpy.polynomial.polyutils.trimcoef` + + """ + if tol < 0: + raise ValueError('tol must be non-negative') + if c.size == 0: + raise ValueError('Coefficient array is empty') + if c.ndim > 1: + raise ValueError('Coefficient array is not 1-d') + if c.dtype.kind == 'b': + raise ValueError('bool inputs are not allowed') + if c.ndim == 0: + c = c.ravel() + c = c.astype(cupy.common_type(c), copy=False) + filt = (cupy.abs(c) > tol)[::-1] + ind = c.size - cupy._manipulation.add_remove._first_nonzero_krnl( + filt, c.size).item() + if ind == 0: + return cupy.zeros_like(c[:1]) + return c[: ind]