diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/Openacc/cupti_openacc.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/Openacc/cupti_openacc.h new file mode 100644 index 0000000000000000000000000000000000000000..b7ea50da7beb2187e77f7606dd70faed0e4b4add --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/Openacc/cupti_openacc.h @@ -0,0 +1,98 @@ +/* + * Copyright 2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#include <cupti_result.h> + +#if !defined(_CUPTI_OPENACC_H_) +#define _CUPTI_OPENACC_H_ + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__LP64__) +#define CUPTILP64 1 +#elif defined(_WIN64) +#define CUPTILP64 1 +#else +#undef CUPTILP64 +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \brief Initialize OpenACC support + * + * \param profRegister function of type acc_prof_reg as obtained from acc_register_library + * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library + * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library + */ +CUptiResult CUPTIAPI +cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup); + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_OPENACC_H_*/ + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..7849c6c6e099e85a4676e7c9c38c05b5a5b02d26 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.h @@ -0,0 +1,1192 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. 
Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__ +#endif + +#if !defined(__DEVICE_DOUBLE_FUNCTIONS_H__) +#define __DEVICE_DOUBLE_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__ +#else +#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +extern "C" +{ +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a double as a 64-bit signed integer. 
+ * + * Reinterpret the bits in the double-precision floating-point value \p x + * as a signed 64-bit integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ long long int __double_as_longlong(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a 64-bit signed integer as a double. + * + * Reinterpret the bits in the 64-bit signed integer value \p x as + * a double-precision floating-point value. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ double __longlong_as_double(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-to-nearest-even mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-to-nearest-even mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. 
+ * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rn(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-towards-zero mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-towards-zero mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. 
+ * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rz(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-up mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-up (to positive infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. 
+ * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_ru(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation in round-down mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-down (to negative infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. 
+ * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * . + * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * . + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double __fma_rd(double x, double y, double z); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-to-nearest-even mode. + * + * Adds two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-towards-zero mode. + * + * Adds two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-up mode. + * + * Adds two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x + \p y. 
+ * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-down mode. + * + * Adds two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-to-nearest-even mode. + * + * Subtracts two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-towards-zero mode. + * + * Subtracts two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-up mode. + * + * Subtracts two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-down mode. + * + * Subtracts two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x - \p y. 
+ * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-to-nearest-even mode. + * + * Multiplies two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-towards-zero mode. + * + * Multiplies two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-up mode. + * + * Multiplies two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-down mode. + * + * Multiplies two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-to-nearest-even mode. + * \return Returns converted value. 
+ */ +extern __device__ __device_builtin__ float __double2float_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-up mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-down mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-down mode. 
+ * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-up (to positive infinity) mode. 
+ * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-down mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed int to a double. + * + * Convert the signed integer value \p x to a double-precision floating-point value. + * \return Returns converted value. 
+ */ +extern __device__ __device_builtin__ double __int2double_rn(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned int to a double. + * + * Convert the unsigned integer value \p x to a double-precision floating-point value. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __uint2double_rn(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rn(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-towards-zero mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rz(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-up mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_ru(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-down mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. 
+ */ +extern __device__ __device_builtin__ double __ll2double_rd(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rn(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-towards-zero mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rz(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-up mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_ru(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-down mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rd(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high 32 bits in a double as a signed integer. + * + * Reinterpret the high 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. 
+ */ +extern __device__ __device_builtin__ int __double2hiint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret low 32 bits in a double as a signed integer. + * + * Reinterpret the low 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ int __double2loint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high and low 32-bit integer values as a double. + * + * Reinterpret the integer value of \p hi as the high 32 bits of a + * double-precision floating-point value and the integer value of \p lo + * as the low 32 bits of the same double-precision floating-point value. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ double __hiloint2double(int hi, int lo); +} + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode); + +#undef EXCLUDE_FROM_RTC + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode = cudaRoundZero); + 
+__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode = cudaRoundNearest); + +#undef __DEVICE_DOUBLE_FUNCTIONS_DECL__ + + +#endif /* __cplusplus && __CUDACC__ */ + +#if !defined(__CUDACC_RTC__) +#include "device_double_functions.hpp" +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__DEVICE_DOUBLE_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f63063689d65c4a1dffb9a823ddaf6a5b353cba3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.hpp @@ -0,0 +1,197 @@ +/* + * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. 
Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__ +#endif + +#if !defined(__DEVICE_DOUBLE_FUNCTIONS_HPP__) +#define __DEVICE_DOUBLE_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__ +#else +#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero 
? __fma_rz(a, b, c) : + mode == cudaRoundPosInf ? __fma_ru(a, b, c) : + mode == cudaRoundMinInf ? __fma_rd(a, b, c) : + __fma_rn(a, b, c); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __dmul_rz(a, b) : + mode == cudaRoundPosInf ? __dmul_ru(a, b) : + mode == cudaRoundMinInf ? __dmul_rd(a, b) : + __dmul_rn(a, b); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __dadd_rz(a, b) : + mode == cudaRoundPosInf ? __dadd_ru(a, b) : + mode == cudaRoundMinInf ? __dadd_rd(a, b) : + __dadd_rn(a, b); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __dsub_rz(a, b) : + mode == cudaRoundPosInf ? __dsub_ru(a, b) : + mode == cudaRoundMinInf ? __dsub_rd(a, b) : + __dsub_rn(a, b); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2int_rn(a) : + mode == cudaRoundPosInf ? __double2int_ru(a) : + mode == cudaRoundMinInf ? __double2int_rd(a) : + __double2int_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2uint_rn(a) : + mode == cudaRoundPosInf ? __double2uint_ru(a) : + mode == cudaRoundMinInf ? __double2uint_rd(a) : + __double2uint_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2ll_rn(a) : + mode == cudaRoundPosInf ? __double2ll_ru(a) : + mode == cudaRoundMinInf ? __double2ll_rd(a) : + __double2ll_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2ull_rn(a) : + mode == cudaRoundPosInf ? __double2ull_ru(a) : + mode == cudaRoundMinInf ? 
__double2ull_rd(a) : + __double2ull_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __ll2double_rz(a) : + mode == cudaRoundPosInf ? __ll2double_ru(a) : + mode == cudaRoundMinInf ? __ll2double_rd(a) : + __ll2double_rn(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __ull2double_rz(a) : + mode == cudaRoundPosInf ? __ull2double_ru(a) : + mode == cudaRoundMinInf ? __ull2double_rd(a) : + __ull2double_rn(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode) +{ + return (double)a; +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode) +{ + return (double)a; +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode) +{ + return (double)a; +} + +#undef __DEVICE_DOUBLE_FUNCTIONS_DECL__ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__DEVICE_DOUBLE_FUNCTIONS_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_config.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_config.h new file mode 100644 index 0000000000000000000000000000000000000000..2c44d6187c7ad1bb4463b13793a6275fd5530473 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_config.h @@ -0,0 +1,304 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__ +#endif + +#if !defined(__HOST_CONFIG_H__) +#define __HOST_CONFIG_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) + +#define _CRTIMP +#define __THROW + +#else /* __CUDACC_RTC__ */ + +/* check for host compilers that are compatible with nvcc */ +#if !defined(__GNUC__) && !defined(_WIN32) + +#error --- !!! UNSUPPORTED COMPILER !!! --- + +#endif /* !__GNUC__ && !_WIN32 */ + +/* check invalid configurations */ +#if defined(__PGIC__) +#if !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__) +#error -- unsupported pgc++ configuration! pgc++ is supported only on Linux x86_64! 
+#endif /* !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__) */ +#endif /* defined(__PGIC__) */ + +#if defined(__powerpc__) +#if !defined(__powerpc64__) || !defined(__LITTLE_ENDIAN__) +#error -- unsupported PPC platform! Only 64-bit little endian PPC is supported! +#endif /* !__powerpc64__ || !__LITTLE_ENDIAN__ */ +#endif /* __powerpc__ */ + +#if defined(__APPLE__) && defined(__MACH__) && !defined(__clang__) +#error -- clang and clang++ are the only supported host compilers on Mac OS X! +#endif /* __APPLE__ && __MACH__ && !__clang__ */ + + +/* check host compiler version */ +#if !__NV_NO_HOST_COMPILER_CHECK + +#if defined(__ICC) + +#if (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && !(__ICC >= 1900 && __ICC <= 2021)) || !defined(__GNUC__) || !defined(__LP64__) + +#error -- unsupported ICC configuration! Only ICC 15.0, ICC 16.0, ICC 17.0, ICC 18.0, ICC 19.x and 20.x on Linux x86_64 are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. + +#endif /* (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && __ICC != 1900) || !__GNUC__ || !__LP64__ */ + +#endif /* __ICC */ + +#if defined(__GRCO_CLANG_COMPILER__) +#if (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17)) +#error -- unsupported Grace clang version! The version must be 16.x to 17.x. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. +#endif /* (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17)) */ + +#endif /* __GRCO_CLANG_COMPILER__ */ + +#if defined(__INTEL_CLANG_COMPILER) +#error -- unsupported Intel ICX compiler! 
The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. +#endif /* __INTEL_CLANG_COMPILER */ + +#if defined(__powerpc__) + +#if defined(__ibmxl_vrm__) && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) && \ + !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000) + +#error -- unsupported xlC version! only xlC 13.1 and 16.1 are supported. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. + +#endif /* __ibmxl_vrm__ && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) && + !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000) */ + +#endif /* __powerpc__ */ + +#if defined(__GNUC__) + +#if __GNUC__ > 12 + +#error -- unsupported GNU version! gcc versions later than 12 are not supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. + +#endif /* __GNUC__ > 12 */ + + +#if defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__) + +#if (__clang_major__ >= 17) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) +#error -- unsupported clang version! clang version must be less than 16 and greater than 3.2 . The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. 
+ +#endif /* (__clang_major__ >= 17) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) */ + +#endif /* defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__) */ + + +#endif /* __GNUC__ */ + +#if defined(_WIN32) + +#if _MSC_VER < 1910 || _MSC_VER >= 1940 + +#error -- unsupported Microsoft Visual Studio version! Only the versions between 2017 and 2022 (inclusive) are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk. + +#elif _MSC_VER >= 1910 && _MSC_VER < 1910 + +#pragma message("support for this version of Microsoft Visual Studio has been deprecated! Only the versions between 2017 and 2022 (inclusive) are supported!") + +#endif /* (_MSC_VER < 1910 || _MSC_VER >= 1940) || (_MSC_VER >= 1910 && _MSC_VER < 1910) */ + +#endif /* _WIN32 */ +#endif /* !__NV_NO_HOST_COMPILER_CHECK */ + + +/* configure host compiler */ +#if defined(__APPLE__) + +#define _CRTIMP +#define _ACRTIMP +#define __THROW + +#if defined(__BLOCKS__) /* nvcc does not support closures */ + +#undef __BLOCKS__ + +#endif /* __BLOCKS__ */ + +#elif defined(__ANDROID__) + +#define _CRTIMP +#define _ACRTIMP +#define __THROW + +#elif defined(__QNX__) + +#define _CRTIMP +#define _ACRTIMP +#define __THROW + +#elif defined(__HORIZON__) + +#define _CRTIMP +#define _ACRTIMP +#define __THROW + +#elif defined(__GNUC__) + +#define _CRTIMP +#define _ACRTIMP + +#include /* for __THROW */ + +#elif defined(_WIN32) + +#if _MSC_VER >= 1500 + +#undef _USE_DECLSPECS_FOR_SAL +#define _USE_DECLSPECS_FOR_SAL \ + 1 + +#endif /* _MSC_VER >= 1500 */ + +#if !defined(_CRT_NONSTDC_NO_WARNINGS) + +#define _CRT_NONSTDC_NO_WARNINGS /* to suppress warnings */ + +#endif /* !_CRT_NONSTDC_NO_WARNINGS */ + +#if !defined(_CRT_SECURE_NO_WARNINGS) + 
+#define _CRT_SECURE_NO_WARNINGS /* to suppress warnings */ + +#endif /* !_CRT_SECURE_NO_WARNINGS */ + +#if !defined(NOMINMAX) + +#define NOMINMAX /* min and max are part of cuda runtime */ + +#endif /* !NOMINMAX */ + +#include /* for _CRTIMP */ +#if _MSC_VER >= 1900 +#include /* for _ACRTIMP */ +#endif /* _MSC_VER >= 1900 */ + +#define __THROW + +#endif /* __APPLE__ */ + +#endif /* __CUDACC_RTC__ */ + + +#if defined(__cplusplus) && defined(__CUDA_ARCH__) && (defined(__PGIC__) || defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER))) + +#if __CUDACC_RTC__ +typedef char *va_list; +#else /* !__CUDACC_RTC__ */ +#include +#endif /* __CUDACC_RTC__ */ + + +#undef va_start +#undef va_end +#undef va_arg + +#ifdef __PGIC__ + +#undef __builtin_va_end + +#define va_start(v,l) __builtin_alt_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_alt_va_arg(v,l) + +#if (__cplusplus >= 201103L) +#undef va_copy +#define va_copy(d,s) __builtin_va_copy(d,s) +#endif + +#else /* !__PGIC__ */ + + +#define va_start(ap, x) (__cu_va_start(&ap, x)) +#define va_end(ap) (__cu_va_end(&ap)) +#define va_arg(ap, t) (*((t *)__cu_va_arg(&ap, (t *)0))) + +#if (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L)) +#undef va_copy +#define va_copy(apd, aps) (__cu_va_copy(&(apd), &(aps))) +#endif /* (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L)) */ +#endif /* __PGIC__ */ + +#endif /* defined(__cplusplus) && (defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER))) */ + + + +#endif /* __CUDACC__ */ + +#endif /* !__HOST_CONFIG_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_defines.h 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..1f6a026c4cce51e084373d3c4dfc57a5f6ff4fea --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_defines.h @@ -0,0 +1,276 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__ +#endif + +#if !defined(__HOST_DEFINES_H__) +#define __HOST_DEFINES_H__ + +#if defined(__CUDACC__) && !defined(__CUDACC_RTC__) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_ALLOW_UNSUPPORTED_LIBCPP) +#include +#if ((defined(_MSC_VER ) && (defined(_M_X64) || defined(_M_AMD64))) ||\ + (defined(__x86_64__) || defined(__amd64__))) && defined(_LIBCPP_VERSION) && !(defined(__HORIZON__) || defined(__ANDROID__) || defined(__QNX__)) +#error "libc++ is not supported on x86 system" +#endif +#endif + +/* CUDA JIT mode (__CUDACC_RTC__) also uses GNU style attributes */ +#if defined(__GNUC__) || (defined(__PGIC__) && defined(__linux__)) || defined(__CUDA_LIBDEVICE__) || defined(__CUDACC_RTC__) + +#if defined(__CUDACC_RTC__) +#define __volatile__ volatile +#endif /* __CUDACC_RTC__ */ + +#define __no_return__ \ + __attribute__((noreturn)) + +#if defined(__CUDACC__) || defined(__CUDA_ARCH__) || defined(__CUDA_LIBDEVICE__) +/* gcc allows users to define attributes with underscores, + e.g., __attribute__((__noinline__)). + Consider a non-CUDA source file (e.g. .cpp) that has the + above attribute specification, and includes this header file. In that case, + defining __noinline__ as below would cause a gcc compilation error. + Hence, only define __noinline__ when the code is being processed + by a CUDA compiler component. 
+*/ +#define __noinline__ \ + __attribute__((noinline)) +#endif /* __CUDACC__ || __CUDA_ARCH__ || __CUDA_LIBDEVICE__ */ + +#undef __forceinline__ +#define __forceinline__ \ + __inline__ __attribute__((always_inline)) +#define __inline_hint__ \ + __attribute__((nv_inline_hint)) +#define __align__(n) \ + __attribute__((aligned(n))) +#define __thread__ \ + __thread +#define __import__ +#define __export__ +#define __cdecl +#define __annotate__(a) \ + __attribute__((a)) +#define __location__(a) \ + __annotate__(a) +#define CUDARTAPI +#define CUDARTAPI_CDECL + +#elif defined(_MSC_VER) + +#if _MSC_VER >= 1400 + +#define __restrict__ \ + __restrict + +#else /* _MSC_VER >= 1400 */ + +#define __restrict__ + +#endif /* _MSC_VER >= 1400 */ + +#define __inline__ \ + __inline +#define __no_return__ \ + __declspec(noreturn) +#define __noinline__ \ + __declspec(noinline) +#define __forceinline__ \ + __forceinline +#define __inline_hint__ \ + __declspec(nv_inline_hint) +#define __align__(n) \ + __declspec(align(n)) +#define __thread__ \ + __declspec(thread) +#define __import__ \ + __declspec(dllimport) +#define __export__ \ + __declspec(dllexport) +#define __annotate__(a) \ + __declspec(a) +#define __location__(a) \ + __annotate__(__##a##__) +#define CUDARTAPI \ + __stdcall +#define CUDARTAPI_CDECL \ + __cdecl + +#else /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */ + +#define __inline__ + +#if !defined(__align__) + +#error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for '__align__' !!! --- + +#endif /* !__align__ */ + +#if !defined(CUDARTAPI) + +#error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for 'CUDARTAPI' !!! 
--- + +#endif /* !CUDARTAPI */ + +#endif /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */ + +#if (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !defined(__clang__)))) || \ + (defined(_MSC_VER) && _MSC_VER < 1900) || \ + (!defined(__GNUC__) && !defined(_MSC_VER)) + +#define __specialization_static \ + static + +#else /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) || + (_MSC_VER && _MSC_VER < 1900) || + (!__GNUC__ && !_MSC_VER) */ + +#define __specialization_static + +#endif /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) || + (_MSC_VER && _MSC_VER < 1900) || + (!__GNUC__ && !_MSC_VER) */ + +#if !defined(__CUDACC__) && !defined(__CUDA_LIBDEVICE__) + +#undef __annotate__ +#define __annotate__(a) + +#else /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */ + +#define __launch_bounds__(...) \ + __annotate__(launch_bounds(__VA_ARGS__)) + +#endif /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */ + +#if defined(__CUDACC__) || defined(__CUDA_LIBDEVICE__) || \ + defined(__GNUC__) || defined(_WIN64) + +#define __builtin_align__(a) \ + __align__(a) + +#else /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */ + +#define __builtin_align__(a) + +#endif /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */ + +#if defined(__CUDACC__) || !defined(__grid_constant__) +#define __grid_constant__ \ + __location__(grid_constant) +#endif /* defined(__CUDACC__) || !defined(__grid_constant__) */ + +#if defined(__CUDACC__) || !defined(__host__) +#define __host__ \ + __location__(host) +#endif /* defined(__CUDACC__) || !defined(__host__) */ +#if defined(__CUDACC__) || !defined(__device__) +#define __device__ \ + __location__(device) +#endif /* defined(__CUDACC__) || !defined(__device__) */ +#if defined(__CUDACC__) || !defined(__global__) +#define __global__ \ + __location__(global) +#endif /* defined(__CUDACC__) || !defined(__global__) */ +#if defined(__CUDACC__) || 
!defined(__shared__) +#define __shared__ \ + __location__(shared) +#endif /* defined(__CUDACC__) || !defined(__shared__) */ +#if defined(__CUDACC__) || !defined(__constant__) +#define __constant__ \ + __location__(constant) +#endif /* defined(__CUDACC__) || !defined(__constant__) */ +#if defined(__CUDACC__) || !defined(__managed__) +#define __managed__ \ + __location__(managed) +#endif /* defined(__CUDACC__) || !defined(__managed__) */ + +#if !defined(__CUDACC__) +#define __device_builtin__ +#define __device_builtin_texture_type__ +#define __device_builtin_surface_type__ +#define __cudart_builtin__ +#else /* defined(__CUDACC__) */ +#define __device_builtin__ \ + __location__(device_builtin) +#define __device_builtin_texture_type__ \ + __location__(device_builtin_texture_type) +#define __device_builtin_surface_type__ \ + __location__(device_builtin_surface_type) +#define __cudart_builtin__ \ + __location__(cudart_builtin) +#endif /* !defined(__CUDACC__) */ + +#if defined(__CUDACC__) || !defined(__cluster_dims__) +#if defined(_MSC_VER) +#define __cluster_dims__(...) \ + __declspec(__cluster_dims__(__VA_ARGS__)) + +#else /* !defined(_MSC_VER) */ +#define __cluster_dims__(...) 
\ + __attribute__((cluster_dims(__VA_ARGS__))) +#endif /* defined(_MSC_VER) */ +#endif /* defined(__CUDACC__) || !defined(__cluster_dims__) */ + +#define __CUDA_ARCH_HAS_FEATURE__(_FEAT) __CUDA_ARCH_FEAT_##_FEAT + +#endif /* !__HOST_DEFINES_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_runtime.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_runtime.h new file mode 100644 index 0000000000000000000000000000000000000000..94d4f6b4bdac0cf39a8b3a47fea7df61b712fcf8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_runtime.h @@ -0,0 +1,288 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__ +#endif + +#if !defined(__CUDA_INTERNAL_COMPILATION__) + +#define __CUDA_INTERNAL_COMPILATION__ +#define __text__ +#define __surf__ +#define __name__shadow_var(c, cpp) \ + #c +#define __name__text_var(c, cpp) \ + #cpp +#define __host__shadow_var(c, cpp) \ + cpp +#define __text_var(c, cpp) \ + cpp +#define __device_fun(fun) \ + #fun +#define __device_var(var) \ + #var +#define __device__text_var(c, cpp) \ + #c +#define __device__shadow_var(c, cpp) \ + #c + +#if defined(_WIN32) && !defined(_WIN64) + +#define __pad__(f) \ + f + +#else /* _WIN32 && !_WIN64 */ + +#define __pad__(f) + +#endif /* _WIN32 && !_WIN64 */ + +#include "builtin_types.h" +#include "storage_class.h" + +#else /* !__CUDA_INTERNAL_COMPILATION__ */ + +template +static inline T *__cudaAddressOf(T &val) +{ + return (T *)((void *)(&(const_cast(reinterpret_cast(val))))); +} + +#define __cudaRegisterBinary(X) \ + __cudaFatCubinHandle = __cudaRegisterFatBinary((void*)&__fatDeviceText); \ + { void (*callback_fp)(void **) = (void (*)(void **))(X); (*callback_fp)(__cudaFatCubinHandle); __cudaRegisterFatBinaryEnd(__cudaFatCubinHandle); }\ + atexit(__cudaUnregisterBinaryUtil) + +#define __cudaRegisterVariable(handle, var, ext, size, constant, global) \ + __cudaRegisterVar(handle, (char*)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global) +#define __cudaRegisterManagedVariable(handle, var, ext, size, constant, global) \ + __cudaRegisterManagedVar(handle, (void **)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global) + +#define __cudaRegisterGlobalTexture(handle, tex, dim, norm, ext) \ + __cudaRegisterTexture(handle, (const struct textureReference*)&tex, (const void**)(void*)__device##tex, __name##tex, dim, norm, ext) +#define __cudaRegisterGlobalSurface(handle, surf, dim, ext) \ + __cudaRegisterSurface(handle, (const struct 
surfaceReference*)&surf, (const void**)(void*)__device##surf, __name##surf, dim, ext) +#define __cudaRegisterEntry(handle, funptr, fun, thread_limit) \ + __cudaRegisterFunction(handle, (const char*)funptr, (char*)__device_fun(fun), #fun, -1, (uint3*)0, (uint3*)0, (dim3*)0, (dim3*)0, (int*)0) + +extern "C" cudaError_t CUDARTAPI __cudaPopCallConfiguration( + dim3 *gridDim, + dim3 *blockDim, + size_t *sharedMem, + void *stream +); + +#define __cudaLaunchPrologue(size) \ + void * __args_arr[size]; \ + int __args_idx = 0 + +#define __cudaSetupArg(arg, offset) \ + __args_arr[__args_idx] = (void *)__cudaAddressOf(arg); ++__args_idx + +#define __cudaSetupArgSimple(arg, offset) \ + __args_arr[__args_idx] = (void *)(char *)&arg; ++__args_idx + +#if defined(__GNUC__) +#define __NV_ATTR_UNUSED_FOR_LAUNCH __attribute__((unused)) +#else /* !__GNUC__ */ +#define __NV_ATTR_UNUSED_FOR_LAUNCH +#endif /* __GNUC__ */ + +/* the use of __args_idx in the expression below avoids host compiler warning about it being an + unused variable when the launch has no arguments */ +#define __cudaLaunch(fun) \ + { volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \ + dim3 __gridDim, __blockDim;\ + size_t __sharedMem; \ + cudaStream_t __stream; \ + if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \ + return; \ + if (__args_idx == 0) {\ + (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\ + } else { \ + (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\ + }\ + } + +#if defined(__GNUC__) +#define __nv_dummy_param_ref(param) \ + { volatile static void **__ref __attribute__((unused)); __ref = (volatile void **)param; } +#else /* __GNUC__ */ +#define __nv_dummy_param_ref(param) \ + { volatile static void **__ref; __ref = (volatile void **)param; } +#endif /* __GNUC__ */ + +static void ____nv_dummy_param_ref(void *param) __nv_dummy_param_ref(param) + 
+#define __REGISTERFUNCNAME_CORE(X) __cudaRegisterLinkedBinary##X +#define __REGISTERFUNCNAME(X) __REGISTERFUNCNAME_CORE(X) + +extern "C" { +void __REGISTERFUNCNAME( __NV_MODULE_ID ) ( void (*)(void **), void *, void *, void (*)(void *)); +} + +#define __TO_STRING_CORE(X) #X +#define __TO_STRING(X) __TO_STRING_CORE(X) + +extern "C" { +#if defined(_WIN32) +#pragma data_seg("__nv_module_id") + static const __declspec(allocate("__nv_module_id")) unsigned char __module_id_str[] = __TO_STRING(__NV_MODULE_ID); +#pragma data_seg() +#elif defined(__APPLE__) + static const unsigned char __module_id_str[] __attribute__((section ("__NV_CUDA,__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID); +#else + static const unsigned char __module_id_str[] __attribute__((section ("__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID); +#endif + +#undef __FATIDNAME_CORE +#undef __FATIDNAME +#define __FATIDNAME_CORE(X) __fatbinwrap##X +#define __FATIDNAME(X) __FATIDNAME_CORE(X) + +#define ____cudaRegisterLinkedBinary(X) \ +{ __REGISTERFUNCNAME(__NV_MODULE_ID) (( void (*)(void **))(X), (void *)&__FATIDNAME(__NV_MODULE_ID), (void *)&__module_id_str, (void (*)(void *))&____nv_dummy_param_ref); } + +} + +extern "C" { +extern void** CUDARTAPI __cudaRegisterFatBinary( + void *fatCubin +); + +extern void CUDARTAPI __cudaRegisterFatBinaryEnd( + void **fatCubinHandle +); + +extern void CUDARTAPI __cudaUnregisterFatBinary( + void **fatCubinHandle +); + +extern void CUDARTAPI __cudaRegisterVar( + void **fatCubinHandle, + char *hostVar, + char *deviceAddress, + const char *deviceName, + int ext, + size_t size, + int constant, + int global +); + +extern void CUDARTAPI __cudaRegisterManagedVar( + void **fatCubinHandle, + void **hostVarPtrAddress, + char *deviceAddress, + const char *deviceName, + int ext, + size_t size, + int constant, + int global +); + +extern char CUDARTAPI __cudaInitModule( + void **fatCubinHandle +); + +extern void CUDARTAPI __cudaRegisterTexture( + void **fatCubinHandle, + const struct 
textureReference *hostVar, + const void **deviceAddress, + const char *deviceName, + int dim, + int norm, + int ext +); + +extern void CUDARTAPI __cudaRegisterSurface( + void **fatCubinHandle, + const struct surfaceReference *hostVar, + const void **deviceAddress, + const char *deviceName, + int dim, + int ext +); + +extern void CUDARTAPI __cudaRegisterFunction( + void **fatCubinHandle, + const char *hostFun, + char *deviceFun, + const char *deviceName, + int thread_limit, + uint3 *tid, + uint3 *bid, + dim3 *bDim, + dim3 *gDim, + int *wSize +); + +#if defined(__APPLE__) +extern "C" int atexit(void (*)(void)); + +#elif defined(__GNUC__) && !defined(__ANDROID__) && !defined(__HORIZON__) +extern int atexit(void(*)(void)) throw(); + +#elif defined(__HORIZON__) + +// __TEMP_WAR__ 200132570 HOS : Disable atexit call until it works +#define atexit(p) + +#else /* __GNUC__ && !__ANDROID__ */ +extern int __cdecl atexit(void(__cdecl *)(void)); +#endif + +} + +static void **__cudaFatCubinHandle; + +static void __cdecl __cudaUnregisterBinaryUtil(void) +{ + ____nv_dummy_param_ref((void *)&__cudaFatCubinHandle); + __cudaUnregisterFatBinary(__cudaFatCubinHandle); +} + +static char __nv_init_managed_rt_with_module(void **handle) +{ + return __cudaInitModule(handle); +} + +#include "common_functions.h" + +#pragma pack() + +#if defined(_WIN32) + +#pragma warning(disable: 4099) + +#if !defined(_WIN64) + +#pragma warning(disable: 4408) + +#endif /* !_WIN64 */ + +#endif /* _WIN32 */ + +#endif /* !__CUDA_INTERNAL_COMPILATION__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.h 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..279a68cc38efd25935f0e5bc65934d0d378a2c19 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.h @@ -0,0 +1,12210 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/math_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/math_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H__ +#endif + +#if !defined(__MATH_FUNCTIONS_H__) +#define __MATH_FUNCTIONS_H__ + +#if defined(__QNX__) && (__GNUC__ >= 5) && defined(__CUDACC__) +#if __has_include(<__config>) +#include <__config> +#endif +#endif + +/** + * \defgroup CUDA_MATH Mathematical Functions + * + * CUDA mathematical functions are always available in device code. + * + * Host implementations of the common mathematical functions are mapped + * in a platform-specific way to standard math library functions, provided + * by the host compiler and respective host libm where available. + * Some functions, not available with the host compilers, are implemented + * in crt/math_functions.hpp header file. + * For example, see ::erfinv(). Other, less common functions, + * like ::rhypot(), ::cyl_bessel_i0() are only available in device code. + * + * Note that many floating-point and integer functions names are + * overloaded for different argument types. For example, the ::log() + * function has the following prototypes: + * \code + * double log(double x); + * float log(float x); + * float logf(float x); + * \endcode + * + * Note also that due to implementation constraints, certain math functions + * from std:: namespace may be callable in device code even via explicitly + * qualified std:: names. However, such use is discouraged, since this + * capability is unsupported, unverified, undocumented, not portable, and + * may change without notice. 
+ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "host_defines.h" + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +extern "C" +{ + +/* Define math function DOXYGEN toplevel groups, functions will + be added to these groups later. +*/ +/** + * \defgroup CUDA_MATH_SINGLE Single Precision Mathematical Functions + * This section describes single precision mathematical functions. + * To use these functions you do not need to include any additional + * header files in your program. + */ + +/** + * \defgroup CUDA_MATH_DOUBLE Double Precision Mathematical Functions + * This section describes double precision mathematical functions. + * To use these functions you do not need to include any additional + * header files in your program. + */ + +/** + * \defgroup CUDA_MATH_INT Integer Mathematical Functions + * This section describes integer mathematical functions. + * To use these functions you do not need to include any additional + * header files in your program. + */ + +/** + * \defgroup CUDA_MATH_INTRINSIC_SINGLE Single Precision Intrinsics + * This section describes single precision intrinsic functions that are + * only supported in device code. + * To use these functions you do not need to include any additional + * header files in your program. 
+ */ + +/** + * \defgroup CUDA_MATH_INTRINSIC_DOUBLE Double Precision Intrinsics + * This section describes double precision intrinsic functions that are + * only supported in device code. + * To use these functions you do not need to include any additional + * header files in your program. + */ + +/** + * \defgroup CUDA_MATH_INTRINSIC_INT Integer Intrinsics + * This section describes integer intrinsic functions that are + * only supported in device code. + * To use these functions you do not need to include any additional + * header files in your program. + */ + +/** + * \defgroup CUDA_MATH_INTRINSIC_CAST Type Casting Intrinsics + * This section describes type casting intrinsic functions that are + * only supported in device code. + * To use these functions you do not need to include any additional + * header files in your program. + */ + +/** + * + * \defgroup CUDA_MATH_INTRINSIC_SIMD SIMD Intrinsics + * This section describes SIMD intrinsic functions that are + * only supported in device code. + * To use these functions you do not need to include any additional + * header files in your program. + */ + + +/** + * @} + */ +#define __DEVICE_FUNCTIONS_DECL__ __host__ __device__ +#if !defined(_MSC_VER) +#define __CUDA_MATH_CRTIMP +#else +#if _MSC_VER < 1900 +#define __CUDA_MATH_CRTIMP _CRTIMP +#else +#define __CUDA_MATH_CRTIMP _ACRTIMP +#endif +#endif + +#if defined(__ANDROID__) && (__ANDROID_API__ <= 20) && !defined(__aarch64__) +static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ int abs(int); +static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long int labs(long int); +static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long long int llabs(long long int); +#else /* __ANDROID__ */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the absolute value of the input \p int argument. 
+ * + * Calculate the absolute value of the input argument \p a. + * + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ int __cdecl abs(int a) __THROW; +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the absolute value of the input \p long \p int argument. + * + * Calculate the absolute value of the input argument \p a. + * + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long int __cdecl labs(long int a) __THROW; +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the absolute value of the input \p long \p long \p int argument. + * + * Calculate the absolute value of the input argument \p a. + * + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long long int llabs(long long int a) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} +#endif +#endif /* __ANDROID__ */ + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +/* put all math functions in std */ +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the absolute value of the input argument. + * + * Calculate the absolute value of the input argument \p x. + * + * \return + * Returns the absolute value of the input argument. + * - fabs( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - fabs( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns +0. + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl fabs(double x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the absolute value of its argument + * + * Calculate the absolute value of the input argument \p x. + * + * \return + * Returns the absolute value of its argument. 
+ * - fabsf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - fabsf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns +0. + * - fabsf(NaN) returns an unspecified NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fabsf(float x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int min(const int a, const int b); +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int umin(const unsigned int a, const unsigned int b); +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llmin(const long long int a, const long long int b); +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int ullmin(const unsigned long long int a, const unsigned long long int b); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Determine the minimum numeric value of the arguments. 
+ * + * Determines the minimum numeric value of the arguments \p x and \p y. Treats NaN + * arguments as missing data. If one argument is a NaN and the other is legitimate numeric + * value, the numeric value is chosen. + * + * \return + * Returns the minimum numeric value of the arguments \p x and \p y. + * - If both arguments are NaN, returns NaN. + * - If one argument is NaN, returns the numeric argument. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fminf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fminf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Determine the minimum numeric value of the arguments. + * + * Determines the minimum numeric value of the arguments \p x and \p y. Treats NaN + * arguments as missing data. If one argument is a NaN and the other is legitimate numeric + * value, the numeric value is chosen. + * + * \return + * Returns the minimum numeric value of the arguments \p x and \p y. + * - If both arguments are NaN, returns NaN. + * - If one argument is NaN, returns the numeric argument. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fmin(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fmin(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. 
+ */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int max(const int a, const int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int umax(const unsigned int a, const unsigned int b); +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llmax(const long long int a, const long long int b); +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int ullmax(const unsigned long long int a, const unsigned long long int b); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Determine the maximum numeric value of the arguments. + * + * Determines the maximum numeric value of the arguments \p x and \p y. Treats NaN + * arguments as missing data. If one argument is a NaN and the other is legitimate numeric + * value, the numeric value is chosen. + * + * \return + * Returns the maximum numeric values of the arguments \p x and \p y. + * - If both arguments are NaN, returns NaN. + * - If one argument is NaN, returns the numeric argument. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fmaxf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fmaxf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Determine the maximum numeric value of the arguments. + * + * Determines the maximum numeric value of the arguments \p x and \p y. Treats NaN + * arguments as missing data. If one argument is a NaN and the other is legitimate numeric + * value, the numeric value is chosen. + * + * \return + * Returns the maximum numeric values of the arguments \p x and \p y. + * - If both arguments are NaN, returns NaN. + * - If one argument is NaN, returns the numeric argument. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fmax(double, double) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fmax(double, double); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine of the input argument. + * + * Calculate the sine of the input argument \p x (measured in radians). + * + * \return + * - sin( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sin( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. 
+ * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl sin(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the cosine of the input argument. + * + * Calculate the cosine of the input argument \p x (measured in radians). + * + * \return + * - cos( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cos( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl cos(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine and cosine of the first input argument. + * + * Calculate the sine and cosine of the first input argument \p x (measured + * in radians). The results for sine and cosine are written into the + * second argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sin() and ::cos(). + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincos(double x, double *sptr, double *cptr) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine and cosine of the first input argument. + * + * Calculate the sine and cosine of the first input argument \p x (measured + * in radians). The results for sine and cosine are written into the second + * argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sinf() and ::cosf(). 
+ * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincosf(float x, float *sptr, float *cptr) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the tangent of the input argument. + * + * Calculate the tangent of the input argument \p x (measured in radians). + * + * \return + * - tan( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tan( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl tan(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the square root of the input argument. + * + * Calculate the nonnegative square root of \p x, + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * - sqrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sqrt( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - sqrt(\p x) returns NaN if \p x is less than 0. 
+ * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl sqrt(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the reciprocal of the square root of the input argument. + * + * Calculate the reciprocal of the nonnegative square root of \p x, + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * - rsqrt( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - rsqrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - rsqrt(\p x) returns NaN if \p x is less than 0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double rsqrt(double x); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the reciprocal of the square root of the input argument. + * + * Calculate the reciprocal of the nonnegative square root of \p x, + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * - rsqrtf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - rsqrtf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. 
+ * - rsqrtf(\p x) returns NaN if \p x is less than 0. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float rsqrtf(float x); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 2 logarithm of the input argument. + * + * Calculate the base 2 logarithm of the input argument \p x. + * + * \return + * - log2( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log2(1) returns +0. + * - log2(\p x) returns NaN for \p x < 0. + * - log2( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double log2(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl log2(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 2 exponential of the input argument. + * + * Calculate + * \latexonly $2^x$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * x + * + * + * \endxmlonly, + * the base 2 exponential of the input argument \p x. + * + * \return + * - exp2( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp2( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. 
+ * - exp2( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double exp2(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl exp2(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 2 exponential of the input argument. + * + * Calculate + * \latexonly $2^x$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * x + * + * + * \endxmlonly, + * the base 2 exponential of the input argument \p x. + * + * \return + * - exp2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp2f( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp2f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float exp2f(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl exp2f(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 10 exponential of the input argument. 
+ * + * Calculate + * \latexonly $10^x$ \endlatexonly + * \xmlonly + * + * + * + * 10 + * x + * + * + * \endxmlonly, + * the base 10 exponential of the input argument \p x. + * + * \return + * - exp10( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp10( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp10( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double exp10(double x) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 10 exponential of the input argument. + * + * Calculate + * \latexonly $10^x$ \endlatexonly + * \xmlonly + * + * + * + * 10 + * x + * + * + * \endxmlonly, + * the base 10 exponential of the input argument \p x. + * + * \return + * - exp10f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp10f( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp10f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float exp10f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument, minus 1. 
+ * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly + * -1, the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x, minus 1. + * + * \return + * - expm1( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - expm1( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns -1. + * - expm1( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double expm1(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl expm1(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument, minus 1. + * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly + * -1, the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x, minus 1. + * + * \return + * - expm1f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. 
+ * - expm1f( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns -1. + * - expm1f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float expm1f(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl expm1f(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 2 logarithm of the input argument. + * + * Calculate the base 2 logarithm of the input argument \p x. + * + * \return + * - log2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log2f(1) returns +0. + * - log2f(\p x) returns NaN for \p x < 0. + * - log2f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float log2f(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl log2f(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 10 logarithm of the input argument. + * + * Calculate the base 10 logarithm of the input argument \p x. 
+ * + * \return + * - log10( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log10(1) returns +0. + * - log10(\p x) returns NaN for \p x < 0. + * - log10( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl log10(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * logarithm of the input argument. + * + * Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * logarithm of the input argument \p x. + * + * \return + * - log( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log(1) returns +0. + * - log(\p x) returns NaN for \p x < 0. + * - log( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl log(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of + * \latexonly $\log_{e}(1+x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * ( + * 1 + * + + * x + * ) + * + * \endxmlonly. 
+ * + * Calculate the value of + * \latexonly $\log_{e}(1+x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * ( + * 1 + * + + * x + * ) + * + * \endxmlonly + * of the input argument \p x. + * + * \return + * - log1p( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - log1p(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log1p(\p x) returns NaN for \p x < -1. + * - log1p( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double log1p(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl log1p(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of + * \latexonly $\log_{e}(1+x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * ( + * 1 + * + + * x + * ) + * + * \endxmlonly. + * + * Calculate the value of + * \latexonly $\log_{e}(1+x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * ( + * 1 + * + + * x + * ) + * + * \endxmlonly + * of the input argument \p x. + * + * \return + * - log1pf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - log1pf(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. 
+ * - log1pf(\p x) returns NaN for \p x < -1. + * - log1pf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float log1pf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl log1pf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the largest integer less than or equal to \p x. + * + * Calculates the largest integer value which is less than or equal to \p x. + * + * \return + * Returns + * \latexonly $\lfloor x \rfloor$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * expressed as a floating-point number. + * - floor( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - floor( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl floor(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument. 
+ * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly, + * the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x. + * + * \return + * - exp( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl exp(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the hyperbolic cosine of the input argument. + * + * Calculate the hyperbolic cosine of the input argument \p x. + * + * \return + * - cosh( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cosh( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl cosh(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the hyperbolic sine of the input argument. + * + * Calculate the hyperbolic sine of the input argument \p x. + * + * \return + * - sinh( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. 
+ * - sinh( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl sinh(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the hyperbolic tangent of the input argument. + * + * Calculate the hyperbolic tangent of the input argument \p x. + * + * \return + * - tanh( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tanh( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl tanh(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the nonnegative inverse hyperbolic cosine of the input argument. + * + * Calculate the nonnegative inverse hyperbolic cosine of the input argument \p x. + * + * \return + * Result will be in the interval [0, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ]. + * - acosh(1) returns 0. + * - acosh(\p x) returns NaN for \p x in the interval [ + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , 1). + * - acosh( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. 
+ * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double acosh(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl acosh(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the nonnegative inverse hyperbolic cosine of the input argument. + * + * Calculate the nonnegative inverse hyperbolic cosine of the input argument \p x. + * + * \return + * Result will be in the interval [0, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ]. + * - acoshf(1) returns 0. + * - acoshf(\p x) returns NaN for \p x in the interval [ + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , 1). + * - acoshf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float acoshf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl acoshf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse hyperbolic sine of the input argument. + * + * Calculate the inverse hyperbolic sine of the input argument \p x. + * + * \return + * - asinh( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. 
+ * - asinh( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double asinh(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl asinh(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse hyperbolic sine of the input argument. + * + * Calculate the inverse hyperbolic sine of the input argument \p x. + * + * \return + * - asinhf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - asinhf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float asinhf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl asinhf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse hyperbolic tangent of the input argument. + * + * Calculate the inverse hyperbolic tangent of the input argument \p x. 
+ * + * \return + * - atanh( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - atanh( + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - atanh(\p x) returns NaN for \p x outside interval [-1, 1]. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double atanh(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl atanh(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse hyperbolic tangent of the input argument. + * + * Calculate the inverse hyperbolic tangent of the input argument \p x. + * + * \return + * - atanhf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - atanhf( + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - atanhf(\p x) returns NaN for \p x outside interval [-1, 1]. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float atanhf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl atanhf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of + * \latexonly $x\cdot 2^{exp}$ \endlatexonly + * \xmlonly + * + * + * x + * + * + * 2 + * + * e + * x + * p + * + * + * + * \endxmlonly. + * + * Calculate the value of + * \latexonly $x\cdot 2^{exp}$ \endlatexonly + * \xmlonly + * + * + * x + * + * + * 2 + * + * e + * x + * p + * + * + * + * + * \endxmlonly + * of the input arguments \p x and \p exp. + * + * \return + * - ldexp(\p x, \p exp) is equivalent to scalbn(\p x, \p exp). + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl ldexp(double x, int exp) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of + * \latexonly $x\cdot 2^{exp}$ \endlatexonly + * \xmlonly + * + * + * x + * + * + * 2 + * + * e + * x + * p + * + * + * + * \endxmlonly. + * + * Calculate the value of + * \latexonly $x\cdot 2^{exp}$ \endlatexonly + * \xmlonly + * + * + * x + * + * + * 2 + * + * e + * x + * p + * + * + * + * + * \endxmlonly + * of the input arguments \p x and \p exp. + * + * \return + * - ldexpf(\p x, \p exp) is equivalent to scalbnf(\p x, \p exp). + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float ldexpf(float x, int exp) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the floating-point representation of the exponent of the input argument. + * + * Calculate the floating-point representation of the exponent of the input argument \p x. 
+ * + * \return + * - logb( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly. + * - logb( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double logb(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl logb(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the floating-point representation of the exponent of the input argument. + * + * Calculate the floating-point representation of the exponent of the input argument \p x. + * + * \return + * - logbf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly. + * - logbf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float logbf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl logbf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute the unbiased integer exponent of the argument. 
+ * + * Calculates the unbiased integer exponent of the input argument \p x. + * + * \return + * - If successful, returns the unbiased exponent of the argument. + * - ilogb( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns INT_MIN. + * - ilogb(NaN) returns INT_MIN. + * - ilogb( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns INT_MAX. + * - Note: above behavior does not take into account FP_ILOGB0 nor FP_ILOGBNAN. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int ilogb(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP int __cdecl ilogb(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute the unbiased integer exponent of the argument. + * + * Calculates the unbiased integer exponent of the input argument \p x. + * + * \return + * - If successful, returns the unbiased exponent of the argument. + * - ilogbf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns INT_MIN. + * - ilogbf(NaN) returns INT_MIN. + * - ilogbf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns INT_MAX. + * - Note: above behavior does not take into account FP_ILOGB0 nor FP_ILOGBNAN. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int ilogbf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP int __cdecl ilogbf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Scale floating-point input by integer power of two. + * + * Scale \p x by + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * + * \endxmlonly + * by efficient manipulation of the floating-point + * exponent. + * + * \return + * Returns \p x * + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * \endxmlonly. + * - scalbn( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - scalbn(\p x, 0) returns \p x. + * - scalbn( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double scalbn(double x, int n) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl scalbn(double x, int n); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Scale floating-point input by integer power of two. + * + * Scale \p x by + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * + * \endxmlonly + * by efficient manipulation of the floating-point + * exponent. 
+ * + * \return + * Returns \p x * + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * \endxmlonly. + * - scalbnf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - scalbnf(\p x, 0) returns \p x. + * - scalbnf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float scalbnf(float x, int n) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl scalbnf(float x, int n); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Scale floating-point input by integer power of two. + * + * Scale \p x by + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * + * \endxmlonly + * by efficient manipulation of the floating-point + * exponent. + * + * \return + * Returns \p x * + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * \endxmlonly. + * - scalbln( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - scalbln(\p x, 0) returns \p x. + * - scalbln( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. 
+ */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double scalbln(double x, long int n) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl scalbln(double x, long int n); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Scale floating-point input by integer power of two. + * + * Scale \p x by + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * + * \endxmlonly + * by efficient manipulation of the floating-point + * exponent. + * + * \return + * Returns \p x * + * \latexonly $2^n$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * n + * + * + * \endxmlonly. + * - scalblnf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - scalblnf(\p x, 0) returns \p x. + * - scalblnf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p n) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float scalblnf(float x, long int n) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl scalblnf(float x, long int n); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Extract mantissa and exponent of a floating-point value + * + * Decompose the floating-point value \p x into a component \p m for the + * normalized fraction element and another term \p n for the exponent. 
 * The absolute value of \p m will be greater than or equal to 0.5 and
 * less than 1.0 or it will be equal to 0; x = m * 2^n.
 * The integer exponent \p n will be stored in the location to which \p nptr points.
 *
 * \return
 * Returns the fractional component \p m.
 * - frexp(+-0, \p nptr) returns +-0 and stores zero in the location
 *   pointed to by \p nptr.
 * - frexp(+-infinity, \p nptr) returns +-infinity and stores an
 *   unspecified value in the location to which \p nptr points.
 * - frexp(NaN, \p y) returns a NaN and stores an unspecified value in the
 *   location to which \p nptr points.
 *
 * \note_accuracy_double
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl frexp(double x, int *nptr) __THROW;
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Extract mantissa and exponent of a floating-point value
 *
 * Decomposes the floating-point value \p x into a component \p m for the
 * normalized fraction element and another term \p n for the exponent.
 * The absolute value of \p m will be greater than or equal to 0.5 and
 * less than 1.0 or it will be equal to 0; x = m * 2^n.
 * The integer exponent \p n will be stored in the location to which \p nptr points.
 *
 * \return
 * Returns the fractional component \p m.
 * - frexpf(+-0, \p nptr) returns +-0 and stores zero in the location
 *   pointed to by \p nptr.
 * - frexpf(+-infinity, \p nptr) returns +-infinity and stores an
 *   unspecified value in the location to which \p nptr points.
 * - frexpf(NaN, \p y) returns a NaN and stores an unspecified value in the
 *   location to which \p nptr points.
 *
 * \note_accuracy_single
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float frexpf(float x, int *nptr) __THROW;
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round to nearest integer value in floating-point.
 *
 * Round \p x to the nearest integer value in floating-point format,
 * with halfway cases rounded away from zero.
 *
 * \return
 * Returns rounded integer value.
 * - round(+-0) returns +-0.
 * - round(+-infinity) returns +-infinity.
 *
 * \note_slow_round See ::rint().
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double round(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl round(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round to nearest integer value in floating-point.
 *
 * Round \p x to the nearest integer value in floating-point format,
 * with halfway cases rounded away from zero.
 *
 * \return
 * Returns rounded integer value.
 * - roundf(+-0) returns +-0.
 * - roundf(+-infinity) returns +-infinity.
 *
 * \note_slow_round See ::rintf().
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float roundf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl roundf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round to nearest integer value.
 *
 * Round \p x to the nearest integer value, with halfway cases rounded
 * away from zero. If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 *
 * \note_slow_round See ::lrint().
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lround(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lround(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round to nearest integer value.
 *
 * Round \p x to the nearest integer value, with halfway cases rounded
 * away from zero. If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 *
 * \note_slow_round See ::lrintf().
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lroundf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lroundf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round to nearest integer value.
 *
 * Round \p x to the nearest integer value, with halfway cases rounded
 * away from zero. If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 *
 * \note_slow_round See ::llrint().
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llround(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llround(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round to nearest integer value.
 *
 * Round \p x to the nearest integer value, with halfway cases rounded
 * away from zero.
If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 *
 * \note_slow_round See ::llrintf().
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llroundf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llroundf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round to nearest integer value in floating-point.
 *
 * Round \p x to the nearest integer value in floating-point format,
 * with halfway cases rounded to the nearest even integer value.
 *
 * \return
 * Returns rounded integer value.
 * - rint(+-0) returns +-0.
 * - rint(+-infinity) returns +-infinity.
 */
#if defined(__CUDA_ARCH__) || defined(__DOXYGEN_ONLY__)
/*
 * We don't generate the declaration of rint for host compilation.
 * This is actually a workaround to compile the boost header file when
 * Clang 3.8 is used as the host compiler. The boost header file has
 * the following example code:
 * namespace NS { extern "C" { double rint(double); }
 * }
 *
 * After preprocessing, we get something like below:
 *
 * extern "C" { double rint(double x) throw(); }
 * # 30 "/usr/include/math.h" 3
 * extern "C" { double rint(double x) throw(); }
 * namespace NS { extern "C" { double rint(double); } }
 *
 * Although GCC accepts this output, Clang 3.8 doesn't.
 * Furthermore, we cannot change the boost header file by adding "throw()"
 * to rint's declaration there. So, as a workaround, we just don't generate
 * our re-declaration for the host compilation.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double rint(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl rint(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
#endif /* __CUDA_ARCH__ || __DOXYGEN_ONLY__ */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round input to nearest integer value in floating-point.
 *
 * Round \p x to the nearest integer value in floating-point format,
 * with halfway cases rounded to the nearest even integer value.
 *
 * \return
 * Returns rounded integer value.
 * - rintf(+-0) returns +-0.
 * - rintf(+-infinity) returns +-infinity.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float rintf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl rintf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round input to nearest integer value.
 *
 * Round \p x to the nearest integer value,
 * with halfway cases rounded to the nearest even integer value.
 * If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lrint(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lrint(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round input to nearest integer value.
 *
 * Round \p x to the nearest integer value,
 * with halfway cases rounded to the nearest even integer value.
 * If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lrintf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lrintf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round input to nearest integer value.
 *
 * Round \p x to the nearest integer value,
 * with halfway cases rounded to the nearest even integer value.
 * If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llrint(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llrint(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round input to nearest integer value.
 *
 * Round \p x to the nearest integer value,
 * with halfway cases rounded to the nearest even integer value.
 * If the result is outside the range of the return type,
 * the behavior is undefined.
 *
 * \return
 * Returns rounded integer value.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llrintf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llrintf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Round the input argument to the nearest integer.
 *
 * Round argument \p x to an integer value in double precision floating-point
 * format. Uses round to nearest rounding, with ties rounding to even.
 *
 * \return
 * - nearbyint(+-0) returns +-0.
 * - nearbyint(+-infinity) returns +-infinity.
 *
 * \note_accuracy_double
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double nearbyint(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl nearbyint(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Round the input argument to the nearest integer.
 *
 * Round argument \p x to an integer value in single precision floating-point
 * format. Uses round to nearest rounding, with ties rounding to even.
 *
 * \return
 * - nearbyintf(+-0) returns +-0.
 * - nearbyintf(+-infinity) returns +-infinity.
 *
 * \note_accuracy_single
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float nearbyintf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl nearbyintf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate ceiling of the input argument.
 *
 * Compute the smallest integer value not less than \p x.
 *
 * \return
 * Returns ceil(x) expressed as a floating-point number.
 * - ceil(+-0) returns +-0.
 * - ceil(+-infinity) returns +-infinity.
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl ceil(double x) __THROW;
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Truncate input argument to the integral part.
 *
 * Round \p x to the nearest integer value that does not exceed \p x in
 * magnitude.
 *
 * \return
 * Returns truncated integer value.
 * - trunc(+-0) returns +-0.
 * - trunc(+-infinity) returns +-infinity.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double trunc(double x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl trunc(double x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Truncate input argument to the integral part.
 *
 * Round \p x to the nearest integer value that does not exceed \p x in
 * magnitude.
 *
 * \return
 * Returns truncated integer value.
 * - truncf(+-0) returns +-0.
 * - truncf(+-infinity) returns +-infinity.
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float truncf(float x) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl truncf(float x);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Compute the positive difference between \p x and \p y.
 *
 * Compute the positive difference between \p x and \p y.
The positive
 * difference is \p x - \p y when \p x > \p y and +0 otherwise.
 *
 * \return
 * Returns the positive difference between \p x and \p y.
 * - fdim(\p x, \p y) returns \p x - \p y if \p x > \p y.
 * - fdim(\p x, \p y) returns +0 if \p x <= \p y.
 *
 * \note_accuracy_double
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fdim(double x, double y) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fdim(double x, double y);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Compute the positive difference between \p x and \p y.
 *
 * Compute the positive difference between \p x and \p y. The positive
 * difference is \p x - \p y when \p x > \p y and +0 otherwise.
 *
 * \return
 * Returns the positive difference between \p x and \p y.
 * - fdimf(\p x, \p y) returns \p x - \p y if \p x > \p y.
 * - fdimf(\p x, \p y) returns +0 if \p x <= \p y.
 *
 * \note_accuracy_single
 */
#if (!defined(_MSC_VER) || _MSC_VER < 1800)
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fdimf(float x, float y) __THROW;
#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fdimf(float x, float y);
#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the arc tangent of the ratio of first and second input arguments.
 *
 * Calculate the principal value of the arc tangent of the ratio of first
 * and second input arguments \p y / \p x. The quadrant of the result is
 * determined by the signs of inputs \p y and \p x.
 *
 * \return
 * Result will be in radians, in the interval [-pi, +pi].
 * - atan2(+-0, -0) returns +-pi.
 * - atan2(+-0, +0) returns +-0.
 * - atan2(+-0, \p x) returns +-pi for \p x < 0.
 * - atan2(+-0, \p x) returns +-0 for \p x > 0.
 * - atan2(\p y, +-0) returns -pi/2 for \p y < 0.
 * - atan2(\p y, +-0) returns pi/2 for \p y > 0.
 * - atan2(+-\p y, -infinity) returns +-pi for finite \p y > 0.
 * - atan2(+-\p y, +infinity) returns +-0 for finite \p y > 0.
 * - atan2(+-infinity, \p x) returns +-pi/2 for finite \p x.
 * - atan2(+-infinity, -infinity) returns +-3*pi/4.
 * - atan2(+-infinity, +infinity) returns +-pi/4.
 *
 * \note_accuracy_double
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl atan2(double y, double x) __THROW;
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the arc tangent of the input argument.
 *
 * Calculate the principal value of the arc tangent of the input argument \p x.
 *
 * \return
 * Result will be in radians, in the interval [-pi/2, +pi/2].
 * - atan(+-0) returns +-0.
 * - atan(+-infinity) returns +-pi/2.
 *
 * \note_accuracy_double
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl atan(double x) __THROW;
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the arc cosine of the input argument.
 *
 * Calculate the principal value of the arc cosine of the input argument \p x.
 *
 * \return
 * Result will be in radians, in the interval [0, pi] for \p x inside [-1, +1].
 * - acos(1) returns +0.
 * - acos(\p x) returns NaN for \p x outside [-1, +1].
 *
 * \note_accuracy_double
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl acos(double x) __THROW;
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the arc sine of the input argument.
 *
 * Calculate the principal value of the arc sine of the input argument \p x.
 *
 * \return
 * Result will be in radians, in the interval [-pi/2, +pi/2] for \p x inside [-1, +1].
 * - asin(+-0) returns +-0.
 * - asin(\p x) returns NaN for \p x outside [-1, +1].
 *
 * \note_accuracy_double
 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl asin(double x) __THROW;
/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the square root of the sum of squares of two arguments.
 *
 * Calculate the length of the hypotenuse of a right triangle whose two sides have lengths
 * \p x and \p y without undue overflow or underflow.
 *
 * \return Returns the length of the hypotenuse sqrt(x^2 + y^2).
 * - hypot(\p x,\p y), hypot(\p y,\p x), and hypot(\p x, \p -y) are equivalent.
 * - hypot(\p x, +-0) is equivalent to fabs(\p x).
 * - hypot(+-infinity, \p y) returns +infinity, even if \p y is a NaN.
 *
 * \note_accuracy_double
 */
#if defined(_WIN32)
#if defined(_MSC_VER) && _MSC_VER < 1900
static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __CRTDECL hypot(double x, double y);
#else
extern _ACRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl hypot(double x, double y);
#endif
#else /* _WIN32 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double hypot(double x, double y) __THROW;
#endif /* _WIN32 */

#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
} /* std */
#endif

/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate one over the square root of the sum of squares of two arguments.
 *
 * Calculate one over the length of the hypotenuse of a right triangle whose two sides have
 * lengths \p x and \p y without undue overflow or underflow.
 *
 * \return Returns one over the length of the hypotenuse 1 / sqrt(x^2 + y^2).
 * - rhypot(\p x,\p y), rhypot(\p y,\p x), and rhypot(\p x, \p -y) are equivalent.
 * - rhypot(+-infinity, \p y) returns +0, even if \p y is a NaN.
 *
 * \note_accuracy_double
 */
extern __device__ __device_builtin__ double rhypot(double x, double y) __THROW;

#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
namespace std {
#endif
/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Calculate the square root of the sum of squares of two arguments.
 *
 * Calculates the length of the hypotenuse of a right triangle whose two sides have lengths
 * \p x and \p y without undue overflow or underflow.
 *
 * \return Returns the length of the hypotenuse sqrt(x^2 + y^2).
 * - hypotf(\p x,\p y), hypotf(\p y,\p x), and hypotf(\p x, \p -y) are equivalent.
 * - hypotf(\p x, +-0) is equivalent to fabsf(\p x).
 * - hypotf(+-infinity, \p y) returns +infinity, even if \p y is a NaN.
 *
 * \note_accuracy_single
 */
#if defined(_WIN32)
static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __CRTDECL hypotf(float x, float y);
#else /* _WIN32 */
extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float hypotf(float x, float y) __THROW;
#endif /* _WIN32 */

#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
} /* std */
#endif

/**
 * \ingroup CUDA_MATH_SINGLE
 * \brief Calculate one over the square root of the sum of squares of two arguments.
 *
 * Calculates one over the length of the hypotenuse of a right triangle whose two sides have
 * lengths \p x and \p y without undue overflow or underflow.
 *
 * \return Returns one over the length of the hypotenuse 1 / sqrt(x^2 + y^2).
 * - rhypotf(\p x,\p y), rhypotf(\p y,\p x), and rhypotf(\p x, \p -y) are equivalent.
 * - rhypotf(+-infinity, \p y) returns +0, even if \p y is a NaN.
 *
 * \note_accuracy_single
 */
extern __device__ __device_builtin__ float rhypotf(float x, float y) __THROW;

/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the square root of the sum of squares of three coordinates of the argument.
 *
 * Calculate the length of three dimensional vector in Euclidean space without undue overflow or underflow.
 *
 * \return Returns the length of 3D vector sqrt(a^2 + b^2 + c^2).
 * - In the presence of an exactly infinite coordinate +infinity
 *   is returned, even if there are NaNs.
 *
 * \note_accuracy_double
 */
extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl norm3d(double a, double b, double c) __THROW;

/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate one over the square root of the sum of squares of three coordinates.
 *
 * Calculate one over the length of three dimensional vector in Euclidean space without undue overflow or underflow.
 *
 * \return Returns one over the length of the 3D vector 1 / sqrt(a^2 + b^2 + c^2).
 * - In the presence of an exactly infinite coordinate +0
 *   is returned, even if there are NaNs.
 *
 * \note_accuracy_double
 */
extern __device__ __device_builtin__ double rnorm3d(double a, double b, double c) __THROW;

/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate the square root of the sum of squares of four coordinates of the argument.
 *
 * Calculate the length of four dimensional vector in Euclidean space without undue overflow or underflow.
 *
 * \return Returns the length of 4D vector sqrt(a^2 + b^2 + c^2 + d^2).
 * - In the presence of an exactly infinite coordinate +infinity
 *   is returned, even if there are NaNs.
 *
 * \note_accuracy_double
 */
extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl norm4d(double a, double b, double c, double d) __THROW;

/**
 * \ingroup CUDA_MATH_DOUBLE
 * \brief Calculate one over the square root of the sum of squares of four coordinates.
+ * + * Calculate one over the length of four dimensional vector in Euclidean space without undue overflow or underflow. + * + * \return Returns one over the length of the 3D vector + * \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2+d^2}}$ \endlatexonly + * \xmlonly + * + * + * + * + * 1 + * + * + * + * + * + * 2 + * + * + + * + * b + * 2 + * + * + + * + * c + * 2 + * + * + + * + * d + * 2 + * + * + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+0$ \endlatexonly + * \xmlonly + * + * + * + + * 0 + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double rnorm4d(double a, double b, double c, double d) __THROW; + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the square root of the sum of squares of any number of coordinates. + * + * Calculate the length of a vector p, dimension of which is passed as an argument \p without undue overflow or underflow. + * + * \return Returns the length of the dim-D vector + * \latexonly $\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly + * \xmlonly + * + * + * + * + * + * p + * 0 + * + * 2 + * + * + + * + * + * p + * 1 + * + * 2 + * + * + ... + + * + * + * p + * + * dim + * - + * 1 + * + * + * 2 + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_double + */ + __device__ __device_builtin__ double norm(int dim, double const * p) __THROW; + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the reciprocal of square root of the sum of squares of any number of coordinates. + * + * Calculates one over the length of vector \p p, dimension of which is passed as an argument, in Euclidean space without undue overflow or underflow. 
+ * + * \return Returns one over the length of the vector + * \latexonly $\frac{1}{\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly + * \xmlonly + * + * + * + * + * 1 + * + * + * + * + * + * p + * 0 + * + * 2 + * + * + + * + * + * p + * 1 + * + * 2 + * + * + ... + + * + * + * p + * + * dim + * - + * 1 + * + * + * 2 + * + * + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+0$ \endlatexonly + * \xmlonly + * + * + * + + * 0 + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ double rnorm(int dim, double const * p) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the reciprocal of square root of the sum of squares of any number of coordinates. + * + * Calculates one over the length of vector \p p, dimension of which is passed as an argument, in Euclidean space without undue overflow or underflow. + * + * \return Returns one over the length of the vector + * \latexonly $\frac{1}{\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly + * \xmlonly + * + * + * + * + * 1 + * + * + * + * + * + * p + * 0 + * + * 2 + * + * + + * + * + * p + * 1 + * + * 2 + * + * + ... + + * + * + * p + * + * dim + * - + * 1 + * + * + * 2 + * + * + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+0$ \endlatexonly + * \xmlonly + * + * + * + + * 0 + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_single + */ + +extern __device__ __device_builtin__ float rnormf(int dim, float const * p) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the square root of the sum of squares of any number of coordinates. + * + * Calculates the length of a vector \p p, dimension of which is passed as an argument without undue overflow or underflow. 
+ * + * \return Returns the length of the dim-D vector + * \latexonly $\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly + * \xmlonly + * + * + * + * + * + * p + * 0 + * + * 2 + * + * + + * + * + * p + * 1 + * + * 2 + * + * + ... + + * + * + * p + * + * dim + * - + * 1 + * + * + * 2 + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_single + */ + __device__ __device_builtin__ float normf(int dim, float const * p) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the square root of the sum of squares of three coordinates of the argument. + * + * Calculates the length of three dimensional vector in Euclidean space without undue overflow or underflow. + * + * \return Returns the length of the 3D vector + * \latexonly $\sqrt{a^2+b^2+c^2}$ \endlatexonly + * \xmlonly + * + * + * + * + * a + * 2 + * + * + + * + * b + * 2 + * + * + + * + * c + * 2 + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_single + */ + +extern __device__ __device_builtin__ float norm3df(float a, float b, float c) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate one over the square root of the sum of squares of three coordinates. + * + * Calculates one over the length of three dimension vector in Euclidean space without undue overflow or underflow. + * + * \return Returns one over the length of the 3D vector + * \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2}}$ \endlatexonly + * \xmlonly + * + * + * + * + * 1 + * + * + * + * + * a + * 2 + * + * + + * + * b + * 2 + * + * + + * + * c + * 2 + * + * + * + * + * + * \endxmlonly. 
+ * - In the presence of an exactly infinite coordinate + * \latexonly $+0$ \endlatexonly + * \xmlonly + * + * + * + + * 0 + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float rnorm3df(float a, float b, float c) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the square root of the sum of squares of four coordinates of the argument. + * + * Calculates the length of four dimensional vector in Euclidean space without undue overflow or underflow. + * + * \return Returns the length of the 4D vector + * \latexonly $\sqrt{a^2+b^2+c^2+d^2}$ \endlatexonly + * \xmlonly + * + * + * + * + * a + * 2 + * + * + + * + * b + * 2 + * + * + + * + * c + * 2 + * + * + + * + * d + * 2 + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * is returned, even if there are NaNs. + * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float norm4df(float a, float b, float c, float d) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate one over the square root of the sum of squares of four coordinates. + * + * Calculates one over the length of four dimension vector in Euclidean space without undue overflow or underflow. + * + * \return Returns one over the length of the 3D vector + * \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2+d^2}}$ \endlatexonly + * \xmlonly + * + * + * + * + * 1 + * + * + * + * + * a + * 2 + * + * + + * + * b + * 2 + * + * + + * + * c + * 2 + * + * + + * + * d + * 2 + * + * + * + * + * + * \endxmlonly. + * - In the presence of an exactly infinite coordinate + * \latexonly $+0$ \endlatexonly + * \xmlonly + * + * + * + + * 0 + * + * \endxmlonly + * is returned, even if there are NaNs. 
+ * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float rnorm4df(float a, float b, float c, float d) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the cube root of the input argument. + * + * Calculate the cube root of \p x, + * \latexonly $x^{1/3}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * 1 + * + * / + * + * 3 + * + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $x^{1/3}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * 1 + * + * / + * + * 3 + * + * + * + * \endxmlonly. + * - cbrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - cbrt( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double cbrt(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl cbrt(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the cube root of the input argument. + * + * Calculate the cube root of \p x, + * \latexonly $x^{1/3}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * 1 + * + * / + * + * 3 + * + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $x^{1/3}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * 1 + * + * / + * + * 3 + * + * + * + * \endxmlonly. 
+ * - cbrtf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - cbrtf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float cbrtf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl cbrtf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate reciprocal cube root function. + * + * Calculate reciprocal cube root function of \p x. + * + * \return + * - rcbrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - rcbrt( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double rcbrt(double x); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate reciprocal cube root function. + * + * Calculate reciprocal cube root function of \p x. + * + * \return + * - rcbrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. 
+ * - rcbrt( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float rcbrtf(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the sine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - sinpi( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinpi( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double sinpi(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the sine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - sinpif( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinpif( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. 
+ * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sinpif(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the cosine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the cosine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - cospi( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cospi( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double cospi(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the cosine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the cosine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - cospif( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cospif( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float cospif(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine and cosine of the first input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. 
+ * + * Calculate the sine and cosine of the first input argument, \p x (measured in radians), + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. The results for sine and cosine are written into the + * second argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sinpi() and ::cospi(). + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincospi(double x, double *sptr, double *cptr); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine and cosine of the first input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the sine and cosine of the first input argument, \p x (measured in radians), + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. The results for sine and cosine are written into the + * second argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sinpif() and ::cospif(). + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincospif(float x, float *sptr, float *cptr); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of first argument to the power of second argument. + * + * Calculate the value of \p x to the power of \p y. + * + * \return + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * for \p y an odd integer less than 0. 
+ * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * +* + + * + * + * + * \endxmlonly + * for \p y less than 0 and not an odd integer. + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns +0 for \p y > 0 and not an odd integer. + * - pow(-1, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns 1. + * - pow(+1, \p y) returns 1 for any \p y, even a NaN. + * - pow(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1 for any \p x, even a NaN. + * - pow(\p x, \p y) returns a NaN for finite \p x < 0 and finite non-integer \p y. + * - pow(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - pow(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. + * - pow(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. 
+ * - pow(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns -0 for \p y an odd integer less than 0. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0 and not an odd integer. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0 and not an odd integer. + * - pow( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0. + * - pow( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl pow(double x, double y) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Break down the input argument into fractional and integral parts. + * + * Break down the argument \p x into fractional and integral parts. The + * integral part is stored in the argument \p iptr. 
+ * Fractional and integral parts are given the same sign as the argument \p x. + * + * \return + * - modf( + * \latexonly $\pm x$ \endlatexonly + * \xmlonly + * + * + * ± + * x + * + * + * \endxmlonly + * , \p iptr) returns a result with the same sign as \p x. + * - modf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p iptr) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * and stores + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * in the object pointed to by \p iptr. + * - modf(NaN, \p iptr) stores a NaN in the object pointed to by \p iptr and returns a NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl modf(double x, double *iptr) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the double-precision floating-point remainder of \p x / \p y. + * + * Calculate the double-precision floating-point remainder of \p x / \p y. + * The floating-point remainder of the division operation \p x / \p y calculated + * by this function is exactly the value x - n*y, where \p n is \p x / \p y with its fractional part truncated. + * The computed value will have the same sign as \p x, and its magnitude will be less than the magnitude of \p y. + * + * \return + * - Returns the floating-point remainder of \p x / \p y. + * - fmod( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * if \p y is not zero. + * - fmod(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x if \p x is finite. 
+ * - fmod(\p x, \p y) returns NaN if \p x is + * \latexonly $\pm\infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * or \p y is zero. + * - If either argument is NaN, NaN is returned. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl fmod(double x, double y) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute double-precision floating-point remainder. + * + * Compute double-precision floating-point remainder \p r of dividing + * \p x by \p y for nonzero \p y. Thus + * \latexonly $ r = x - n y$ \endlatexonly + * \xmlonly + * + * + * r + * = + * x + * + * n + * y + * + * \endxmlonly. + * The value \p n is the integer value nearest + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * In the case when + * \latexonly $ | n -\frac{x}{y} | = \frac{1}{2} $ \endlatexonly + * \xmlonly + * + * + * + * | + * + * n + * + * + * x + * y + * + * + * | + * + * = + * + * 1 + * 2 + * + * + * + * \endxmlonly + * , the + * even \p n value is chosen. + * + * \return + * - remainder(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns NaN. + * - remainder( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y) returns NaN. + * - remainder(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x for finite \p x. 
+ * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double remainder(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl remainder(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute single-precision floating-point remainder. + * + * Compute single-precision floating-point remainder \p r of dividing + * \p x by \p y for nonzero \p y. Thus + * \latexonly $ r = x - n y$ \endlatexonly + * \xmlonly + * + * + * r + * = + * x + * + * n + * y + * + * \endxmlonly. + * The value \p n is the integer value nearest + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * In the case when + * \latexonly $ | n -\frac{x}{y} | = \frac{1}{2} $ \endlatexonly + * \xmlonly + * + * + * + * | + * + * n + * + * + * x + * y + * + * + * | + * + * = + * + * 1 + * 2 + * + * + * + * \endxmlonly + * , the + * even \p n value is chosen. + * + * \return + * - remainderf(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns NaN. + * - remainderf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y) returns NaN. + * - remainderf(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x for finite \p x. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float remainderf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl remainderf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute double-precision floating-point remainder and part of quotient. + * + * Compute a double-precision floating-point remainder in the same way as the + * ::remainder() function. Argument \p quo returns part of quotient upon + * division of \p x by \p y. Value \p quo has the same sign as + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * + * \endxmlonly + * and may not be the exact quotient but agrees with the exact quotient + * in the low order 3 bits. + * + * \return + * Returns the remainder. + * - remquo(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquo( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquo(\p x, \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points if either of \p x or \p y is NaN. + * - remquo(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p quo) returns \p x and stores zero + * in the location to which \p quo points for finite \p x. 
+ * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double remquo(double x, double y, int *quo) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl remquo(double x, double y, int *quo); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute single-precision floating-point remainder and part of quotient. + * + * Compute a single-precision floating-point remainder in the same way as the + * ::remainderf() function. Argument \p quo returns part of quotient upon + * division of \p x by \p y. Value \p quo has the same sign as + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * + * \endxmlonly + * and may not be the exact quotient but agrees with the exact quotient + * in the low order 3 bits. + * + * \return + * Returns the remainder. + * - remquof(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquof( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquof(\p x, \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points if either of \p x or \p y is NaN. + * - remquof(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p quo) returns \p x and stores zero + * in the location to which \p quo points for finite \p x. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float remquof(float x, float y, int *quo) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl remquof(float x, float y, int *quo); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the first kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 0 for + * the input argument \p x, + * \latexonly $J_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 0. + * - j0( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns +0. + * - j0(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl j0(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the first kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 0 for + * the input argument \p x, + * \latexonly $J_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 0. + * - j0f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns +0. + * - j0f(NaN) returns NaN. 
+ * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float j0f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the first kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 1 for + * the input argument \p x, + * \latexonly $J_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 1. + * - j1( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - j1( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - j1(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl j1(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the first kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 1 for + * the input argument \p x, + * \latexonly $J_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 1. + * - j1f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. 
+ * - j1f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - j1f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float j1f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the first kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order \p n for + * the input argument \p x, + * \latexonly $J_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order \p n. + * - jn(\p n, NaN) returns NaN. + * - jn(\p n, \p x) returns NaN for \p n < 0. + * - jn(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl jn(int n, double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the first kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order \p n for + * the input argument \p x, + * \latexonly $J_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order \p n. + * - jnf(\p n, NaN) returns NaN. + * - jnf(\p n, \p x) returns NaN for \p n < 0. 
+ * - jnf(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float jnf(int n, float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the second kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 0 for + * the input argument \p x, + * \latexonly $Y_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 0. + * - y0( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y0(\p x) returns NaN for \p x < 0. + * - y0( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y0(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl y0(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the second kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 0 for + * the input argument \p x, + * \latexonly $Y_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 0. 
+ * - y0f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y0f(\p x) returns NaN for \p x < 0. + * - y0f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y0f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float y0f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the second kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 1 for + * the input argument \p x, + * \latexonly $Y_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 1. + * - y1( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y1(\p x) returns NaN for \p x < 0. + * - y1( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y1(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl y1(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the second kind of order 1 for the input argument. 
+ * + * Calculate the value of the Bessel function of the second kind of order 1 for + * the input argument \p x, + * \latexonly $Y_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 1. + * - y1f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y1f(\p x) returns NaN for \p x < 0. + * - y1f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y1f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float y1f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the second kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order \p n for + * the input argument \p x, + * \latexonly $Y_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order \p n. + * - yn(\p n, \p x) returns NaN for \p n < 0. + * - yn(\p n, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + *) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - yn(\p n, \p x) returns NaN for \p x < 0. + * - yn(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - yn(\p n, NaN) returns NaN. 
+ * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl yn(int n, double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the second kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order \p n for + * the input argument \p x, + * \latexonly $Y_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order \p n. + * - ynf(\p n, \p x) returns NaN for \p n < 0. + * - ynf(\p n, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - ynf(\p n, \p x) returns NaN for \p x < 0. + * - ynf(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - ynf(\p n, NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float ynf(int n, float x) __THROW; + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 0 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 0 for + * the input argument \p x, + * \latexonly $I_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 0. 
+ * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl cyl_bessel_i0(double x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 0 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 0 for + * the input argument \p x, + * \latexonly $I_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 0. + * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float cyl_bessel_i0f(float x) __THROW; + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 1 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 1 for + * the input argument \p x, + * \latexonly $I_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 1. + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl cyl_bessel_i1(double x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 1 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 1 for + * the input argument \p x, + * \latexonly $I_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 1. 
+ * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float cyl_bessel_i1f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the error function of the input argument. + * + * Calculate the value of the error function for the input argument \p x, + * \latexonly $\frac{2}{\sqrt \pi} \int_0^x e^{-t^2} dt$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * + * π + * + * + * + * + * 0 + * x + * + * + * e + * + * + * + * t + * 2 + * + * + * + * d + * t + * + * \endxmlonly. + * + * \return + * - erf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - erf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erf(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl erf(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the error function of the input argument. + * + * Calculate the value of the error function for the input argument \p x, + * \latexonly $\frac{2}{\sqrt \pi} \int_0^x e^{-t^2} dt$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * + * π + * + * + * + * + * 0 + * x + * + * + * e + * + * + * + * t + * 2 + * + * + * + * d + * t + * + * \endxmlonly. 
+ * + * \return + * - erff( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - erff( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erff(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl erff(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse error function of the input argument. + * + * Calculate the inverse error function + * \latexonly $\erf^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erf + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [-1, 1]. + * + * \return + * - erfinv( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - erfinv(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfinv(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfinv(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfinv(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse error function of the input argument. 
+ * + * Calculate the inverse error function + * \latexonly $\erf^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erf + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [-1, 1]. + * + * \return + * - erfinvf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - erfinvf(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfinvf(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfinvf(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfinvf(float x); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the complementary error function of the input argument. + * + * Calculate the complementary error function of the input argument \p x, + * 1 - erf(\p x). + * + * \return + * - erfc( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns 2. + * - erfc( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfc(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl erfc(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the complementary error function of the input argument. + * + * Calculate the complementary error function of the input argument \p x, + * 1 - erf(\p x). 
+ * + * \return + * - erfcf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns 2. + * - erfcf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfcf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl erfcf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the natural logarithm of the absolute value of the gamma function of the input argument. + * + * Calculate the natural logarithm of the absolute value of the gamma function of the input argument \p x, namely the value of + * \latexonly $\log_{e}\left|\int_{0}^{\infty} e^{-t}t^{x-1}dt\right|$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * + * + * + * \endxmlonly + * + * \return + * - lgamma(1) returns +0. + * - lgamma(2) returns +0. + * - lgamma(\p x) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * if \p x + * \latexonly $\leq$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly 0 and \p x is an integer. + * - lgamma( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - lgamma( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double lgamma(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl lgamma(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse complementary error function of the input argument. + * + * Calculate the inverse complementary error function + * \latexonly $\erfc^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erfc + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [0, 2]. + * + * \return + * - erfcinv( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfcinv(2) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfcinv(\p x) returns NaN for \p x outside [0, 2]. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfcinv(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse complementary error function of the input argument. + * + * Calculate the inverse complementary error function + * \latexonly $\erfc^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erfc + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [0, 2]. + * + * \return + * - erfcinvf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - erfcinvf(2) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfcinvf(\p x) returns NaN for \p x outside [0, 2]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfcinvf(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse of the standard normal cumulative distribution function. + * + * Calculate the inverse of the standard normal cumulative distribution function for input argument \p x, + * \latexonly $\Phi^{-1}(x)$ \endlatexonly + * \xmlonly + * + * + * + * Φ + * + * + * 1 + * + * + * ( + * x + * ) + * + * \endxmlonly. The function is defined for input values in the interval + * \latexonly $(0, 1)$ \endlatexonly + * \xmlonly + * + * + * ( + * 0 + * , + * 1 + * ) + * + * \endxmlonly. + * + * \return + * - normcdfinv( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - normcdfinv(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - normcdfinv(\p x) returns NaN + * if \p x is not in the interval [0,1]. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double normcdfinv(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse of the standard normal cumulative distribution function. + * + * Calculate the inverse of the standard normal cumulative distribution function for input argument \p x, + * \latexonly $\Phi^{-1}(x)$ \endlatexonly + * \xmlonly + * + * + * + * Φ + * + * + * 1 + * + * + * ( + * x + * ) + * + * \endxmlonly. The function is defined for input values in the interval + * \latexonly $(0, 1)$ \endlatexonly + * \xmlonly + * + * + * ( + * 0 + * , + * 1 + * ) + * + * \endxmlonly. 
+ * + * \return + * - normcdfinvf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - normcdfinvf(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - normcdfinvf(\p x) returns NaN + * if \p x is not in the interval [0,1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float normcdfinvf(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the standard normal cumulative distribution function. + * + * Calculate the cumulative distribution function of the standard normal distribution for input argument \p x, + * \latexonly $\Phi(x)$ \endlatexonly + * \xmlonly + * + * + * Φ + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * - normcdf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns 1. + * - normcdf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double normcdf(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the standard normal cumulative distribution function. + * + * Calculate the cumulative distribution function of the standard normal distribution for input argument \p x, + * \latexonly $\Phi(x)$ \endlatexonly + * \xmlonly + * + * + * Φ + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * - normcdff( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns 1. 
+ * - normcdff( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns +0 + + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float normcdff(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the scaled complementary error function of the input argument. + * + * Calculate the scaled complementary error function of the input argument \p x, + * \latexonly $e^{x^2}\cdot \textrm{erfc}(x)$ \endlatexonly + * \xmlonly + * + * + * + * e + * + * + * x + * 2 + * + * + * + * + * + * erfc + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * - erfcx( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * - erfcx( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfcx(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the scaled complementary error function of the input argument. + * + * Calculate the scaled complementary error function of the input argument \p x, + * \latexonly $e^{x^2}\cdot \textrm{erfc}(x)$ \endlatexonly + * \xmlonly + * + * + * + * e + * + * + * x + * 2 + * + * + * + * + * + * erfc + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * - erfcxf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * - erfcxf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. 
+ * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfcxf(float x); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the natural logarithm of the absolute value of the gamma function of the input argument. + * + * Calculate the natural logarithm of the absolute value of the gamma function of the input argument \p x, namely the value of + * \latexonly $\log_{e}\left|\int_{0}^{\infty} e^{-t}t^{x-1}dt\right|$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * + * + * + * \endxmlonly + * + * \return + * - lgammaf(1) returns +0. + * - lgammaf(2) returns +0. + * - lgammaf(\p x) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * if \p x + * \latexonly $\leq$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly + * 0 and \p x is an integer. + * - lgammaf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - lgammaf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float lgammaf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl lgammaf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the gamma function of the input argument. 
+ * + * Calculate the gamma function of the input argument \p x, namely the value of + * \latexonly $\int_{0}^{\infty} e^{-t}t^{x-1}dt$ \endlatexonly + * \xmlonly + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * \endxmlonly. + * + * \return + * - tgamma( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - tgamma(2) returns +1. + * - tgamma(\p x) returns NaN if \p x < 0 and \p x is an integer. + * - tgamma( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN. + * - tgamma( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double tgamma(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl tgamma(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the gamma function of the input argument. + * + * Calculate the gamma function of the input argument \p x, namely the value of + * \latexonly $\int_{0}^{\infty} e^{-t}t^{x-1}dt$ \endlatexonly + * \xmlonly + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * \endxmlonly. + * + * \return + * - tgammaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. 
+ * - tgammaf(2) returns +1. + * - tgammaf(\p x) returns NaN if \p x < 0 and \p x is an integer. + * - tgammaf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN. + * - tgammaf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float tgammaf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl tgammaf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** \ingroup CUDA_MATH_DOUBLE + * \brief Create value with given magnitude, copying sign of second value. + * + * Create a floating-point value with the magnitude \p x and the sign of \p y. + * + * \return + * Returns a value with the magnitude of \p x and the sign of \p y. + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double copysign(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl copysign(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** \ingroup CUDA_MATH_SINGLE + * \brief Create value with given magnitude, copying sign of second value. + * + * Create a floating-point value with the magnitude \p x and the sign of \p y. + * + * \return + * Returns a value with the magnitude of \p x and the sign of \p y. 
+ */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float copysignf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl copysignf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Return next representable double-precision floating-point value after argument \p x in the direction of \p y. + * + * Calculate the next representable double-precision floating-point value + * following \p x in the direction of \p y. For example, if \p y is greater than \p x, ::nextafter() + * returns the smallest representable number greater than \p x + * + * \return + * - nextafter(\p x, \p y) = \p y if \p x equals \p y. + * - nextafter(\p x, \p y) = \p NaN if either \p x or \p y are \p NaN. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double nextafter(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl nextafter(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Return next representable single-precision floating-point value after argument \p x in the direction of \p y. + * + * Calculate the next representable single-precision floating-point value + * following \p x in the direction of \p y. For example, if \p y is greater than \p x, ::nextafterf() + * returns the smallest representable number greater than \p x + * + * \return + * - nextafterf(\p x, \p y) = \p y if \p x equals \p y. + * - nextafterf(\p x, \p y) = \p NaN if either \p x or \p y are \p NaN. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float nextafterf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl nextafterf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Returns "Not a Number" value. + * + * Return a representation of a quiet NaN. Argument \p tagp selects one of the possible representations. + * + * \return + * - nan(\p tagp) returns NaN. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double nan(const char *tagp) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl nan(const char *tagp); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Returns "Not a Number" value + * + * Return a representation of a quiet NaN. Argument \p tagp selects one of the possible representations. + * + * \return + * - nanf(\p tagp) returns NaN. 
+ * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float nanf(const char *tagp) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl nanf(const char *tagp); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* namespace std */ +#endif +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinff(float) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnanf(float) __THROW; + + +#if defined(__APPLE__) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isfinited(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isfinitef(float) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbitd(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnand(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinfd(double) __THROW; +#else /* __APPLE__ */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __finite(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __finitef(float) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbit(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnan(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinf(double) __THROW; +#endif /* __APPLE__ */ + +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbitf(float) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. 
+ * + * Compute the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation. After computing the value + * to infinite precision, the value is rounded once. + * + * \return + * Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fma( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fma( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fma(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - fma(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. 
+ * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fma(double x, double y, double z) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fma(double x, double y, double z); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * + * Compute the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation. After computing the value + * to infinite precision, the value is rounded once. + * + * \return + * Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fmaf(float x, float y, float z) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fmaf(float x, float y, float z); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + + +/* these are here to avoid warnings on the call graph. + long double is not supported on the device */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbitl(long double) __THROW; +#if defined(__APPLE__) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isfinite(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinf(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnan(long double) __THROW; +#else /* __APPLE__ */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __finitel(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinfl(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnanl(long double) __THROW; +#endif /* __APPLE__ */ + +#if defined(_WIN32) && defined(_M_AMD64) +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl acosf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl asinf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl atanf(float) __THROW; +extern 
__CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl atan2f(float, float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl cosf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl sinf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl tanf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl coshf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl sinhf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl tanhf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl expf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl logf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl log10f(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl modff(float, float*) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl powf(float, float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl sqrtf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl ceilf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl floorf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl fmodf(float, float) __THROW; +#else /* _WIN32 && _M_AMD64 */ + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc cosine of the input argument. 
+ * + * Calculate the principal value of the arc cosine of the input argument \p x. + * + * \return + * Result will be in radians, in the interval [0, + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * ] for \p x inside [-1, +1]. + * - acosf(1) returns +0. + * - acosf(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float acosf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc sine of the input argument. + * + * Calculate the principal value of the arc sine of the input argument \p x. + * + * \return + * Result will be in radians, in the interval [- + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * , + + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * ] for \p x inside [-1, +1]. + * - asinf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - asinf(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float asinf(float x) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc tangent of the input argument. + * + * Calculate the principal value of the arc tangent of the input argument \p x. + * + * \return + * Result will be in radians, in the interval [- + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * , + + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * ]. 
+ * - atanf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - atanf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * /2. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float atanf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc tangent of the ratio of first and second input arguments. + * + * Calculate the principal value of the arc tangent of the ratio of first + * and second input arguments \p y / \p x. The quadrant of the result is + * determined by the signs of inputs \p y and \p x. + * + * \return + * Result will be in radians, in the interval [- + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * , + + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * ]. + * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , -0) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly. + * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , +0) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p x) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * for \p x < 0. 
+ * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p x) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for \p x > 0. + * - atan2f(\p y, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\pi$ \endlatexonly + * \xmlonly + * + * + * - + * π + * + * + * \endxmlonly + * /2 for \p y < 0. + * - atan2f(\p y, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * /2 for \p y > 0. + * - atan2f( + * \latexonly $\pm y$ \endlatexonly + * \xmlonly + * + * + * ± + * y + * + * + * \endxmlonly + * , + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * for finite \p y > 0. + * - atan2f( + * \latexonly $\pm y$ \endlatexonly + * \xmlonly + * + * + * ± + * y + * + * + * \endxmlonly + * , + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for finite \p y > 0. + * - atan2f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p x) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * /2 for finite \p x. + * - atan2f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 3\pi$ \endlatexonly + * \xmlonly + * + * + * ± + * 3 + * π + * + * + * \endxmlonly + * /4. 
+ * - atan2f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * /4. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float atan2f(float y, float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the cosine of the input argument. + * + * Calculate the cosine of the input argument \p x (measured in radians). + * + * \return + * - cosf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cosf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float cosf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine of the input argument. + * + * Calculate the sine of the input argument \p x (measured in radians). + * + * \return + * - sinf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sinf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the tangent of the input argument. + * + * Calculate the tangent of the input argument \p x (measured in radians). 
+ * + * \return + * - tanf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tanf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float tanf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the hyperbolic cosine of the input argument. + * + * Calculate the hyperbolic cosine of the input argument \p x. + * + * \return + * - coshf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - coshf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float coshf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the hyperbolic sine of the input argument. + * + * Calculate the hyperbolic sine of the input argument \p x. + * + * \return + * - sinhf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinhf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sinhf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the hyperbolic tangent of the input argument. 
+ * + * Calculate the hyperbolic tangent of the input argument \p x. + * + * \return + * - tanhf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tanhf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float tanhf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the natural logarithm of the input argument. + * + * Calculate the natural logarithm of the input argument \p x. + * + * \return + * - logf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - logf(1) returns +0. + * - logf(\p x) returns NaN for \p x < 0. + * - logf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float logf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument. + * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly, + * the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x. 
+ * + * \return + * - expf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - expf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - expf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float expf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 10 logarithm of the input argument. + * + * Calculate the base 10 logarithm of the input argument \p x. + * + * \return + * - log10f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log10f(1) returns +0. + * - log10f(\p x) returns NaN for \p x < 0. + * - log10f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float log10f(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Break down the input argument into fractional and integral parts. + * + * Break down the argument \p x into fractional and integral parts. The integral part is stored in the argument \p iptr. + * Fractional and integral parts are given the same sign as the argument \p x. + * + * \return + * - modff( + * \latexonly $\pm x$ \endlatexonly + * \xmlonly + * + * + * ± + * x + * + * + * \endxmlonly + * , \p iptr) returns a result with the same sign as \p x. 
+ * - modff( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p iptr) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * and stores + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * in the object pointed to by \p iptr. + * - modff(NaN, \p iptr) stores a NaN in the object pointed to by \p iptr and returns a NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float modff(float x, float *iptr) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of first argument to the power of second argument. + * + * Calculate the value of \p x to the power of \p y. + * + * \return + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * for \p y an odd integer less than 0. + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y less than 0 and not an odd integer. + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns +0 for \p y > 0 and not an odd integer. + * - powf(-1, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns 1. + * - powf(+1, \p y) returns 1 for any \p y, even a NaN. 
+ * - powf(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1 for any \p x, even a NaN. + * - powf(\p x, \p y) returns a NaN for finite \p x < 0 and finite non-integer \p y. + * - powf(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - powf(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. + * - powf(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - powf(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. + * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns -0 for \p y an odd integer less than 0. + * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0 and not an odd integer. 
+ * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0 and not an odd integer. + * - powf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0. + * - powf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float powf(float x, float y) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the square root of the input argument. + * + * Calculate the nonnegative square root of \p x, + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * - sqrtf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sqrtf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - sqrtf(\p x) returns NaN if \p x is less than 0. 
+ * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sqrtf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate ceiling of the input argument. + * + * Compute the smallest integer value not less than \p x. + * + * \return + * Returns + * \latexonly $\lceil x \rceil$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * expressed as a floating-point number. + * - ceilf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - ceilf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float ceilf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the largest integer less than or equal to \p x. + * + * Calculate the largest integer value which is less than or equal to \p x. + * + * \return + * Returns + * \latexonly $\lfloor x \rfloor$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * expressed as a floating-point number. + * - floorf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - floorf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float floorf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the floating-point remainder of \p x / \p y. 
+ * + * Calculate the floating-point remainder of \p x / \p y. + * The floating-point remainder of the division operation \p x / \p y calculated + * by this function is exactly the value x - n*y, where \p n is \p x / \p y with its fractional part truncated. + * The computed value will have the same sign as \p x, and its magnitude will be less than the magnitude of \p y. + * \return + * - Returns the floating-point remainder of \p x / \p y. + * - fmodf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * if \p y is not zero. + * - fmodf(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x if \p x is finite. + * - fmodf(\p x, \p y) returns NaN if \p x is + * \latexonly $\pm\infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * or \p y is zero. + * - If either argument is NaN, NaN is returned. 
+ * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fmodf(float x, float y) __THROW; +#if defined(__QNX__) +/* redeclare some builtins that QNX uses */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FLog(float, int); +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FCosh(float, float); +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FSinh(float, float); +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FSinx(float, unsigned int, int); +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int _FDsign(float); +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int _Dsign(double); +#endif +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +#endif /* _WIN32 && _M_AMD64 */ + +} + +#if !defined(__CUDACC_RTC__) +#include +#include + +#ifndef __CUDA_INTERNAL_SKIP_CPP_HEADERS__ +#include +#include +#endif /* __CUDA_INTERNAL_SKIP_CPP_HEADERS__ */ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC_RTC__) + +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(long double x); + +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(long double x); + +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(long double x); + +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x); +__DEVICE_FUNCTIONS_DECL__ 
__cudart_builtin__ int isinf(long double x); + +#elif defined(__GNUC__) + +#undef signbit +#undef isfinite +#undef isnan +#undef isinf + +#if defined(__APPLE__) + +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(float x); +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(double x); +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(long double x); + +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(float x); +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(double x); +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(long double x); + +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x) throw(); +#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000 +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(float x); +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(long double x); +#else /* !(!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000) */ +template +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool __libcpp_isnan(T) _NOEXCEPT; +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(float x) _NOEXCEPT; +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(long double x) _NOEXCEPT; +#endif /* !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000 */ + +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x) throw(); +#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000 +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(float x); +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(long double x); +#else /* !(!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000) */ +template +__cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ bool __libcpp_isinf(T) _NOEXCEPT; +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ 
__cudart_builtin__ bool isinf(float x) _NOEXCEPT; +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(long double x) _NOEXCEPT; +#endif /* !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000 */ + +#else /* __APPLE__ */ + +#if ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) +#if !defined(_NVHPC_CUDA) +namespace std { +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool signbit(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool signbit(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool signbit(long double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isfinite(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isfinite(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isfinite(long double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isnan(float x); +/* GCC 6.1 uses ::isnan(double x) for isnan(double x) if the condition is true */ +#if _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x) throw(); +#else /* !(_GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC) */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isnan(double x); +#endif /* _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isnan(long double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isinf(float x); +/* GCC 6.1 uses ::isinf(double x) for isinf(double x) if the condition is true. 
*/ +#if _GLIBCXX_HAVE_OBSOLETE_ISINF && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x) throw(); +#else /* !(_GLIBCXX_HAVE_OBSOLETE_ISINF && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC) */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isinf(double x); +#endif /* _GLIBCXX_HAVE_OBSOLETE_ISINF && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isinf(long double x); +} +#endif + +#else /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ + +#if defined(__QNX__) +#if (__QNX__) && !defined(_LIBCPP_VERSION) +/* QNX defines functions in std, need to declare them here */ +namespace std { +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(long double x); +} +#else +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool signbit(const float x); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool signbit(const double x); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool signbit(const long double x); +#endif +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(const float a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(const double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(const long double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(const float a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(const double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(const long double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(const float a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(const double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(const long double a); +#else /* ! 
__QNX__ */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const float x); +#if defined(__ICC) +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const double x) throw(); +#else /* !__ICC */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const double x); +#endif /* __ICC */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const long double x); + +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const float x); +#if defined(__ICC) +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const double x) throw(); +#else /* !__ICC */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const double x); +#endif /* __ICC */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const long double x); + +#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 +template +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool __libcpp_isnan(T) _NOEXCEPT; +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(float x) _NOEXCEPT; +#else /* !((defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(float x); +#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */ +#if defined(__ANDROID__) || defined(__HORIZON__) +#if !defined(_LIBCPP_VERSION) +__forceinline__ +#endif /* !defined(_LIBCPP_VERSION) */ +#if _LIBCPP_VERSION >= 7000 +#ifdef _LIBCPP_PREFERRED_OVERLOAD +_LIBCPP_INLINE_VISIBILITY _LIBCPP_PREFERRED_OVERLOAD __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(double x) _NOEXCEPT; +#endif /* _LIBCPP_PREFERRED_OVERLOAD */ +#else /* _LIBCPP_VERSION < 7000 */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x); +#endif /* _LIBCPP_VERSION >= 7000 */ +#else /* !(__ANDROID__ || __HORIZON__) */ 
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x) throw(); +#endif /* __ANDROID__ */ +#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(long double x) _NOEXCEPT; +#else /* !( (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(long double x); +#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */ + +#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 +static __inline__ __cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ unsigned __FLOAT_BITS(float __f); +static __inline__ __cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ unsigned long long __DOUBLE_BITS(double __f); +template +__cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ bool __libcpp_isinf(T) _NOEXCEPT; +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(float x) _NOEXCEPT; +#else /* !( (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(float x); +#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */ + +#if defined(__ANDROID__) || defined(__HORIZON__) +#if !defined(_LIBCPP_VERSION) +__forceinline__ +#endif /* !defined(_LIBCPP_VERSION) */ +#if _LIBCPP_VERSION >= 7000 +#ifdef _LIBCPP_PREFERRED_OVERLOAD +_LIBCPP_INLINE_VISIBILITY _LIBCPP_PREFERRED_OVERLOAD __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(double x) _NOEXCEPT; +#endif /* _LIBCPP_PREFERRED_OVERLOAD */ +#else /* _LIBCPP_VERSION < 7000 */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x); +#endif /* _LIBCPP_VERSION >= 7000 */ +#else /* ! 
(__ANDROID__ || __HORIZON__) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x) throw(); +#endif /* __ANDROID__ || __HORIZON__ */ +#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(long double x) _NOEXCEPT; +#else /* !( (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(long double x); +#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */ +#endif /* __QNX__ */ + +#endif /* ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) */ +#endif /* __APPLE__ */ + +#if !defined(_LIBCPP_VERSION) +#if defined(__clang__) +#if __has_include() +#define __NV_GLIBCXX_VERSION 40800 +#endif /* __has_include() */ +#endif /* __clang__ */ + +#if !defined(__NV_GLIBCXX_VERSION) +#define __NV_GLIBCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#endif /* !__NV_GLIBCXX_VERSION */ +#endif /* !defined(_LIBCPP_VERSION) */ + +#if !defined(__HORIZON__) || !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 +#if defined(__arm__) && !defined(_STLPORT_VERSION) && !_GLIBCXX_USE_C99 +#if !defined(__ANDROID__) || (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800) + +#if defined(__QNX__) +/* QNX defines functions in std, need to declare them here */ +namespace std { +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs (long long int a); +} +#elif defined(__HORIZON__) +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +#pragma GCC system_header +#endif +_LIBCPP_BEGIN_NAMESPACE_STD +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs (long long int a) throw(); +_LIBCPP_END_NAMESPACE_STD +#else +static __inline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int a); +#endif /* __QNX__ || __HORIZON__*/ + +#endif /* 
!__ANDROID__ || (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800) */ +#endif /* __arm__ && !_STLPORT_VERSION && !_GLIBCXX_USE_C99 */ +#endif /* !defined(__HORIZON__) || !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 */ + +#if defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800 && !defined(__ibmxl__) + +#if !defined(_STLPORT_VERSION) +namespace __gnu_cxx +{ +#endif /* !_STLPORT_VERSION */ + +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int a); + +#if !defined(_STLPORT_VERSION) +} +#endif /* !_STLPORT_VERSION */ + +#endif /* defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800 && !__ibmxl__ */ + +namespace std +{ + template extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T __pow_helper(T, int); + template extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T __cmath_power(T, unsigned int); +} + +using std::abs; +using std::fabs; +using std::ceil; +using std::floor; +using std::sqrt; +#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 +using std::pow; +#endif /* !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 */ +using std::log; +using std::log10; +using std::fmod; +using std::modf; +using std::exp; +using std::frexp; +using std::ldexp; +using std::asin; +using std::sin; +using std::sinh; +using std::acos; +using std::cos; +using std::cosh; +using std::atan; +using std::atan2; +using std::tan; +using std::tanh; + +#elif defined(_WIN32) + +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __CUDA_MATH_CRTIMP double __cdecl _hypot(double x, double y); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __CUDA_MATH_CRTIMP float __cdecl _hypotf(float x, float y); + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int signbit(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if _MSC_VER >= 1900 +#define __SIGNBIT_THROW throw() +#else +#define __SIGNBIT_THROW +#endif +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool 
signbit(long double) __SIGNBIT_THROW; +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __device_builtin__ __CUDA_MATH_CRTIMP int _ldsign(long double); +#undef __SIGNBIT_THROW +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is negative. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE signbit(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +#if _MSC_VER >= 1900 +#define __SIGNBIT_THROW throw() +#else +#define __SIGNBIT_THROW +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is negative. 
+ */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __RETURN_TYPE signbit(double) __SIGNBIT_THROW; +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __device_builtin__ __CUDA_MATH_CRTIMP int _dsign(double); +#undef __RETURN_TYPE +#undef __SIGNBIT_THROW +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero value + * if and only if \p a is negative. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE signbit(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +#if _MSC_VER >= 1900 +#define __SIGNBIT_THROW throw() +#else +#define __SIGNBIT_THROW +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero value + * if and only if \p a is negative. 
+ */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __RETURN_TYPE signbit(float) __SIGNBIT_THROW; +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __device_builtin__ __CUDA_MATH_CRTIMP int _fdsign(float); +#undef __RETURN_TYPE +#undef __SIGNBIT_THROW +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int isinf(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(long double a); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * \return + * - With Visual Studio 2013 host compiler: Returns true if and only + * if \p a is an infinite value. + * - With other host compilers: Returns a nonzero value if and only + * if \p a is an infinite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * \return + * - With Visual Studio 2013 host compiler: Returns true if and only + * if \p a is an infinite value. + * - With other host compilers: Returns a nonzero value if and only + * if \p a is an infinite value. 
+ */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(double a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is an infinite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero + * value if and only if \p a is an infinite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is an infinite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero + * value if and only if \p a is an infinite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(float a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int isnan(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(long double a); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is a NaN. 
+ * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(double a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_SINGLE + * + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. 
+ * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(float a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int isfinite(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(long double a); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. 
+ */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(double a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. 
+ */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(float a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +template extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T _Pow_int(T, int); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +template extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T _Pow_int(T, int) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int) throw(); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#endif /* __CUDACC_RTC__ */ + +#if __cplusplus >= 201103L +#define __NV_NOEXCEPT noexcept +#else /* !__cplusplus >= 201103L */ +#define __NV_NOEXCEPT throw() +#endif /* __cplusplus >= 201103L */ + +#if defined(_LIBCPP_VERSION) && defined(_LIBCPP_BEGIN_NAMESPACE_STD) && !defined(_STLPORT_VERSION) +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc++11-extensions" +#endif /* __clang__ */ +#if _LIBCPP_VERSION < 3800 +_LIBCPP_BEGIN_NAMESPACE_STD +#endif /* _LIBCPP_VERSION < 3800 */ +#elif defined(__GNUC__) && !defined(_STLPORT_VERSION) +namespace std { +#endif /* defined(_LIBCPP_VERSION) && defined(_LIBCPP_BEGIN_NAMESPACE_STD) && !defined(_STLPORT_VERSION) || + __GNUC__ && !_STLPORT_VERSION */ + +#if defined(__CUDACC_RTC__) || defined(__GNUC__) + +#if defined(__CUDACC_RTC__) || \ + (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION >= 40800) || \ + defined(__ibmxl__) +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int); +#endif /* __CUDACC__RTC__ || + (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION >= 40800) || + __ibmxl__ */ + +#endif /* __CUDACC_RTC__ || __GNUC__ */ + +#if defined(__CUDACC_RTC__) || \ + (!defined(_MSC_VER) || _MSC_VER < 1800) && \ + (!defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 1101)) +extern 
__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long int __cdecl abs(long int); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl abs(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl abs(double); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fabs(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ceil(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl floor(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sqrt(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, float); + +#if !defined(__QNX__) + +#if defined(__GNUC__) && __cplusplus >= 201103L && !defined(_LIBCPP_VERSION) +template +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ +typename __gnu_cxx::__promote_2<_Tp, _Up>::__type pow(_Tp, _Up); +#else /* !(defined(__GNUC__) && __cplusplus >= 201103L && !defined(_LIBCPP_VERSION)) */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, int); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl pow(double, int); +#endif /* defined(__GNUC__) && __cplusplus >= 201103L && !defined(_LIBCPP_VERSION) */ + +#endif /* !defined(__QNX__) */ + +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log10(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fmod(float, float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl modf(float, float*); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl exp(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl frexp(float, int*); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ldexp(float, int); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl asin(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl 
sin(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sinh(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl acos(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cos(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cosh(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan2(float, float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tan(float); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tanh(float); +#else /* __CUDACC_RTC__ || + (!defined(_MSC_VER) || _MSC_VER < 1800) && + (!defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 1101)) */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long int __cdecl abs(long int) throw(); +#if defined(_LIBCPP_VERSION) +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int __cdecl abs(long long int) throw(); +#endif /* defined(_LIBCPP_VERSION) */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl abs(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl abs(double) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fabs(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ceil(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl floor(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sqrt(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, float) throw(); +#if defined(_LIBCPP_VERSION) +#if (defined (__ANDROID__) || defined(__HORIZON__)) && (_LIBCPP_VERSION >= 9000) +template +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ +#if _LIBCPP_VERSION >= 14000 +typename std::__enable_if_t +#else /* _LIBCPP_VERSION < 14000 */ +typename std::_EnableIf +#endif /* 
_LIBCPP_VERSION >= 14000 */ +< + std::is_arithmetic<_A1>::value && + std::is_arithmetic<_A2>::value, + std::__promote<_A1, _A2> +>::type pow(_A1 __lcpp_x, _A2 __lcpp_y) __NV_NOEXCEPT; +#elif (defined(__APPLE__) && __clang_major__ >= 7) || _LIBCPP_VERSION >= 3800 || defined(__QNX__) +template +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ +#if _LIBCPP_VERSION >= 13000 +typename std::enable_if < +#else /* _LIBCPP_VERSION < 13000 */ +typename std::__lazy_enable_if < +#endif /* _LIBCPP_VERSION >= 13000 */ + std::is_arithmetic<_Tp>::value && std::is_arithmetic<_Up>::value, + std::__promote<_Tp, _Up> +>::type pow(_Tp __x, _Up __y) __NV_NOEXCEPT; +#else /* !((__APPLE__ && __clang_major__ >= 7) || _LIBCPP_VERSION >= 3800) */ +template +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ +typename enable_if < + std::is_arithmetic<_Tp>::value && std::is_arithmetic<_Up>::value, + typename std::__promote<_Tp, _Up>::type +>::type pow(_Tp __x, _Up __y) __NV_NOEXCEPT; +#endif /* (__APPLE__ && __clang_major__ >= 7) || _LIBCPP_VERSION >= 3800 */ +#else /* !defined(_LIBCPP_VERSION) */ +#if !(defined(__GNUC__) && __cplusplus >= 201103L) +#if (defined(_MSC_VER) && (_MSC_VER >= 1928)) && !(defined __CUDA_INTERNAL_SKIP_CPP_HEADERS__) +template && ::std:: is_arithmetic_v<_Ty2>, int> > [[nodiscard]] __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ ::std:: _Common_float_type_t<_Ty1, _Ty2> __cdecl pow(_Ty1 _Left, _Ty2 _Right) noexcept; +#else +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, int) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl pow(double, int) throw(); +#endif /* (defined(_MSC_VER) && (_MSC_VER >= 1928)) && !(defined __CUDA_INTERNAL_SKIP_CPP_HEADERS__) */ +#endif /* !(defined(__GNUC__) && __cplusplus >= 201103L) */ +#endif /* defined(_LIBCPP_VERSION) */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl 
log10(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fmod(float, float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl modf(float, float*) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl exp(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl frexp(float, int*) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ldexp(float, int) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl asin(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sin(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sinh(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl acos(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cos(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cosh(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan2(float, float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tan(float) throw(); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tanh(float) throw(); +#endif /* __CUDACC_RTC__ || + (!defined(_MSC_VER) || _MSC_VER < 1800) && + (!defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 1101)) */ + +#if defined(_LIBCPP_VERSION) && defined(_LIBCPP_END_NAMESPACE_STD) && !defined(_STLPORT_VERSION) +#if _LIBCPP_VERSION < 3800 +_LIBCPP_END_NAMESPACE_STD +#endif /* _LIBCPP_VERSION < 3800 */ +#if defined(__clang__) +#pragma clang diagnostic pop +#endif /* __clang__ */ +#elif defined(__GNUC__) && !defined(_STLPORT_VERSION) +} +#endif /* defined(_LIBCPP_VERSION) && defined(_LIBCPP_BEGIN_NAMESPACE_STD) && !defined(_STLPORT_VERSION) || + __GNUC__ && !_STLPORT_VERSION */ + +#undef 
__DEVICE_FUNCTIONS_DECL__ +#undef __NV_NOEXCEPT + +#if defined(__CUDACC_RTC__) +#define __MATH_FUNCTIONS_DECL__ __host__ __device__ +#define __MATH_FUNCTIONS_DEVICE_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __MATH_FUNCTIONS_DECL__ static inline __host__ __device__ __cudart_builtin__ +#define __MATH_FUNCTIONS_DEVICE_DECL__ static inline __device__ __cudart_builtin__ +#endif /* __CUDACC_RTC__ */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#if defined(__QNX__) || (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800) +#if defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) +#if defined(_LIBCPP_VERSION) +#define __NV_NOEXCEPT _NOEXCEPT +_LIBCPP_BEGIN_NAMESPACE_STD +#else +#define __NV_NOEXCEPT +namespace std { +__host__ __device__ __cudart_builtin__ int ilogbf(float a); +#endif +#else /* !(defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000)) */ +#define __NV_NOEXCEPT _NOEXCEPT +#endif /* defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) */ +__host__ __device__ __cudart_builtin__ float logb(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ int ilogb(float a) __NV_NOEXCEPT; + +__host__ __device__ __cudart_builtin__ float scalbn(float a, int b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float scalbln(float a, long int b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float exp2(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float expm1(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float log2(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float log1p(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float acosh(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float asinh(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float atanh(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float hypot(float a, float b) __NV_NOEXCEPT; +__host__ __device__ 
__cudart_builtin__ float cbrt(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float erf(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float erfc(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float lgamma(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float tgamma(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float copysign(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float nextafter(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float remainder(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float remquo(float a, float b, int *quo) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float round(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long int lround(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long long int llround(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float trunc(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float rint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long int lrint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long long int llrint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float nearbyint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fdim(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fma(float a, float b, float c) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fmax(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fmin(float a, float b) __NV_NOEXCEPT; +#if defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) +#if defined(_LIBCPP_VERSION) +_LIBCPP_END_NAMESPACE_STD +using _VSTD::logb; +using _VSTD::ilogb; +using _VSTD::scalbn; +using _VSTD::scalbln; +using _VSTD::exp2; +using _VSTD::expm1; +using _VSTD::log2; +using 
_VSTD::log1p; +using _VSTD::acosh; +using _VSTD::asinh; +using _VSTD::atanh; +using _VSTD::hypot; +using _VSTD::cbrt; +using _VSTD::erf; +using _VSTD::erfc; +using _VSTD::lgamma; +using _VSTD::tgamma; +using _VSTD::copysign; +using _VSTD::nextafter; +using _VSTD::remainder; +using _VSTD::remquo; +using _VSTD::round; +using _VSTD::lround; +using _VSTD::llround; +using _VSTD::trunc; +using _VSTD::rint; +using _VSTD::lrint; +using _VSTD::llrint; +using _VSTD::nearbyint; +using _VSTD::fdim; +using _VSTD::fma; +using _VSTD::fmax; +using _VSTD::fmin; +#else +} +#endif +#endif /* defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) */ +#undef __NV_NOEXCEPT +#else /* !(defined(__QNX__ ) || (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800)) */ +#if ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) +namespace std { +__host__ __device__ __cudart_builtin__ constexpr float logb(float a); +__host__ __device__ __cudart_builtin__ constexpr int ilogb(float a); +__host__ __device__ __cudart_builtin__ constexpr float scalbn(float a, int b); +__host__ __device__ __cudart_builtin__ constexpr float scalbln(float a, long int b); +__host__ __device__ __cudart_builtin__ constexpr float exp2(float a); +__host__ __device__ __cudart_builtin__ constexpr float expm1(float a); +__host__ __device__ __cudart_builtin__ constexpr float log2(float a); +__host__ __device__ __cudart_builtin__ constexpr float log1p(float a); +__host__ __device__ __cudart_builtin__ constexpr float acosh(float a); +__host__ __device__ __cudart_builtin__ constexpr float asinh(float a); +__host__ __device__ __cudart_builtin__ constexpr float atanh(float a); +__host__ __device__ __cudart_builtin__ constexpr float hypot(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float cbrt(float a); +__host__ __device__ __cudart_builtin__ constexpr float erf(float a); +__host__ __device__ __cudart_builtin__ constexpr float erfc(float a); +__host__ __device__ 
__cudart_builtin__ constexpr float lgamma(float a); +__host__ __device__ __cudart_builtin__ constexpr float tgamma(float a); +__host__ __device__ __cudart_builtin__ constexpr float copysign(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float nextafter(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float remainder(float a, float b); +__host__ __device__ __cudart_builtin__ float remquo(float a, float b, int *quo); +__host__ __device__ __cudart_builtin__ constexpr float round(float a); +__host__ __device__ __cudart_builtin__ constexpr long int lround(float a); +__host__ __device__ __cudart_builtin__ constexpr long long int llround(float a); +__host__ __device__ __cudart_builtin__ constexpr float trunc(float a); +__host__ __device__ __cudart_builtin__ constexpr float rint(float a); +__host__ __device__ __cudart_builtin__ constexpr long int lrint(float a); +__host__ __device__ __cudart_builtin__ constexpr long long int llrint(float a); +__host__ __device__ __cudart_builtin__ constexpr float nearbyint(float a); +__host__ __device__ __cudart_builtin__ constexpr float fdim(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float fma(float a, float b, float c); +__host__ __device__ __cudart_builtin__ constexpr float fmax(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float fmin(float a, float b); +} +#else /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ +__MATH_FUNCTIONS_DECL__ float logb(float a); + +__MATH_FUNCTIONS_DECL__ int ilogb(float a); + +__MATH_FUNCTIONS_DECL__ float scalbn(float a, int b); + +__MATH_FUNCTIONS_DECL__ float scalbln(float a, long int b); + +__MATH_FUNCTIONS_DECL__ float exp2(float a); + +__MATH_FUNCTIONS_DECL__ float expm1(float a); + +__MATH_FUNCTIONS_DECL__ float log2(float a); + +__MATH_FUNCTIONS_DECL__ float log1p(float a); + +__MATH_FUNCTIONS_DECL__ float acosh(float a); + +__MATH_FUNCTIONS_DECL__ float asinh(float a); + 
+__MATH_FUNCTIONS_DECL__ float atanh(float a); + +__MATH_FUNCTIONS_DECL__ float hypot(float a, float b); + +__MATH_FUNCTIONS_DECL__ float cbrt(float a); + +__MATH_FUNCTIONS_DECL__ float erf(float a); + +__MATH_FUNCTIONS_DECL__ float erfc(float a); + +__MATH_FUNCTIONS_DECL__ float lgamma(float a); + +__MATH_FUNCTIONS_DECL__ float tgamma(float a); + +__MATH_FUNCTIONS_DECL__ float copysign(float a, float b); + +__MATH_FUNCTIONS_DECL__ float nextafter(float a, float b); + +__MATH_FUNCTIONS_DECL__ float remainder(float a, float b); + +__MATH_FUNCTIONS_DECL__ float remquo(float a, float b, int *quo); + +__MATH_FUNCTIONS_DECL__ float round(float a); + +__MATH_FUNCTIONS_DECL__ long int lround(float a); + +__MATH_FUNCTIONS_DECL__ long long int llround(float a); + +__MATH_FUNCTIONS_DECL__ float trunc(float a); + +__MATH_FUNCTIONS_DECL__ float rint(float a); + +__MATH_FUNCTIONS_DECL__ long int lrint(float a); + +__MATH_FUNCTIONS_DECL__ long long int llrint(float a); + +__MATH_FUNCTIONS_DECL__ float nearbyint(float a); + +__MATH_FUNCTIONS_DECL__ float fdim(float a, float b); + +__MATH_FUNCTIONS_DECL__ float fma(float a, float b, float c); + +__MATH_FUNCTIONS_DECL__ float fmax(float a, float b); + +__MATH_FUNCTIONS_DECL__ float fmin(float a, float b); +#endif /* ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) */ +#endif /* defined(__QNX__) || (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800) */ +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __host__ __device__ __cudart_builtin__ float __cdecl logb(float) throw(); +extern __host__ __device__ __cudart_builtin__ int __cdecl ilogb(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl scalbn(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl scalbln(float, long int) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl exp2(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl expm1(float) 
throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl log2(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl log1p(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl acosh(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl asinh(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl atanh(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl hypot(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl cbrt(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl erf(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl erfc(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl lgamma(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl tgamma(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl copysign(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl nextafter(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl remainder(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl remquo(float, float, int *) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl round(float) throw(); +extern __host__ __device__ __cudart_builtin__ long int __cdecl lround(float) throw(); +extern __host__ __device__ __cudart_builtin__ long long int __cdecl llround(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl trunc(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl rint(float) throw(); +extern __host__ __device__ __cudart_builtin__ long int __cdecl lrint(float) throw(); +extern __host__ __device__ __cudart_builtin__ long long int __cdecl llrint(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl nearbyint(float) throw(); 
+extern __host__ __device__ __cudart_builtin__ float __cdecl fdim(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fma(float, float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fmax(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fmin(float, float) throw(); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +__MATH_FUNCTIONS_DECL__ float exp10(const float a); + +__MATH_FUNCTIONS_DECL__ float rsqrt(const float a); + +__MATH_FUNCTIONS_DECL__ float rcbrt(const float a); + +__MATH_FUNCTIONS_DECL__ float sinpi(const float a); + +__MATH_FUNCTIONS_DECL__ float cospi(const float a); + +__MATH_FUNCTIONS_DECL__ void sincospi(const float a, float *const sptr, float *const cptr); + +__MATH_FUNCTIONS_DECL__ void sincos(const float a, float *const sptr, float *const cptr); + +__MATH_FUNCTIONS_DECL__ float j0(const float a); + +__MATH_FUNCTIONS_DECL__ float j1(const float a); + +__MATH_FUNCTIONS_DECL__ float jn(const int n, const float a); + +__MATH_FUNCTIONS_DECL__ float y0(const float a); + +__MATH_FUNCTIONS_DECL__ float y1(const float a); + +__MATH_FUNCTIONS_DECL__ float yn(const int n, const float a); + +__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i0(const float a); + +__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i1(const float a); + +__MATH_FUNCTIONS_DECL__ float erfinv(const float a); + +__MATH_FUNCTIONS_DECL__ float erfcinv(const float a); + +__MATH_FUNCTIONS_DECL__ float normcdfinv(const float a); + +__MATH_FUNCTIONS_DECL__ float normcdf(const float a); + +__MATH_FUNCTIONS_DECL__ float erfcx(const float a); + +__MATH_FUNCTIONS_DECL__ double copysign(const double a, const float b); + +__MATH_FUNCTIONS_DECL__ double copysign(const float a, const double b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. 
+ */ +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p int and \p unsigned \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int min(const int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p int and \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ long int min(const long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p int and \p unsigned \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int min(const long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p int and \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. 
+ */ +__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ long long int min(const long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p long \p int and \p unsigned \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p long \p int and \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the minimum value of the input \p float arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fminf() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ float min(const float a, const float b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the minimum value of the input \p float arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. 
+ * Behavior is equivalent to ::fmin() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double min(const double a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the minimum value of the input \p float and \p double arguments. + * + * Convert \p float argument \p a to \p double, followed by ::fmin(). + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double min(const float a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the minimum value of the input \p double and \p float arguments. + * + * Convert \p float argument \p b to \p double, followed by ::fmin(). + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double min(const double a, const float b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p int and \p unsigned \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int max(const int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p int and \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. 
+ */ +__MATH_FUNCTIONS_DECL__ long int max(const long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p int and \p unsigned \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int max(const long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p int and \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ long long int max(const long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p long \p int and \p unsigned \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. 
+ */ +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p long \p int and \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the maximum value of the input \p float arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fmaxf() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ float max(const float a, const float b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the maximum value of the input \p float arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fmax() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double max(const double a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the maximum value of the input \p float and \p double arguments. + * + * Convert \p float argument \p a to \p double, followed by ::fmax(). + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double max(const float a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the maximum value of the input \p double and \p float arguments. + * + * Convert \p float argument \p b to \p double, followed by ::fmax(). 
+ * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double max(const double a, const float b); + +#undef __MATH_FUNCTIONS_DECL__ +#undef __MATH_FUNCTIONS_DEVICE_DECL__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ +#undef EXCLUDE_FROM_RTC + +extern "C"{ +inline __device__ void *__nv_aligned_device_malloc(size_t size, size_t align) +{ + __device__ void *__nv_aligned_device_malloc_impl(size_t, size_t); + return __nv_aligned_device_malloc_impl(size, align); +} +} + +#endif /* __cplusplus && __CUDACC__ */ + +#define EXCLUDE_FROM_RTC + +#if !defined(__CUDACC__) + +/******************************************************************************* +* * +* ONLY FOR HOST CODE! NOT FOR DEVICE EXECUTION * +* * +*******************************************************************************/ + +#include + +#if defined(_WIN32) + +#pragma warning(disable : 4211) + +#endif /* _WIN32 */ + +__func__(double rsqrt(double a)); + +__func__(double rcbrt(double a)); + +__func__(double sinpi(double a)); + +__func__(double cospi(double a)); + +__func__(void sincospi(double a, double *sptr, double *cptr)); + +__func__(double erfinv(double a)); + +__func__(double erfcinv(double a)); + +__func__(double normcdfinv(double a)); + +__func__(double normcdf(double a)); + +__func__(double erfcx(double a)); + +__func__(float rsqrtf(float a)); + +__func__(float rcbrtf(float a)); + +__func__(float sinpif(float a)); + +__func__(float cospif(float a)); + +__func__(void sincospif(float a, float *sptr, float *cptr)); + +__func__(float erfinvf(float a)); + +__func__(float erfcinvf(float a)); + +__func__(float normcdfinvf(float a)); + +__func__(float normcdff(float a)); + +__func__(float erfcxf(float a)); + +__func__(int min(int a, int b)); + +__func__(unsigned int umin(unsigned int a, unsigned int b)); + +__func__(long long int llmin(long 
long int a, long long int b)); + +__func__(unsigned long long int ullmin(unsigned long long int a, unsigned long long int b)); + +__func__(int max(int a, int b)); + +__func__(unsigned int umax(unsigned int a, unsigned int b)); + +__func__(long long int llmax(long long int a, long long int b)); + +__func__(unsigned long long int ullmax(unsigned long long int a, unsigned long long int b)); + +#if defined(_WIN32) || defined(__APPLE__) || defined (__ANDROID__) + +__func__(int __isnan(double a)); + +#endif /* _WIN32 || __APPLE__ || __ANDROID__ */ + +#if defined(_WIN32) || defined(__APPLE__) || defined (__QNX__) + +__func__(void sincos(double a, double *sptr, double *cptr)); + +#endif /* _WIN32 || __APPLE__ || __QNX__ */ + +#if defined(_WIN32) || defined(__APPLE__) + +__func__(double exp10(double a)); + +__func__(float exp10f(float a)); + +__func__(void sincosf(float a, float *sptr, float *cptr)); + +__func__(int __isinf(double a)); + +#endif /* _WIN32 || __APPLE__ */ + +#if (defined(_WIN32) && (!defined(_MSC_VER) || _MSC_VER < 1800)) || defined (__ANDROID__) + +__func__(double log2(double a)); + +#endif /* (_WIN32 && (!defined(_MSC_VER) || _MSC_VER < 1800)) || __ANDROID__ */ + +#if defined(_WIN32) + +__func__(int __signbit(double a)); + +__func__(int __finite(double a)); + +__func__(int __signbitl(long double a)); + +__func__(int __signbitf(float a)); + +__func__(int __finitel(long double a)); + +__func__(int __finitef(float a)); + +__func__(int __isinfl(long double a)); + +__func__(int __isinff(float a)); + +__func__(int __isnanl(long double a)); + +__func__(int __isnanf(float a)); + +#endif /* _WIN32 */ + +#if defined(_WIN32) && (!defined(_MSC_VER) || _MSC_VER < 1800) + +__func__(double copysign(double a, double b)); + +__func__(double fmax(double a, double b)); + +__func__(double fmin(double a, double b)); + +__func__(double trunc(double a)); + +__func__(double round(double a)); + +__func__(long int lround(double a)); + +__func__(long long int llround(double a)); + 
+__func__(double rint(double a)); + +__func__(double nearbyint(double a)); + +__func__(long int lrint(double a)); + +__func__(long long int llrint(double a)); + +__func__(double fdim(double a, double b)); + +__func__(double scalbn(double a, int b)); + +__func__(double scalbln(double a, long int b)); + +__func__(double exp2(double a)); + +__func__(double log1p(double a)); + +__func__(double expm1(double a)); + +__func__(double cbrt(double a)); + +__func__(double acosh(double a)); + +__func__(double asinh(double a)); + +__func__(double atanh(double a)); + +__func__(int ilogb(double a)); + +__func__(double logb(double a)); + +__func__(double remquo(double a, double b, int *quo)); + +__func__(double remainder(double a, double b)); + +__func__(double fma (double a, double b, double c)); + +__func__(double nextafter(double a, double b)); + +__func__(double erf(double a)); + +__func__(double erfc(double a)); + +__func__(double lgamma(double a)); + +__func__(unsigned long long int __internal_host_nan_kernel(const char *s)); + +__func__(double nan(const char *tagp)); + +__func__(double __host_tgamma_kernel(double a)); + +__func__(double __host_stirling_poly(double a)); + +__func__(double __host_tgamma_stirling(double a)); + +__func__(double tgamma(double a)); + +__func__(float fmaxf(float a, float b)); + +__func__(float fminf(float a, float b)); + +__func__(float roundf(float a)); + +__func__(long int lroundf(float a)); + +__func__(long long int llroundf(float a)); + +__func__(float truncf(float a)); + +__func__(float rintf(float a)); + +__func__(float nearbyintf(float a)); + +__func__(long int lrintf(float a)); + +__func__(long long int llrintf(float a)); + +__func__(float logbf(float a)); + +__func__(float scalblnf(float a, long int b)); + +__func__(float log2f(float a)); + +__func__(float exp2f(float a)); + +__func__(float acoshf(float a)); + +__func__(float asinhf(float a)); + +__func__(float atanhf(float a)); + +__func__(float cbrtf(float a)); + +__func__(float 
expm1f(float a)); + +__func__(float fdimf(float a, float b)); + +__func__(float log1pf(float a)); + +__func__(float scalbnf(float a, int b)); + +__func__(float fmaf(float a, float b, float c)); + +__func__(int ilogbf(float a)); + +__func__(float erff(float a)); + +__func__(float erfcf(float a)); + +__func__(float lgammaf(float a)); + +__func__(float tgammaf(float a)); + +__func__(float remquof(float a, float b, int *quo)); + +__func__(float remainderf(float a, float b)); + +__func__(float copysignf(float a, float b)); + +__func__(float nextafterf(float a, float b)); + +__func__(float nanf(const char *tagp)); + +#endif /* _WIN32 && (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if defined(_WIN32) + +#pragma warning(default: 4211) + +#endif /* _WIN32 */ + +#endif /* !__CUDACC__ */ + +#undef EXCLUDE_FROM_RTC + +#if !defined(__CUDACC_RTC__) + +#include "math_functions.hpp" + +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__MATH_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3e434aa0c38dd6e62efde83bc1ba3313a8d2bf07 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.hpp @@ -0,0 +1,3391 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. 
Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/math_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/math_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_HPP__ +#endif + +#if !defined(__MATH_FUNCTIONS_HPP__) +#define __MATH_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC_RTC__) + +__host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +__host__ __device__ __cudart_builtin__ int signbit(const double x) { return __signbit(x); } +__host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(static_cast(x));} + +__host__ __device__ __cudart_builtin__ int isfinite(const float x) 
{ return __finitef(x); } +__host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __finite(x); } +__host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __finitel(static_cast(x)); } + +__host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +__host__ __device__ __cudart_builtin__ int isnan(const double x) { return __isnan(x); } +__host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnanl(static_cast(x)); } + +__host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +__host__ __device__ __cudart_builtin__ int isinf(const double x) { return __isinf(x); } +__host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinfl(static_cast(x)); } + +__host__ __device__ __cudart_builtin__ long long int abs(const long long int a) { return llabs(a); } + +__host__ __device__ __cudart_builtin__ long int abs(const long int in) { return llabs(in); } +__host__ __device__ __cudart_builtin__ float abs(const float in) { return fabsf(in); } +__host__ __device__ __cudart_builtin__ double abs(const double in) { return fabs(in); } +__host__ __device__ __cudart_builtin__ float fabs(const float in) { return fabsf(in); } +__host__ __device__ __cudart_builtin__ float ceil(const float in) { return ceilf(in); } +__host__ __device__ __cudart_builtin__ float floor(const float in) { return floorf(in); } +__host__ __device__ __cudart_builtin__ float sqrt(const float in) { return sqrtf(in); } +__host__ __device__ __cudart_builtin__ float pow(const float a, const float b) { return powf(a, b); } +extern "C" __device__ float powif(float, int); +__host__ __device__ __cudart_builtin__ float pow(const float a, const int b) { return powif(a, b); } +extern "C" __device__ double powi(double, int); +__host__ __device__ __cudart_builtin__ double pow(const double a, const int b) { return powi(a, b); } +__host__ __device__ __cudart_builtin__ float 
log(const float in) { return logf(in); } +__host__ __device__ __cudart_builtin__ float log10(const float in) { return log10f(in); } +__host__ __device__ __cudart_builtin__ float fmod(const float a, const float b) { return fmodf(a, b); } +__host__ __device__ __cudart_builtin__ float modf(const float a, float*b) { return modff(a, b); } +__host__ __device__ __cudart_builtin__ float exp(const float in) { return expf(in); } +__host__ __device__ __cudart_builtin__ float frexp(const float a, int*b) { return frexpf(a, b); } +__host__ __device__ __cudart_builtin__ float ldexp(const float a, int b) { return ldexpf(a, b); } +__host__ __device__ __cudart_builtin__ float asin(const float in) { return asinf(in); } +__host__ __device__ __cudart_builtin__ float sin(const float in) { return sinf(in); } +__host__ __device__ __cudart_builtin__ float sinh(const float in) { return sinhf(in); } +__host__ __device__ __cudart_builtin__ float acos(const float in) { return acosf(in); } +__host__ __device__ __cudart_builtin__ float cos(const float in) { return cosf(in); } +__host__ __device__ __cudart_builtin__ float cosh(const float in) { return coshf(in); } +__host__ __device__ __cudart_builtin__ float atan(const float in) { return atanf(in); } +__host__ __device__ __cudart_builtin__ float atan2(const float a, const float b) { return atan2f(a, b); } +__host__ __device__ __cudart_builtin__ float tan(const float in) { return tanf(in); } +__host__ __device__ __cudart_builtin__ float tanh(const float in) { return tanhf(in); } + +#elif defined(__GNUC__) + +#undef signbit +#undef isfinite +#undef isnan +#undef isinf + +#if defined(_LIBCPP_VERSION) +extern "C" __device__ float powif(float, int); +extern "C" __device__ double powi(double, int); +#endif /* _LIBCPP_VERSION */ + +#if defined(__APPLE__) +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return 
__signbitd(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(x);} + +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __isfinitef(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __isfinited(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __isfinite(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) throw() { return __isnand(x); } +#if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnan(x); } +#endif /* defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 */ + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) throw() { return __isinfd(x); } +#if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinf(x); } +#endif /* defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 */ +#else /* __APPLE__ */ + +#if ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) +#if defined(__CUDA_ARCH__) +#define __NV_BUILTIN_FUNC_DECL__ __forceinline__ __host__ __device__ __cudart_builtin__ +#if _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC +__NV_BUILTIN_FUNC_DECL__ int isnan(const double a) throw() { return __isnan(a); } +__NV_BUILTIN_FUNC_DECL__ int isinf(const double x) throw() { return __isinf(x); } +#endif /* _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC */ +#undef __NV_BUILTIN_FUNC_DECL__ +#endif /* __CUDA_ARCH */ +#else /* 
!(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ + +#if defined(__QNX__) +#if defined(__QNX__) && defined(_LIBCPP_VERSION) +static __inline__ __host__ __device__ __cudart_builtin__ bool signbit(const float x) +{ +#if defined(__CUDA_ARCH__) + return (__signbitf(x) != 0); +#else /* !__CUDA_ARCH__ */ + return signbit(x); +#endif /* __CUDA_ARCH__ */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool signbit(const double x) +{ +#if defined(__CUDA_ARCH__) + return (__signbit(x) != 0); +#else /* !__CUDA_ARCH__ */ + return signbit(x); +#endif /* __CUDA_ARCH__ */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool signbit(const long double x) +{ +#if defined(__CUDA_ARCH__) + return (__signbitl(x) != 0); +#else /* !__CUDA_ARCH__ */ + return signbit(x); +#endif /* __CUDA_ARCH__ */ +} +#endif /* (__QNX__ && _LIBCPP_VERSION) */ + +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__finitel(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__finite(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__finitef(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} + +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const double a) +{ +#if defined(__CUDA_ARCH__) + 
return (__isnan(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} + +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinfl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isinff(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} + +#elif ( (defined(__ANDROID__) || defined(__HORIZON__)) && defined(_LIBCPP_VERSION)) +#if defined(__CUDA_ARCH__) +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return __signbit(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(x);} + +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __finitef(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __finite(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __finitel(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) { return __isnan(x); } +#if _LIBCPP_VERSION < 
8000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnanl(x); } +#endif /* _LIBCPP_VERSION < 8000 */ + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) { return __isinf(x); } +#if _LIBCPP_VERSION < 8000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinfl(x); } +#endif /* _LIBCPP_VERSION < 8000 */ +#else /* !defined(__CUDA_ARCH__) */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return signbit(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return signbit(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return signbit(x);} + +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return isfinite(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return isfinite(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return isfinite(x); } + +#if _LIBCPP_VERSION < 8000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return isnan(x); } +/* int isnan(double) provided by math.h */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return isnan(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return isinf(x); } +/* int isinf(double) provided by math.h */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return isinf(x); } +#endif /* _LIBCPP_VERSION < 8000 */ + +#endif /* defined(__CUDA_ARCH__) */ + +#else /* !__QNX__ */ +__forceinline__ 
__host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +#if defined(__ICC) +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) throw() { return __signbit(x); } +#else /* !__ICC */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return __signbit(x); } +#endif /* __ICC */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(x);} + +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { +#if defined(__CUDA_ARCH__) + return __finitef(x); +#else /* !__CUDA_ARCH__ */ + return __isfinitef(x); +#endif /* __CUDA_ARCH__ */ +} +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __finitef(x); } +#endif /* __ANDROID__ */ + +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) +{ +#ifdef __CUDA_ARCH__ + return __finite(x); +#else /* !__CUDA_ARCH__ */ + return __isfinite(x); +#endif /* __CUDA_ARCH__ */ +} +#elif defined(__ICC) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) throw() { return __finite(x); } +#else +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __finite(x); } +#endif /* __ANDROID__ */ + +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) +{ +#ifdef __CUDA_ARCH__ + return __finitel(x); +#else /* !__CUDA_ARCH__ */ + return __isfinitel(x); +#endif /* __CUDA_ARCH__ */ +} +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __finitel(x); } +#endif /* __ANDROID__ */ + +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +#if defined(__ANDROID__) +__forceinline__ 
__host__ __device__ __cudart_builtin__ int isnan(const double x) { return __isnan(x); } +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) throw() { return __isnan(x); } +#endif /* __ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnanl(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) { return __isinf(x); } +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) throw() { return __isinf(x); } +#endif /* __ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinfl(x); } +#endif /* __QNX__ || __HORIZON__ */ + +#endif /* ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) */ +#endif /* __APPLE__ */ + +#if defined(__arm__) && !defined(_STLPORT_VERSION) && !_GLIBCXX_USE_C99 +#if !defined(__ANDROID__) || (!defined(_LIBCPP_VERSION) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8))) + +#if !defined(__QNX__) && !defined(__HORIZON__) +static __inline__ __host__ __device__ __cudart_builtin__ long long int abs(const long long int a) +{ + return llabs(a); +} +#endif /* !__QNX__ && !__HORIZON__*/ + +#endif /* !defined(__ANDROID__) || (!defined(_LIBCPP_VERSION) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8))) */ +#endif /* __arm__ && !_STLPORT_VERSION && !_GLIBCXX_USE_C99 */ + +#elif defined(_WIN32) + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int signbit(const long double a) +{ + return __signbitl(a); +} + +static __inline__ __host__ __device__ __cudart_builtin__ int signbit(const double a) +{ + return __signbit(a); +} + +static __inline__ __host__ __device__ __cudart_builtin__ int 
signbit(const float a) +{ + return __signbitf(a); +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isinf(const long double a) +{ + return __isinfl(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinfl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isinf(const double a) +{ + return __isinf(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isinf(const float a) +{ + return __isinff(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ bool isinf(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isinff(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isnan(const long double a) +{ + return __isnanl(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanl(a) != 0); +#else /* 
defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isnan(const double a) +{ + return __isnan(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnan(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isnan(const float a) +{ + return __isnanf(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double a) +{ + return __finitel(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__finitel(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isfinite(const double a) +{ + return __finite(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const double a) +{ +#if 
defined(__CUDA_ARCH__) + return (__finite(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isfinite(const float a) +{ + return __finitef(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__finitef(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#endif /* __CUDACC_RTC__ */ + +#if defined(__CUDACC_RTC__) +#define __MATH_FUNCTIONS_DECL__ __host__ __device__ +#define __MATH_FUNCTIONS_DEVICE_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __MATH_FUNCTIONS_DECL__ static inline __host__ __device__ +#define __MATH_FUNCTIONS_DEVICE_DECL__ static inline __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) || _MSC_VER < 1800) +#if defined(__QNX__) && defined(_LIBCPP_VERSION) +_LIBCPP_BEGIN_NAMESPACE_STD +#endif /* __QNX__ && _LIBCPP_VERSION */ +#if !defined(__QNX__) && !(defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800) +#if !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) +__MATH_FUNCTIONS_DECL__ float logb(const float a) +{ + return logbf(a); +} + +__MATH_FUNCTIONS_DECL__ int ilogb(const float a) +{ + return ilogbf(a); +} + +__MATH_FUNCTIONS_DECL__ float scalbn(const float a, const int b) +{ + return scalbnf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float scalbln(const float a, const long int b) +{ + return scalblnf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float exp2(const float a) +{ + return exp2f(a); +} + +__MATH_FUNCTIONS_DECL__ float expm1(const float a) +{ + return expm1f(a); +} + +__MATH_FUNCTIONS_DECL__ float log2(const float a) 
/*
 * Single-precision (float) C++ overloads of the <math.h> double entry points.
 * Each overload simply forwards to the corresponding CUDA "f"-suffixed
 * implementation; no additional behavior is introduced here.
 *
 * NOTE(review): this excerpt opens with the tail of log2(float) — its
 * signature lies before this chunk, so only the body appears below.
 */
{
  return log2f(a);
}

__MATH_FUNCTIONS_DECL__ float log1p(const float a) { return log1pf(a); }
__MATH_FUNCTIONS_DECL__ float acosh(const float a) { return acoshf(a); }
__MATH_FUNCTIONS_DECL__ float asinh(const float a) { return asinhf(a); }
__MATH_FUNCTIONS_DECL__ float atanh(const float a) { return atanhf(a); }
__MATH_FUNCTIONS_DECL__ float hypot(const float a, const float b) { return hypotf(a, b); }
__MATH_FUNCTIONS_DECL__ float cbrt(const float a) { return cbrtf(a); }
__MATH_FUNCTIONS_DECL__ float erf(const float a) { return erff(a); }
__MATH_FUNCTIONS_DECL__ float erfc(const float a) { return erfcf(a); }
__MATH_FUNCTIONS_DECL__ float lgamma(const float a) { return lgammaf(a); }
__MATH_FUNCTIONS_DECL__ float tgamma(const float a) { return tgammaf(a); }
__MATH_FUNCTIONS_DECL__ float copysign(const float a, const float b) { return copysignf(a, b); }
__MATH_FUNCTIONS_DECL__ float nextafter(const float a, const float b) { return nextafterf(a, b); }
__MATH_FUNCTIONS_DECL__ float remainder(const float a, const float b) { return remainderf(a, b); }
__MATH_FUNCTIONS_DECL__ float remquo(const float a, const float b, int *quo) { return remquof(a, b, quo); }
/* Rounding family; l/ll variants return integer types per ISO C. */
__MATH_FUNCTIONS_DECL__ float round(const float a) { return roundf(a); }
__MATH_FUNCTIONS_DECL__ long int lround(const float a) { return lroundf(a); }
__MATH_FUNCTIONS_DECL__ long long int llround(const float a) { return llroundf(a); }
__MATH_FUNCTIONS_DECL__ float trunc(const float a) { return truncf(a); }
__MATH_FUNCTIONS_DECL__ float rint(const float a) { return rintf(a); }
__MATH_FUNCTIONS_DECL__ long int lrint(const float a) { return lrintf(a); }
__MATH_FUNCTIONS_DECL__ long long int llrint(const float a) { return llrintf(a); }
__MATH_FUNCTIONS_DECL__ float nearbyint(const float a) { return nearbyintf(a); }
__MATH_FUNCTIONS_DECL__ float fdim(const float a, const float b) { return fdimf(a, b); }
__MATH_FUNCTIONS_DECL__ float fma(const float a, const float b, const float c) { return fmaf(a, b, c); }
__MATH_FUNCTIONS_DECL__ float fmax(const float a, const float b) { return fmaxf(a, b); }
__MATH_FUNCTIONS_DECL__ float fmin(const float a, const float b) { return fminf(a, b); }
#endif /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */
#endif /* !(!defined(__QNX__) && !(defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800)) */
#if defined(__QNX__) && defined(_LIBCPP_VERSION)
_LIBCPP_END_NAMESPACE_STD
#endif
#endif /* __CUDACC_RTC__ || (!defined(_MSC_VER) || _MSC_VER < 1800) */

/* CUDA-specific float routines with no ISO C counterpart. */
__MATH_FUNCTIONS_DECL__ float exp10(const float a) { return exp10f(a); }
__MATH_FUNCTIONS_DECL__ float rsqrt(const float a) { return rsqrtf(a); }
__MATH_FUNCTIONS_DECL__ float rcbrt(const float a) { return rcbrtf(a); }
__MATH_FUNCTIONS_DECL__ float sinpi(const float a) { return sinpif(a); }
__MATH_FUNCTIONS_DECL__ float cospi(const float a) { return cospif(a); }
/* Combined sin/cos results are written through the output pointers. */
__MATH_FUNCTIONS_DECL__ void sincospi(const float a, float *const sptr, float *const cptr) { sincospif(a, sptr, cptr); }
__MATH_FUNCTIONS_DECL__ void sincos(const float a, float *const sptr, float *const cptr) { sincosf(a, sptr, cptr); }
/* Bessel functions of the first (j*) and second (y*) kind. */
__MATH_FUNCTIONS_DECL__ float j0(const float a) { return j0f(a); }
__MATH_FUNCTIONS_DECL__ float j1(const float a) { return j1f(a); }
__MATH_FUNCTIONS_DECL__ float jn(const int n, const float a) { return jnf(n, a); }
__MATH_FUNCTIONS_DECL__ float y0(const float a) { return y0f(a); }
__MATH_FUNCTIONS_DECL__ float y1(const float a) { return y1f(a); }
__MATH_FUNCTIONS_DECL__ float yn(const int n, const float a) { return ynf(n, a); }
/* Modified Bessel functions use the device-only declaration macro. */
__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i0(const float a) { return cyl_bessel_i0f(a); }
__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i1(const float a)
{
return cyl_bessel_i1f(a); +} + +__MATH_FUNCTIONS_DECL__ float erfinv(const float a) +{ + return erfinvf(a); +} + +__MATH_FUNCTIONS_DECL__ float erfcinv(const float a) +{ + return erfcinvf(a); +} + +__MATH_FUNCTIONS_DECL__ float normcdfinv(const float a) +{ + return normcdfinvf(a); +} + +__MATH_FUNCTIONS_DECL__ float normcdf(const float a) +{ + return normcdff(a); +} + +__MATH_FUNCTIONS_DECL__ float erfcx(const float a) +{ + return erfcxf(a); +} + +__MATH_FUNCTIONS_DECL__ double copysign(const double a, const float b) +{ + return copysign(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ double copysign(const float a, const double b) +{ + return copysign(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const unsigned int b) +{ + return umin(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int min(const int a, const unsigned int b) +{ + return umin(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const int b) +{ + return umin(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ long int min(const long int a, const long int b) +{ + long int retval; + /* Suppress VS warning: warning C4127: conditional expression is constant */ +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + /* long can be of 32-bit type on some systems. 
*/ + if (sizeof(long int) == sizeof(int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(min(static_cast(a), static_cast(b))); + } else { + retval = static_cast(llmin(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const unsigned long int b) +{ + unsigned long int retval; +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(unsigned long int) == sizeof(unsigned int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(umin(static_cast(a), static_cast(b))); + } else { + retval = static_cast(ullmin(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ unsigned long int min(const long int a, const unsigned long int b) +{ + unsigned long int retval; +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(unsigned long int) == sizeof(unsigned int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(umin(static_cast(a), static_cast(b))); + } else { + retval = static_cast(ullmin(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const long int b) +{ + unsigned long int retval; +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(unsigned long int) == sizeof(unsigned int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && 
!defined(__CUDA_ARCH__) */ + retval = static_cast(umin(static_cast(a), static_cast(b))); + } else { + retval = static_cast(ullmin(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ long long int min(const long long int a, const long long int b) +{ + return llmin(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const unsigned long long int b) +{ + return ullmin(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const long long int a, const unsigned long long int b) +{ + return ullmin(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const long long int b) +{ + return ullmin(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ float min(const float a, const float b) +{ + return fminf(a, b); +} + +__MATH_FUNCTIONS_DECL__ double min(const double a, const double b) +{ + return fmin(a, b); +} + +__MATH_FUNCTIONS_DECL__ double min(const float a, const double b) +{ + return fmin(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ double min(const double a, const float b) +{ + return fmin(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const unsigned int b) +{ + return umax(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int max(const int a, const unsigned int b) +{ + return umax(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const int b) +{ + return umax(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ long int max(const long int a, const long int b) +{ + long int retval; + /* long can be of 32-bit type on some systems. 
*/ +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(long int) == sizeof(int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(max(static_cast(a), static_cast(b))); + } else { + retval = static_cast(llmax(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const unsigned long int b) +{ + unsigned long int retval; +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(unsigned long int) == sizeof(unsigned int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(umax(static_cast(a), static_cast(b))); + } else { + retval = static_cast(ullmax(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ unsigned long int max(const long int a, const unsigned long int b) +{ + unsigned long int retval; +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(unsigned long int) == sizeof(unsigned int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(umax(static_cast(a), static_cast(b))); + } else { + retval = static_cast(ullmax(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const long int b) +{ + unsigned long int retval; +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (disable: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + if (sizeof(unsigned long int) == 
sizeof(unsigned int)) { +#if defined(_MSC_VER) && !defined(__CUDA_ARCH__) +#pragma warning (default: 4127) +#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */ + retval = static_cast(umax(static_cast(a), static_cast(b))); + } else { + retval = static_cast(ullmax(static_cast(a), static_cast(b))); + } + return retval; +} + +__MATH_FUNCTIONS_DECL__ long long int max(const long long int a, const long long int b) +{ + return llmax(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const unsigned long long int b) +{ + return ullmax(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const long long int a, const unsigned long long int b) +{ + return ullmax(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const long long int b) +{ + return ullmax(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ float max(const float a, const float b) +{ + return fmaxf(a, b); +} + +__MATH_FUNCTIONS_DECL__ double max(const double a, const double b) +{ + return fmax(a, b); +} + +__MATH_FUNCTIONS_DECL__ double max(const float a, const double b) +{ + return fmax(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ double max(const double a, const float b) +{ + return fmax(a, static_cast(b)); +} + + +#if !defined(__CUDA_ARCH__) +#if defined(_WIN32) +#define __HELPER_FUNC_LINKAGE static inline __host__ __device__ +#pragma warning(disable : 4211) +#else /* !defined(_WIN32) */ +#define __HELPER_FUNC_LINKAGE inline __host__ __device__ +#endif /* defined(_WIN32) */ + +__HELPER_FUNC_LINKAGE int min(const int a, const int b) +{ + return (a < b) ? a : b; +} + +__HELPER_FUNC_LINKAGE unsigned int umin(const unsigned int a, const unsigned int b) +{ + return (a < b) ? a : b; +} + +__HELPER_FUNC_LINKAGE long long int llmin(const long long int a, const long long int b) +{ + return (a < b) ? 
a : b; +} + +__HELPER_FUNC_LINKAGE unsigned long long int ullmin(const unsigned long long int a, + const unsigned long long int b) +{ + return (a < b) ? a : b; +} + +__HELPER_FUNC_LINKAGE int max(const int a, const int b) +{ + return (a > b) ? a : b; +} + +__HELPER_FUNC_LINKAGE unsigned int umax(const unsigned int a, const unsigned int b) +{ + return (a > b) ? a : b; +} + +__HELPER_FUNC_LINKAGE long long int llmax(const long long int a, const long long int b) +{ + return (a > b) ? a : b; +} + +__HELPER_FUNC_LINKAGE unsigned long long int ullmax(const unsigned long long int a, + const unsigned long long int b) +{ + return (a > b) ? a : b; +} + +#if defined(_WIN32) +#pragma warning(default: 4211) +#endif /* defined(_WIN32) */ + +#undef __HELPER_FUNC_LINKAGE + +#endif /* !defined(__CUDA_ARCH__) */ + +#undef __MATH_FUNCTIONS_DECL__ +#undef __MATH_FUNCTIONS_DEVICE_DECL__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* __cplusplus && __CUDACC__ */ +#if !defined(__CUDACC__) + +#include "host_defines.h" +#include "math_constants.h" + +#define __cuda_INT_MAX \ + ((int)((unsigned int)-1 >> 1)) + +/******************************************************************************* +* * +* ONLY FOR HOST CODE! 
NOT FOR DEVICE EXECUTION * +* * +*******************************************************************************/ + +#include + +#if defined(_WIN32) + +#pragma warning(disable : 4211) + +#endif /* _WIN32 */ + +#if defined(_WIN32) || defined(__APPLE__) || defined (__ANDROID__) || defined(__QNX__) + +__func__(int __isnan(const double a)) +{ + unsigned long long int l; + memcpy(&l, &a, sizeof(double)); + return (l << 1ULL) > 0xffe0000000000000ULL; +} + +#endif /* _WIN32 || __APPLE__ || __ANDROID__ || __QNX__ */ + +#if defined(_WIN32) || defined(__APPLE__) || defined(__QNX__) + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR DOUBLE ROUTINES FOR WINDOWS & APPLE PLATFORMS * +* * +*******************************************************************************/ + +__func__(double exp10(const double a)) +{ + return pow(10.0, a); +} + +__func__(float exp10f(const float a)) +{ + return static_cast(exp10(static_cast(a))); +} + +__func__(void sincos(const double a, double *sptr, double *cptr)) +{ + *sptr = sin(a); + *cptr = cos(a); +} + +__func__(void sincosf(const float a, float *sptr, float *cptr)) +{ + double s, c; + + sincos(static_cast(a), &s, &c); + *sptr = static_cast(s); + *cptr = static_cast(c); +} + +__func__(int __isinf(const double a)) +{ + unsigned long long int l; + memcpy(&l, &a, sizeof(double)); + return (l << 1ULL) == 0xffe0000000000000ULL; +} + +#endif /* _WIN32 || __APPLE__ */ + +#if defined(_WIN32) || defined (__ANDROID__) + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +__func__(double log2(const double a)) +{ + return log(a) * 1.44269504088896340; +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#endif /* _WIN32 || __ANDROID__ */ + +#if defined(_WIN32) + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR DOUBLE ROUTINES FOR WINDOWS PLATFORM * +* * 
*******************************************************************************/

/* Sign-bit test via the raw IEEE-754 pattern (correct for -0.0 and NaN too). */
__func__(int __signbit(double a))
{
  signed long long int l;
  memcpy(&l, &a, sizeof(double));
  return l < 0LL;
}

#if (!defined(_MSC_VER) || _MSC_VER < 1800)
/* Bit-level copysign: magnitude of a combined with the sign bit of b. */
__func__(double copysign(double a, double b))
{
  unsigned long long int la, lb;
  memcpy(&la, &a, sizeof(double));
  memcpy(&lb, &b, sizeof(double));
  la = (la & 0x7fffffffffffffffULL) | (lb & 0x8000000000000000ULL);
  memcpy(&a, &la, sizeof(double));
  return a;
}
#endif /* MSC_VER < 1800 */

/* Finite test: strictly below the infinity pattern after dropping the sign. */
__func__(int __finite(double a))
{
  unsigned long long int l;
  memcpy(&l, &a, sizeof(double));
  return (l << 1ULL) < 0xffe0000000000000ULL;
}

#if (!defined(_MSC_VER) || _MSC_VER < 1800)
/* IEEE fmax semantics: a single NaN operand is ignored; +0 preferred over -0. */
__func__(double fmax(double a, double b))
{
  if (__isnan(a) && __isnan(b)) return a + b;
  if (__isnan(a)) return b;
  if (__isnan(b)) return a;
  if ((a == 0.0) && (b == 0.0) && __signbit(b)) return a;
  return a > b ? a : b;
}

/* IEEE fmin semantics: mirror of fmax; -0 preferred over +0. */
__func__(double fmin(double a, double b))
{
  if (__isnan(a) && __isnan(b)) return a + b;
  if (__isnan(a)) return b;
  if (__isnan(b)) return a;
  if ((a == 0.0) && (b == 0.0) && __signbit(a)) return a;
  return a < b ? a : b;
}

/* Truncate toward zero. */
__func__(double trunc(double a))
{
  return a < 0.0 ? ceil(a) : floor(a);
}

/* Round half away from zero; |a| > 2^52 is already integral. */
__func__(double round(double a))
{
  double fa = fabs(a);

  if (fa > CUDART_TWO_TO_52) {
    return a;
  } else {
    double u = floor(fa + 0.5);
    if (fa < 0.5) u = 0;
    u = copysign (u, a);
    return u;
  }
}

/* FIX(review): static_cast template arguments below restored (stripped by
   extraction); types follow each function's return type. */
__func__(long int lround(double a))
{
  return static_cast<long int>(round(a));
}

__func__(long long int llround(double a))
{
  return static_cast<long long int>(round(a));
}

/* Round to nearest-even via the classic 2^52 addition trick. */
__func__(double rint(double a))
{
  double fa = fabs(a);
  double u = CUDART_TWO_TO_52 + fa;
  if (fa >= CUDART_TWO_TO_52) {
    u = a;
  } else {
    u = u - CUDART_TWO_TO_52;
    u = copysign (u, a);
  }
  return u;
}

__func__(double nearbyint(double a))
{
  return rint(a);
}

__func__(long int lrint(double a))
{
  return static_cast<long int>(rint(a));
}

__func__(long long int llrint(double a))
{
  return static_cast<long long int>(rint(a));
}

/* Positive difference; NaN operands propagate through the trailing branches. */
__func__(double fdim(double a, double b))
{
  if (a > b) {
    return (a - b);
  } else if (a <= b) {
    return 0.0;
  } else if (__isnan(a)) {
    return a;
  } else {
    return b;
  }
}

__func__(double scalbn(double a, int b))
{
  return ldexp(a, b);
}

/* scalbln: clamp the long exponent into int range, then scalbn. */
__func__(double scalbln(double a, long int b))
{
  int t;

  if (b > 2147483647L) {
    t = 2147483647;
  } else if (b < (-2147483647 - 1)) {
    t = (-2147483647 - 1);
  } else {
    t = static_cast<int>(b);
  }
  return scalbn(a, t);
}

__func__(double exp2(double a))
{
  return pow(2.0, a);
}

/*
 * The following is based on: David Goldberg, "What every computer scientist
 * should know about floating-point arithmetic", ACM Computing Surveys, Volume
 * 23, Issue 1, March 1991.
+ */ +__func__(double log1p(double a)) +{ + volatile double u, m; + + u = 1.0 + a; + if (u == 1.0) { + /* a very close to zero */ + u = a; + } else { + m = u - 1.0; + u = log(u); + if (a < 1.0) { + /* a somewhat close to zero */ + u = a * u; + u = u / m; + } + } + return u; +} + +/* + * This code based on: http://www.cs.berkeley.edu/~wkahan/Math128/Sumnfp.pdf + */ +__func__(double expm1(double a)) +{ + volatile double u, m; + + u = exp(a); + m = u - 1.0; + if (m == 0.0) { + /* a very close zero */ + m = a; + } + else if (fabs(a) < 1.0) { + /* a somewhat close zero */ + u = log(u); + m = m * a; + m = m / u; + } + return m; +} + +__func__(double cbrt(double a)) +{ + double s, t; + + if (a == 0.0 || __isinf(a)) { + return a; + } + s = fabs(a); + t = exp2(CUDART_THIRD * log2(s)); /* initial approximation */ + t = t - (t - (s / (t * t))) * CUDART_THIRD; /* refine approximation */ + t = copysign(t, a); + return t; +} + +__func__(double acosh(double a)) +{ + double s, t; + + t = a - 1.0; + if (t == a) { + return log(2.0) + log(a); + } else { + s = a + 1.0; + t = t + sqrt(s * t); + return log1p(t); + } +} + +__func__(double asinh(double a)) +{ + double fa, oofa, t; + + fa = fabs(a); + if (fa > 1e18) { + t = log(2.0) + log(fa); + } else { + oofa = 1.0 / fa; + t = fa + fa / (oofa + sqrt(1.0 + oofa * oofa)); + t = log1p(t); + } + t = copysign(t, a); + return t; +} + +__func__(double atanh(double a)) +{ + double fa, t; + + if (__isnan(a)) { + return a + a; + } + fa = fabs(a); + t = (2.0 * fa) / (1.0 - fa); + t = 0.5 * log1p(t); + if (__isnan(t) || !__signbit(a)) { + return t; + } + return -t; +} + +__func__(int ilogb(double a)) +{ + unsigned long long int i; + int expo = -1022; + + if (__isnan(a)) return -__cuda_INT_MAX-1; + if (__isinf(a)) return __cuda_INT_MAX; + memcpy(&i, &a, sizeof(double)); + i = i & 0x7fffffffffffffffULL; + if (i == 0) return -__cuda_INT_MAX-1; + if (i >= 0x0010000000000000ULL) { + return (int)(((i >> 52ULL) & 0x7ffU) - 1023); + } + while (i < 
0x0010000000000000ULL) { + expo--; + i <<= 1; + } + return expo; +} + +__func__(double logb(double a)) +{ + unsigned long long int i; + int expo = -1022; + + if (__isnan(a)) return a + a; + if (__isinf(a)) return fabs(a); + memcpy(&i, &a, sizeof(double)); + i = i & 0x7fffffffffffffffULL; + if (i == 0) return -1.0/fabs(a); + if (i >= 0x0010000000000000ULL) { + return (double)((int)((i >> 52ULL) & 0x7ffU) - 1023); + } + while (i < 0x0010000000000000ULL) { + expo--; + i <<= 1; + } + return static_cast(expo); +} + +__func__(double remquo(double a, double b, int *quo)) +{ + unsigned long long int aa, bb; + int rem1 = 1; /* do FPREM1, a.k.a IEEE remainder */ + int expo_a; + int expo_b; + unsigned long long mant_a; + unsigned long long mant_b; + unsigned long long mant_c; + unsigned long long temp; + int sign_a; + int sign_b; + int sign_c; + int expo_c; + int expodiff; + int quot = 0; /* initialize quotient */ + int l; + int iter; + + memcpy(&aa, &a, sizeof(double)); + mant_a = (aa << 11ULL) | 0x8000000000000000ULL; + expo_a = (int)((aa >> 52ULL) & 0x7ffU) - 1023; + sign_a = (int)(aa >> 63ULL); + + memcpy(&bb, &b, sizeof(double)); + mant_b = (bb << 11ULL) | 0x8000000000000000ULL; + expo_b = (int)((bb >> 52ULL) & 0x7ffU) - 1023; + sign_b = (int)(bb >> 63ULL); + + sign_c = sign_a; /* remainder has sign of dividend */ + expo_c = expo_a; /* default */ + + /* handled NaNs and infinities */ + if (__isnan(a) || __isnan(b)) { + *quo = quot; + return a + b; + } + if (__isinf(a) || (b == 0.0)) { + *quo = quot; + aa = 0xfff8000000000000ULL; + memcpy(&a, &aa, sizeof(double)); + return a; + } + if ((a == 0.0) || (__isinf(b))) { + *quo = quot; + return a; + } + /* normalize denormals */ + if (expo_a < -1022) { + mant_a = mant_a + mant_a; + while (mant_a < 0x8000000000000000ULL) { + mant_a = mant_a + mant_a; + expo_a--; + } + } + if (expo_b < -1022) { + mant_b = mant_b + mant_b; + while (mant_b < 0x8000000000000000ULL) { + mant_b = mant_b + mant_b; + expo_b--; + } + } + expodiff = 
expo_a - expo_b; + /* clamp iterations if exponent difference negative */ + if (expodiff < 0) { + iter = -1; + } else { + iter = expodiff; + } + /* Shift dividend and divisor right by one bit to prevent overflow + during the division algorithm. + */ + mant_a = mant_a >> 1ULL; + mant_b = mant_b >> 1ULL; + expo_c = expo_a - iter; /* default exponent of result */ + + /* Use binary longhand division (restoring) */ + for (l = 0; l < (iter + 1); l++) { + mant_a = mant_a - mant_b; + if (mant_a & 0x8000000000000000ULL) { + mant_a = mant_a + mant_b; + quot = quot + quot; + } else { + quot = quot + quot + 1; + } + mant_a = mant_a + mant_a; + } + + /* Save current remainder */ + mant_c = mant_a; + /* If remainder's mantissa is all zeroes, final result is zero. */ + if (mant_c == 0) { + quot = quot & 7; + *quo = (sign_a ^ sign_b) ? -quot : quot; + aa = static_cast(sign_c) << 63ULL; + memcpy(&a, &aa, sizeof(double)); + return a; + } + /* Normalize result */ + while (!(mant_c & 0x8000000000000000ULL)) { + mant_c = mant_c + mant_c; + expo_c--; + } + /* For IEEE remainder (quotient rounded to nearest-even we might need to + do a final subtraction of the divisor from the remainder. + */ + if (rem1 && ((expodiff+1) >= 0)) { + temp = mant_a - mant_b; + /* round quotient to nearest even */ + if (((temp != 0ULL) && (!(temp & 0x8000000000000000ULL))) || + ((temp == 0ULL) && (quot & 1))) { + mant_a = mant_a >> 1ULL; + quot++; + /* Since the divisor is greater than the remainder, the result will + have opposite sign of the dividend. 
To avoid a negative mantissa + when subtracting the divisor from remainder, reverse subtraction + */ + sign_c = 1 ^ sign_c; + expo_c = expo_a - iter + 1; + mant_c = mant_b - mant_a; + /* normalize result */ + while (!(mant_c & 0x8000000000000000ULL)) { + mant_c = mant_c + mant_c; + expo_c--; + } + } + } + /* package up result */ + if (expo_c >= -1022) { /* normal */ + mant_c = ((mant_c >> 11ULL) + + (((static_cast(sign_c)) << 63ULL) + + (((unsigned long long)(expo_c + 1022)) << 52ULL))); + } else { /* denormal */ + mant_c = (((static_cast(sign_c)) << 63ULL) + + (mant_c >> (unsigned long long)(11 - expo_c - 1022))); + } + quot = quot & 7; /* mask quotient down to least significant three bits */ + *quo = (sign_a ^ sign_b) ? -quot : quot; + memcpy(&a, &mant_c, sizeof(double)); + return a; +} + +__func__(double remainder(double a, double b)) +{ + int quo; + return remquo (a, b, &quo); +} + +__func__(double fma (double a, double b, double c)) +{ + struct { + unsigned int lo; + unsigned int hi; + } xx, yy, zz, ww; + double d; + unsigned int s, t, u, prod0, prod1, prod2, prod3, expo_x, expo_y, expo_z; + + memcpy(&xx, &a, sizeof(double)); + memcpy(&yy, &b, sizeof(double)); + memcpy(&zz, &c, sizeof(double)); + + expo_z = 0x7FFU; + t = xx.hi >> 20; + expo_x = expo_z & t; + expo_x = expo_x - 1; /* expo(x) - 1 */ + t = yy.hi >> 20; + expo_y = expo_z & t; + expo_y = expo_y - 1; /* expo(y) - 1 */ + t = zz.hi >> 20; + expo_z = expo_z & t; + expo_z = expo_z - 1; /* expo(z) - 1 */ + + if (!((expo_x <= 0x7FDU) && + (expo_y <= 0x7FDU) && + (expo_z <= 0x7FDU))) { + + /* fma (nan, y, z) --> nan + fma (x, nan, z) --> nan + fma (x, y, nan) --> nan + */ + if (((yy.hi << 1) | (yy.lo != 0)) > 0xffe00000U) { + yy.hi |= 0x00080000U; + memcpy(&d, &yy, sizeof(double)); + return d; + } + if (((zz.hi << 1) | (zz.lo != 0)) > 0xffe00000U) { + zz.hi |= 0x00080000U; + memcpy(&d, &zz, sizeof(double)); + return d; + } + if (((xx.hi << 1) | (xx.lo != 0)) > 0xffe00000U) { + xx.hi |= 0x00080000U; + 
memcpy(&d, &xx, sizeof(double)); + return d; + } + + /* fma (0, inf, z) --> INDEFINITE + fma (inf, 0, z) --> INDEFINITE + fma (-inf,+y,+inf) --> INDEFINITE + fma (+x,-inf,+inf) --> INDEFINITE + fma (+inf,-y,+inf) --> INDEFINITE + fma (-x,+inf,+inf) --> INDEFINITE + fma (-inf,-y,-inf) --> INDEFINITE + fma (-x,-inf,-inf) --> INDEFINITE + fma (+inf,+y,-inf) --> INDEFINITE + fma (+x,+inf,-inf) --> INDEFINITE + */ + if (((((xx.hi << 1) | xx.lo) == 0) && + (((yy.hi << 1) | (yy.lo != 0)) == 0xffe00000U)) || + ((((yy.hi << 1) | yy.lo) == 0) && + (((xx.hi << 1) | (xx.lo != 0)) == 0xffe00000U))) { + xx.hi = 0xfff80000U; + xx.lo = 0x00000000U; + memcpy(&d, &xx, sizeof(double)); + return d; + } + if (((zz.hi << 1) | (zz.lo != 0)) == 0xffe00000U) { + if ((((yy.hi << 1) | (yy.lo != 0)) == 0xffe00000U) || + (((xx.hi << 1) | (xx.lo != 0)) == 0xffe00000U)) { + if ((int)(xx.hi ^ yy.hi ^ zz.hi) < 0) { + xx.hi = 0xfff80000U; + xx.lo = 0x00000000U; + memcpy(&d, &xx, sizeof(double)); + return d; + } + } + } + /* fma (inf, y, z) --> inf + fma (x, inf, z) --> inf + fma (x, y, inf) --> inf + */ + if (((xx.hi << 1) | (xx.lo != 0)) == 0xffe00000U) { + xx.hi = xx.hi ^ (yy.hi & 0x80000000U); + memcpy(&d, &xx, sizeof(double)); + return d; + } + if (((yy.hi << 1) | (yy.lo != 0)) == 0xffe00000U) { + yy.hi = yy.hi ^ (xx.hi & 0x80000000U); + memcpy(&d, &yy, sizeof(double)); + return d; + } + if (((zz.hi << 1) | (zz.lo != 0)) == 0xffe00000U) { + memcpy(&d, &zz, sizeof(double)); + return d; + } + /* fma (+0, -y, -0) --> -0 + fma (-0, +y, -0) --> -0 + fma (+x, -0, -0) --> -0 + fma (-x, +0, -0) --> -0 + */ + if ((zz.hi == 0x80000000U) && (zz.lo == 0)) { + if ((((xx.hi << 1) | xx.lo) == 0) || + (((yy.hi << 1) | yy.lo) == 0)) { + if ((int)(xx.hi ^ yy.hi) < 0) { + memcpy(&d, &zz, sizeof(double)); + return d; + } + } + } + /* fma (0, y, 0) --> +0 (-0 if round down and signs of addend differ) + fma (x, 0, 0) --> +0 (-0 if round down and signs of addend differ) + */ + if ((((zz.hi << 1) | zz.lo) == 0) && + 
((((xx.hi << 1) | xx.lo) == 0) || + (((yy.hi << 1) | yy.lo) == 0))) { + zz.hi &= 0x7fffffffU; + memcpy(&d, &zz, sizeof(double)); + return d; + } + + /* fma (0, y, z) --> z + fma (x, 0, z) --> z + */ + if ((((xx.hi << 1) | xx.lo) == 0) || + (((yy.hi << 1) | yy.lo) == 0)) { + memcpy(&d, &zz, sizeof(double)); + return d; + } + + if (expo_x == 0xffffffffU) { + expo_x++; + t = xx.hi & 0x80000000U; + s = xx.lo >> 21; + xx.lo = xx.lo << 11; + xx.hi = xx.hi << 11; + xx.hi = xx.hi | s; + if (!xx.hi) { + xx.hi = xx.lo; + xx.lo = 0; + expo_x -= 32; + } + while (static_cast(xx.hi) > 0) { + s = xx.lo >> 31; + xx.lo = xx.lo + xx.lo; + xx.hi = xx.hi + xx.hi; + xx.hi = xx.hi | s; + expo_x--; + } + xx.lo = (xx.lo >> 11); + xx.lo |= (xx.hi << 21); + xx.hi = (xx.hi >> 11) | t; + } + if (expo_y == 0xffffffffU) { + expo_y++; + t = yy.hi & 0x80000000U; + s = yy.lo >> 21; + yy.lo = yy.lo << 11; + yy.hi = yy.hi << 11; + yy.hi = yy.hi | s; + if (!yy.hi) { + yy.hi = yy.lo; + yy.lo = 0; + expo_y -= 32; + } + while (static_cast(yy.hi) > 0) { + s = yy.lo >> 31; + yy.lo = yy.lo + yy.lo; + yy.hi = yy.hi + yy.hi; + yy.hi = yy.hi | s; + expo_y--; + } + yy.lo = (yy.lo >> 11); + yy.lo |= (yy.hi << 21); + yy.hi = (yy.hi >> 11) | t; + } + if (expo_z == 0xffffffffU) { + expo_z++; + t = zz.hi & 0x80000000U; + s = zz.lo >> 21; + zz.lo = zz.lo << 11; + zz.hi = zz.hi << 11; + zz.hi = zz.hi | s; + if (!zz.hi) { + zz.hi = zz.lo; + zz.lo = 0; + expo_z -= 32; + } + while (static_cast(zz.hi) > 0) { + s = zz.lo >> 31; + zz.lo = zz.lo + zz.lo; + zz.hi = zz.hi + zz.hi; + zz.hi = zz.hi | s; + expo_z--; + } + zz.lo = (zz.lo >> 11); + zz.lo |= (zz.hi << 21); + zz.hi = (zz.hi >> 11) | t; + } + } + + expo_x = expo_x + expo_y; + expo_y = xx.hi ^ yy.hi; + t = xx.lo >> 21; + xx.lo = xx.lo << 11; + xx.hi = xx.hi << 11; + xx.hi = xx.hi | t; + yy.hi = yy.hi & 0x000fffffU; + xx.hi = xx.hi | 0x80000000U; /* set mantissa hidden bit */ + yy.hi = yy.hi | 0x00100000U; /* set mantissa hidden bit */ + + prod0 = xx.lo * yy.lo; + 
prod1 =(unsigned)((static_cast(xx.lo)*static_cast(yy.lo))>>32ULL); + prod2 = xx.hi * yy.lo; + prod3 = xx.lo * yy.hi; + prod1 += prod2; + t = (unsigned)(prod1 < prod2); + prod1 += prod3; + t += prod1 < prod3; + prod2 =(unsigned)((static_cast(xx.hi)*static_cast(yy.lo))>>32ULL); + prod3 =(unsigned)((static_cast(xx.lo)*static_cast(yy.hi))>>32ULL); + prod2 += prod3; + s = (unsigned)(prod2 < prod3); + prod3 = xx.hi * yy.hi; + prod2 += prod3; + s += prod2 < prod3; + prod2 += t; + s += prod2 < t; + prod3 =(unsigned)((static_cast(xx.hi)*static_cast(yy.hi))>>32ULL); + prod3 = prod3 + s; + + yy.lo = prod0; /* mantissa */ + yy.hi = prod1; /* mantissa */ + xx.lo = prod2; /* mantissa */ + xx.hi = prod3; /* mantissa */ + expo_x = expo_x - (1023 - 2); /* expo-1 */ + expo_y = expo_y & 0x80000000U; /* sign */ + + if (xx.hi < 0x00100000U) { + s = xx.lo >> 31; + s = (xx.hi << 1) + s; + xx.hi = s; + s = yy.hi >> 31; + s = (xx.lo << 1) + s; + xx.lo = s; + s = yy.lo >> 31; + s = (yy.hi << 1) + s; + yy.hi = s; + s = yy.lo << 1; + yy.lo = s; + expo_x--; + } + + t = 0; + if (((zz.hi << 1) | zz.lo) != 0) { /* z is not zero */ + + s = zz.hi & 0x80000000U; + + zz.hi &= 0x000fffffU; + zz.hi |= 0x00100000U; + ww.hi = 0; + ww.lo = 0; + + /* compare and swap. 
put augend into xx:yy */ + if (static_cast(expo_z) > static_cast(expo_x)) { + t = expo_z; + expo_z = expo_x; + expo_x = t; + t = zz.hi; + zz.hi = xx.hi; + xx.hi = t; + t = zz.lo; + zz.lo = xx.lo; + xx.lo = t; + t = ww.hi; + ww.hi = yy.hi; + yy.hi = t; + t = ww.lo; + ww.lo = yy.lo; + yy.lo = t; + t = expo_y; + expo_y = s; + s = t; + } + + /* augend_sign = expo_y, augend_mant = xx:yy, augend_expo = expo_x */ + /* addend_sign = s, addend_mant = zz:ww, addend_expo = expo_z */ + expo_z = expo_x - expo_z; + u = expo_y ^ s; + if (expo_z <= 107) { + /* denormalize addend */ + t = 0; + while (expo_z >= 32) { + t = ww.lo | (t != 0); + ww.lo = ww.hi; + ww.hi = zz.lo; + zz.lo = zz.hi; + zz.hi = 0; + expo_z -= 32; + } + if (expo_z) { + t = (t >> expo_z) | (ww.lo << (32 - expo_z)) | + ((t << (32 - expo_z)) != 0); + ww.lo = (ww.lo >> expo_z) | (ww.hi << (32 - expo_z)); + ww.hi = (ww.hi >> expo_z) | (zz.lo << (32 - expo_z)); + zz.lo = (zz.lo >> expo_z) | (zz.hi << (32 - expo_z)); + zz.hi = (zz.hi >> expo_z); + } + } else { + t = 1; + ww.lo = 0; + ww.hi = 0; + zz.lo = 0; + zz.hi = 0; + } + if (static_cast(u) < 0) { + /* signs differ, effective subtraction */ + t = (unsigned)(-static_cast(t)); + s = (unsigned)(t != 0); + u = yy.lo - s; + s = (unsigned)(u > yy.lo); + yy.lo = u - ww.lo; + s += yy.lo > u; + u = yy.hi - s; + s = (unsigned)(u > yy.hi); + yy.hi = u - ww.hi; + s += yy.hi > u; + u = xx.lo - s; + s = (unsigned)(u > xx.lo); + xx.lo = u - zz.lo; + s += xx.lo > u; + xx.hi = (xx.hi - zz.hi) - s; + if (!(xx.hi | xx.lo | yy.hi | yy.lo | t)) { + /* complete cancelation, return 0 */ + memcpy(&d, &xx, sizeof(double)); + return d; + } + if (static_cast(xx.hi) < 0) { + /* Oops, augend had smaller mantissa. 
Negate mantissa and flip + sign of result + */ + t = ~t; + yy.lo = ~yy.lo; + yy.hi = ~yy.hi; + xx.lo = ~xx.lo; + xx.hi = ~xx.hi; + if (++t == 0) { + if (++yy.lo == 0) { + if (++yy.hi == 0) { + if (++xx.lo == 0) { + ++xx.hi; + } + } + } + } + expo_y ^= 0x80000000U; + } + + /* normalize mantissa, if necessary */ + while (!(xx.hi & 0x00100000U)) { + xx.hi = (xx.hi << 1) | (xx.lo >> 31); + xx.lo = (xx.lo << 1) | (yy.hi >> 31); + yy.hi = (yy.hi << 1) | (yy.lo >> 31); + yy.lo = (yy.lo << 1); + expo_x--; + } + } else { + /* signs are the same, effective addition */ + yy.lo = yy.lo + ww.lo; + s = (unsigned)(yy.lo < ww.lo); + yy.hi = yy.hi + s; + u = (unsigned)(yy.hi < s); + yy.hi = yy.hi + ww.hi; + u += yy.hi < ww.hi; + xx.lo = xx.lo + u; + s = (unsigned)(xx.lo < u); + xx.lo = xx.lo + zz.lo; + s += xx.lo < zz.lo; + xx.hi = xx.hi + zz.hi + s; + if (xx.hi & 0x00200000U) { + t = t | (yy.lo << 31); + yy.lo = (yy.lo >> 1) | (yy.hi << 31); + yy.hi = (yy.hi >> 1) | (xx.lo << 31); + xx.lo = (xx.lo >> 1) | (xx.hi << 31); + xx.hi = ((xx.hi & 0x80000000U) | (xx.hi >> 1)) & ~0x40000000U; + expo_x++; + } + } + } + t = yy.lo | (t != 0); + t = yy.hi | (t != 0); + + xx.hi |= expo_y; /* or in sign bit */ + if (expo_x <= 0x7FDU) { + /* normal */ + xx.hi = xx.hi & ~0x00100000U; /* lop off integer bit */ + s = xx.lo & 1; /* mantissa lsb */ + u = xx.lo; + xx.lo += (t == 0x80000000U) ? 
s : (t >> 31); + xx.hi += (u > xx.lo); + xx.hi += ((expo_x + 1) << 20); + memcpy(&d, &xx, sizeof(double)); + return d; + } else if (static_cast(expo_x) >= 2046) { + /* overflow */ + xx.hi = (xx.hi & 0x80000000U) | 0x7ff00000U; + xx.lo = 0; + memcpy(&d, &xx, sizeof(double)); + return d; + } + /* subnormal */ + expo_x = (unsigned)(-static_cast(expo_x)); + if (expo_x > 54) { + xx.hi = xx.hi & 0x80000000U; + xx.lo = 0; + memcpy(&d, &xx, sizeof(double)); + return d; + } + yy.hi = xx.hi & 0x80000000U; /* save sign bit */ + xx.hi = xx.hi & ~0xffe00000U; + if (expo_x >= 32) { + t = xx.lo | (t != 0); + xx.lo = xx.hi; + xx.hi = 0; + expo_x -= 32; + } + if (expo_x) { + t = (t >> expo_x) | (xx.lo << (32 - expo_x)) | (t != 0); + xx.lo = (xx.lo >> expo_x) | (xx.hi << (32 - expo_x)); + xx.hi = (xx.hi >> expo_x); + } + expo_x = xx.lo & 1; + u = xx.lo; + xx.lo += (t == 0x80000000U) ? expo_x : (t >> 31); + xx.hi += (u > xx.lo); + xx.hi |= yy.hi; + memcpy(&d, &xx, sizeof(double)); + return d; +} + +__func__(double nextafter(double a, double b)) +{ + unsigned long long int ia; + unsigned long long int ib; + memcpy(&ia, &a, sizeof(double)); + memcpy(&ib, &b, sizeof(double)); + if (__isnan(a) || __isnan(b)) return a + b; /* NaN */ + if (((ia | ib) << 1ULL) == 0ULL) return b; + if (a == 0.0) { + return copysign (4.9406564584124654e-324, b); /* crossover */ + } + if ((a < b) && (a < 0.0)) ia--; + if ((a < b) && (a > 0.0)) ia++; + if ((a > b) && (a < 0.0)) ia++; + if ((a > b) && (a > 0.0)) ia--; + memcpy(&a, &ia, sizeof(double)); + return a; +} + +__func__(double erf(double a)) +{ + double t, r, q; + + t = fabs(a); + if (t >= 1.0) { + r = -1.28836351230756500E-019; + r = r * t + 1.30597472161093370E-017; + r = r * t - 6.33924401259620500E-016; + r = r * t + 1.96231865908940140E-014; + r = r * t - 4.35272243559990750E-013; + r = r * t + 7.37083927929352150E-012; + r = r * t - 9.91402142550461630E-011; + r = r * t + 1.08817017167760820E-009; + r = r * t - 9.93918713097634620E-009; + r = r * 
t + 7.66739923255145500E-008; + r = r * t - 5.05440278302806720E-007; + r = r * t + 2.87474157099000620E-006; + r = r * t - 1.42246725399722510E-005; + r = r * t + 6.16994555079419460E-005; + r = r * t - 2.36305221938908790E-004; + r = r * t + 8.05032844055371070E-004; + r = r * t - 2.45833366629108140E-003; + r = r * t + 6.78340988296706120E-003; + r = r * t - 1.70509103597554640E-002; + r = r * t + 3.93322852515666300E-002; + r = r * t - 8.37271292613764040E-002; + r = r * t + 1.64870423707623280E-001; + r = r * t - 2.99729521787681470E-001; + r = r * t + 4.99394435612628580E-001; + r = r * t - 7.52014596480123030E-001; + r = r * t + 9.99933138314926250E-001; + r = r * t - 1.12836725321102670E+000; + r = r * t + 9.99998988715182450E-001; + q = exp (-t * t); + r = 1.0 - r * q; + if (t >= 6.5) { + r = 1.0; + } + a = copysign (r, a); + } else { + q = a * a; + r = -7.77946848895991420E-010; + r = r * q + 1.37109803980285950E-008; + r = r * q - 1.62063137584932240E-007; + r = r * q + 1.64471315712790040E-006; + r = r * q - 1.49247123020098620E-005; + r = r * q + 1.20552935769006260E-004; + r = r * q - 8.54832592931448980E-004; + r = r * q + 5.22397760611847340E-003; + r = r * q - 2.68661706431114690E-002; + r = r * q + 1.12837916709441850E-001; + r = r * q - 3.76126389031835210E-001; + r = r * q + 1.12837916709551260E+000; + a = r * a; + } + return a; +} + +__func__(double erfc(double a)) +{ + double p, q, h, l; + + if (a < 0.75) { + return 1.0 - erf(a); + } + if (a > 27.3) { + return 0.0; + } + if (a < 5.0) { + double t; + t = 1.0 / a; + p = 1.9759923722227928E-008; + p = p * t - 1.0000002670474897E+000; + p = p * t - 7.4935303236347828E-001; + p = p * t - 1.5648136328071860E-001; + p = p * t + 1.2871196242447239E-001; + p = p * t + 1.1126459974811195E-001; + p = p * t + 4.0678642255914332E-002; + p = p * t + 7.9915414156678296E-003; + p = p * t + 7.1458332107840234E-004; + q = t + 2.7493547525030619E+000; + q = q * t + 3.3984254815725423E+000; + q = q * t + 
2.4635304979947761E+000; + q = q * t + 1.1405284734691286E+000; + q = q * t + 3.4130157606195649E-001; + q = q * t + 6.2250967676044953E-002; + q = q * t + 5.5661370941268700E-003; + q = q * t + 1.0575248365468671E-009; + p = p / q; + p = p * t; + h = ((int)(a * 16.0)) * 0.0625; + l = (a - h) * (a + h); + q = exp(-h * h) * exp(-l); + q = q * 0.5; + p = p * q + q; + p = p * t; + } else { + double ooa, ooasq; + + ooa = 1.0 / a; + ooasq = ooa * ooa; + p = -4.0025406686930527E+005; + p = p * ooasq + 1.4420582543942123E+005; + p = p * ooasq - 2.7664185780951841E+004; + p = p * ooasq + 4.1144611644767283E+003; + p = p * ooasq - 5.8706000519209351E+002; + p = p * ooasq + 9.1490086446323375E+001; + p = p * ooasq - 1.6659491387740221E+001; + p = p * ooasq + 3.7024804085481784E+000; + p = p * ooasq - 1.0578553994424316E+000; + p = p * ooasq + 4.2314218745087778E-001; + p = p * ooasq - 2.8209479177354962E-001; + p = p * ooasq + 5.6418958354775606E-001; + h = a * a; + h = ((int)(a * 16.0)) * 0.0625; + l = (a - h) * (a + h); + q = exp(-h * h) * exp(-l); + p = p * ooa; + p = p * q; + } + return p; +} + +__func__(double lgamma(double a)) +{ + double s; + double t; + double i; + double fa; + double sum; + long long int quot; + if (__isnan(a) || __isinf(a)) { + return a * a; + } + fa = fabs(a); + if (fa >= 3.0) { + if (fa >= 8.0) { + /* Stirling approximation; coefficients from Hart et al, "Computer + * Approximations", Wiley 1968. Approximation 5404. 
+ */ + s = 1.0 / fa; + t = s * s; + sum = -0.1633436431e-2; + sum = sum * t + 0.83645878922e-3; + sum = sum * t - 0.5951896861197e-3; + sum = sum * t + 0.793650576493454e-3; + sum = sum * t - 0.277777777735865004e-2; + sum = sum * t + 0.833333333333331018375e-1; + sum = sum * s + 0.918938533204672; + s = 0.5 * log (fa); + t = fa - 0.5; + s = s * t; + t = s - fa; + s = s + sum; + t = t + s; + } else { + i = fa - 3.0; + s = -4.02412642744125560E+003; + s = s * i - 2.97693796998962000E+005; + s = s * i - 6.38367087682528790E+006; + s = s * i - 5.57807214576539320E+007; + s = s * i - 2.24585140671479230E+008; + s = s * i - 4.70690608529125090E+008; + s = s * i - 7.62587065363263010E+008; + s = s * i - 9.71405112477113250E+008; + t = i - 1.02277248359873170E+003; + t = t * i - 1.34815350617954480E+005; + t = t * i - 4.64321188814343610E+006; + t = t * i - 6.48011106025542540E+007; + t = t * i - 4.19763847787431360E+008; + t = t * i - 1.25629926018000720E+009; + t = t * i - 1.40144133846491690E+009; + t = s / t; + t = t + i; + } + } else if (fa >= 1.5) { + i = fa - 2.0; + t = 9.84839283076310610E-009; + t = t * i - 6.69743850483466500E-008; + t = t * i + 2.16565148880011450E-007; + t = t * i - 4.86170275781575260E-007; + t = t * i + 9.77962097401114400E-007; + t = t * i - 2.03041287574791810E-006; + t = t * i + 4.36119725805364580E-006; + t = t * i - 9.43829310866446590E-006; + t = t * i + 2.05106878496644220E-005; + t = t * i - 4.49271383742108440E-005; + t = t * i + 9.94570466342226000E-005; + t = t * i - 2.23154589559238440E-004; + t = t * i + 5.09669559149637430E-004; + t = t * i - 1.19275392649162300E-003; + t = t * i + 2.89051032936815490E-003; + t = t * i - 7.38555102806811700E-003; + t = t * i + 2.05808084278121250E-002; + t = t * i - 6.73523010532073720E-002; + t = t * i + 3.22467033424113040E-001; + t = t * i + 4.22784335098467190E-001; + t = t * i; + } else if (fa >= 0.7) { + i = 1.0 - fa; + t = 1.17786911519331130E-002; + t = t * i + 3.89046747413522300E-002; 
+ t = t * i + 5.90045711362049900E-002; + t = t * i + 6.02143305254344420E-002; + t = t * i + 5.61652708964839180E-002; + t = t * i + 5.75052755193461370E-002; + t = t * i + 6.21061973447320710E-002; + t = t * i + 6.67614724532521880E-002; + t = t * i + 7.14856037245421020E-002; + t = t * i + 7.69311251313347100E-002; + t = t * i + 8.33503129714946310E-002; + t = t * i + 9.09538288991182800E-002; + t = t * i + 1.00099591546322310E-001; + t = t * i + 1.11334278141734510E-001; + t = t * i + 1.25509666613462880E-001; + t = t * i + 1.44049896457704160E-001; + t = t * i + 1.69557177031481600E-001; + t = t * i + 2.07385551032182120E-001; + t = t * i + 2.70580808427600350E-001; + t = t * i + 4.00685634386517050E-001; + t = t * i + 8.22467033424113540E-001; + t = t * i + 5.77215664901532870E-001; + t = t * i; + } else { + t = -9.04051686831357990E-008; + t = t * fa + 7.06814224969349250E-007; + t = t * fa - 3.80702154637902830E-007; + t = t * fa - 2.12880892189316100E-005; + t = t * fa + 1.29108470307156190E-004; + t = t * fa - 2.15932815215386580E-004; + t = t * fa - 1.16484324388538480E-003; + t = t * fa + 7.21883433044470670E-003; + t = t * fa - 9.62194579514229560E-003; + t = t * fa - 4.21977386992884450E-002; + t = t * fa + 1.66538611813682460E-001; + t = t * fa - 4.20026350606819980E-002; + t = t * fa - 6.55878071519427450E-001; + t = t * fa + 5.77215664901523870E-001; + t = t * fa; + t = t * fa + fa; + t = -log (t); + } + if (a >= 0.0) return t; + if (fa < 1e-19) return -log(fa); + i = floor(fa); + if (fa == i) return 1.0 / (fa - i); /* a is an integer: return infinity */ + i = rint (2.0 * fa); + quot = static_cast(i); + i = fa - 0.5 * i; + i = i * CUDART_PI; + if (quot & 1) { + i = cos(i); + } else { + i = sin(i); + } + i = fabs(i); + t = log(CUDART_PI / (i * fa)) - t; + return t; +} + +__func__(unsigned long long int __internal_host_nan_kernel(const char *s)) +{ + unsigned long long i = 0; + int c; + int ovfl = 0; + int invld = 0; + if (s && (*s == '0')) { + s++; 
+ if ((*s == 'x') || (*s == 'X')) { + s++; + while (*s == '0') s++; + while (*s) { + if (i > 0x0fffffffffffffffULL) { + ovfl = 1; + } + c = (((*s) >= 'A') && ((*s) <= 'F')) ? (*s + 'a' - 'A') : (*s); + if ((c >= 'a') && (c <= 'f')) { + c = c - 'a' + 10; + i = i * 16 + c; + } else if ((c >= '0') && (c <= '9')) { + c = c - '0'; + i = i * 16 + c; + } else { + invld = 1; + } + s++; + } + } else { + while (*s == '0') s++; + while (*s) { + if (i > 0x1fffffffffffffffULL) { + ovfl = 1; + } + c = *s; + if ((c >= '0') && (c <= '7')) { + c = c - '0'; + i = i * 8 + c; + } else { + invld = 1; + } + s++; + } + } + } else if (s) { + while (*s) { + c = *s; + if ((i > 1844674407370955161ULL) || + ((i == 1844674407370955161ULL) && (c > '5'))) { + ovfl = 1; + } + if ((c >= '0') && (c <= '9')) { + c = c - '0'; + i = i * 10 + c; + } else { + invld = 1; + } + s++; + } + } + if (ovfl) { + i = ~0ULL; + } + if (invld) { + i = 0ULL; + } + i = (i & 0x000fffffffffffffULL) | 0x7ff8000000000000ULL; + return i; +} + +__func__(double nan(const char *tagp)) +{ + unsigned long long l; + double d; + l = __internal_host_nan_kernel(tagp); + memcpy(&d, &l, sizeof(double)); + return d; +} + +__func__(double __host_tgamma_kernel(double a)) +{ + double t; + t = - 4.4268934071252475E-010; + t = t * a - 2.0266591846658954E-007; + t = t * a + 1.1381211721119527E-006; + t = t * a - 1.2507734816630748E-006; + t = t * a - 2.0136501740408771E-005; + t = t * a + 1.2805012607354486E-004; + t = t * a - 2.1524140811527418E-004; + t = t * a - 1.1651675459704604E-003; + t = t * a + 7.2189432248466381E-003; + t = t * a - 9.6219715326862632E-003; + t = t * a - 4.2197734554722394E-002; + t = t * a + 1.6653861138250356E-001; + t = t * a - 4.2002635034105444E-002; + t = t * a - 6.5587807152025712E-001; + t = t * a + 5.7721566490153287E-001; + t = t * a + 1.0000000000000000E+000; + return t; +} + +__func__(double __host_stirling_poly(double a)) +{ + double x = 1.0 / a; + double z = 0.0; + z = + 8.3949872067208726e-004; + z 
= z * x - 5.1717909082605919e-005; + z = z * x - 5.9216643735369393e-004; + z = z * x + 6.9728137583658571e-005; + z = z * x + 7.8403922172006662e-004; + z = z * x - 2.2947209362139917e-004; + z = z * x - 2.6813271604938273e-003; + z = z * x + 3.4722222222222220e-003; + z = z * x + 8.3333333333333329e-002; + z = z * x + 1.0000000000000000e+000; + return z; +} + +__func__(double __host_tgamma_stirling(double a)) +{ + double z; + double x; + z = __host_stirling_poly (a); + if (a < 142.0) { + x = pow (a, a - 0.5); + a = x * exp (-a); + a = a * CUDART_SQRT_2PI; + return a * z; + } else if (a < 172.0) { + x = pow (a, 0.5 * a - 0.25); + a = x * exp (-a); + a = a * CUDART_SQRT_2PI; + a = a * z; + return a * x; + } else { + return exp(1000.0); /* INF */ + } +} + +__func__(double tgamma(double a)) +{ + double s, xx, x = a; + if (__isnan(a)) { + return a + a; + } + if (fabs(x) < 20.0) { + if (x >= 0.0) { + s = 1.0; + xx = x; + while (xx > 1.5) { + xx = xx - 1.0; + s = s * xx; + } + if (x >= 0.5) { + xx = xx - 1.0; + } + xx = __host_tgamma_kernel (xx); + if (x < 0.5) { + xx = xx * x; + } + s = s / xx; + } else { + xx = x; + s = xx; + if (x == floor(x)) { + return 0.0 / (x - floor(x)); + } + while (xx < -0.5) { + xx = xx + 1.0; + s = s * xx; + } + xx = __host_tgamma_kernel (xx); + s = s * xx; + s = 1.0 / s; + } + return s; + } else { + if (x >= 0.0) { + return __host_tgamma_stirling (x); + } else { + double t; + int quot; + if (x == floor(x)) { + return 0.0 / (x - floor(x)); + } + if (x < -185.0) { + int negative; + x = floor(x); + negative = ((x - (2.0 * floor(0.5 * x))) == 1.0); + return negative ? 
(-1.0 / 1e308 / 1e308) : CUDART_ZERO; + } + /* compute sin(pi*x) accurately */ + xx = rint (2.0 * x); + quot = static_cast(xx); + xx = -0.5 * xx + x; + xx = xx * CUDART_PI; + if (quot & 1) { + xx = cos (xx); + } else { + xx = sin (xx); + } + if (quot & 2) { + xx = -xx; + } + x = fabs (x); + s = exp (-x); + t = x - 0.5; + if (x > 140.0) t = 0.5 * t; + t = pow (x, t); + if (x > 140.0) s = s * t; + s = s * __host_stirling_poly (x); + s = s * x; + s = s * xx; + s = 1.0 / s; + s = s * CUDART_SQRT_PIO2; + s = s / t; + return s; + } + } +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR FLOAT AND LONG DOUBLE ROUTINES FOR WINDOWS PLATFORM * +* MAP FLOAT AND LONG DOUBLE ROUTINES TO DOUBLE ROUTINES * +* * +*******************************************************************************/ + +__func__(int __signbitl(const long double a)) +{ + return __signbit(static_cast(a)); +} + +__func__(int __signbitf(const float a)) +{ + return __signbit(static_cast(a)); +} + +__func__(int __finitel(const long double a)) +{ + return __finite(static_cast(a)); +} + +__func__(int __finitef(const float a)) +{ + return __finite(static_cast(a)); +} + +__func__(int __isinfl(const long double a)) +{ + return __isinf(static_cast(a)); +} + +__func__(int __isinff(const float a)) +{ + return __isinf(static_cast(a)); +} + +__func__(int __isnanl(const long double a)) +{ + return __isnan(static_cast(a)); +} + +__func__(int __isnanf(const float a)) +{ + return __isnan(static_cast(a)); +} + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +__func__(float fmaxf(const float a, const float b)) +{ + return static_cast(fmax(static_cast(a), static_cast(b))); +} + +__func__(float fminf(const float a, const float b)) +{ + return static_cast(fmin(static_cast(a), static_cast(b))); +} + +__func__(float roundf(const float a)) +{ + return static_cast(round(static_cast(a))); +} + +__func__(long int 
lroundf(const float a)) +{ + return lround(static_cast(a)); +} + +__func__(long long int llroundf(const float a)) +{ + return llround(static_cast(a)); +} + +__func__(float truncf(const float a)) +{ + return static_cast(trunc(static_cast(a))); +} + +__func__(float rintf(const float a)) +{ + return static_cast(rint(static_cast(a))); +} + +__func__(float nearbyintf(const float a)) +{ + return static_cast(nearbyint(static_cast(a))); +} + +__func__(long int lrintf(const float a)) +{ + return lrint(static_cast(a)); +} + +__func__(long long int llrintf(const float a)) +{ + return llrint(static_cast(a)); +} + +__func__(float logbf(const float a)) +{ + return static_cast(logb(static_cast(a))); +} + +__func__(float scalblnf(const float a, const long int b)) +{ + return static_cast(scalbln(static_cast(a), b)); +} + +__func__(float log2f(const float a)) +{ + return static_cast(log2(static_cast(a))); +} + +__func__(float exp2f(const float a)) +{ + return static_cast(exp2(static_cast(a))); +} + +__func__(float acoshf(const float a)) +{ + return static_cast(acosh(static_cast(a))); +} + +__func__(float asinhf(const float a)) +{ + return static_cast(asinh(static_cast(a))); +} + +__func__(float atanhf(const float a)) +{ + return static_cast(atanh(static_cast(a))); +} + +__func__(float cbrtf(const float a)) +{ + return static_cast(cbrt(static_cast(a))); +} + +__func__(float expm1f(const float a)) +{ + return static_cast(expm1(static_cast(a))); +} + +__func__(float fdimf(const float a, const float b)) +{ + return static_cast(fdim(static_cast(a), static_cast(b))); +} + +__func__(float log1pf(const float a)) +{ + return static_cast(log1p(static_cast(a))); +} + +__func__(float scalbnf(const float a, const int b)) +{ + return static_cast(scalbn(static_cast(a), b)); +} + +__func__(float fmaf(const float a, const float b, const float c)) +{ + return static_cast(fma(static_cast(a), static_cast(b), static_cast(c))); +} + +__func__(int ilogbf(const float a)) +{ + return ilogb(static_cast(a)); 
+} + +__func__(float erff(const float a)) +{ + return static_cast(erf(static_cast(a))); +} + +__func__(float erfcf(const float a)) +{ + return static_cast(erfc(static_cast(a))); +} + +__func__(float lgammaf(const float a)) +{ + return static_cast(lgamma(static_cast(a))); +} + +__func__(float tgammaf(const float a)) +{ + return static_cast(tgamma(static_cast(a))); +} + +__func__(float remquof(const float a, const float b, int *quo)) +{ + return static_cast(remquo(static_cast(a), static_cast(b), quo)); +} + +__func__(float remainderf(const float a, const float b)) +{ + return static_cast(remainder(static_cast(a), static_cast(b))); +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (defined _MSC_VER) && (_MSC_VER >= 1700) +__func__(float j0f(const float a)) +{ + return static_cast(_j0(static_cast(a))); +} + +__func__(float j1f(const float a)) +{ + return static_cast(_j1(static_cast(a))); +} + +__func__(float jnf(const int n, const float a)) +{ + return static_cast(_jn(n, static_cast(a))); +} + +__func__(float y0f(const float a)) +{ + return static_cast(_y0(static_cast(a))); +} + +__func__(float y1f(const float a)) +{ + return static_cast(_y1(static_cast(a))); +} + +__func__(float ynf(const int n, const float a)) +{ + return static_cast(_yn(n, static_cast(a))); +} +#endif /* (defined _MSC_VER) && (_MSC_VER >= 1700) */ + + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR FLOAT ROUTINES FOR WINDOWS PLATFORM * +* * +*******************************************************************************/ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +__func__(float copysignf(float a, const float b)) +{ + unsigned int aa, bb; + memcpy(&aa, &a, sizeof(float)); + memcpy(&bb, &b, sizeof(float)); + aa = (aa & ~0x80000000U) | (bb & 0x80000000U); + memcpy(&a, &aa, sizeof(float)); + return a; +} + +__func__(float nextafterf(float a, const float b)) +{ + unsigned int ia; + unsigned int ib; + memcpy(&ia, &a, 
sizeof(float)); + memcpy(&ib, &b, sizeof(float)); + if (__isnanf(a) || __isnanf(b)) return a + b; /*NaN*/ + if (((ia | ib) << 1U) == 0U) return b; + if (a == 0.0F) { + return copysignf(1.401298464e-045F, b); /*crossover*/ + } + if ((a < b) && (a < 0.0F)) ia--; + if ((a < b) && (a > 0.0F)) ia++; + if ((a > b) && (a < 0.0F)) ia++; + if ((a > b) && (a > 0.0F)) ia--; + memcpy(&a, &ia, sizeof(float)); + return a; +} + +__func__(float nanf(const char *tagp)) +{ + float f; + unsigned int i; + i = static_cast(__internal_host_nan_kernel(tagp)); + i = (i & 0x007fffffU) | 0x7fc00000U; + memcpy(&f, &i, sizeof(float)); + return f; +} + +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#endif /* _WIN32 */ + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR DOUBLE AND FLOAT ROUTINES. ALL PLATFORMS * +* * +*******************************************************************************/ + +__func__(double rsqrt(const double a)) +{ + return 1.0 / sqrt(a); +} + +__func__(double rcbrt(const double a)) +{ + double s, t; + + if (__isnan(a)) { + return a + a; + } + if (a == 0.0 || __isinf(a)) { + return 1.0 / a; + } + s = fabs(a); + t = exp2(-CUDART_THIRD * log2(s)); /* initial approximation */ + t = ((t*t) * (-s*t) + 1.0) * (CUDART_THIRD*t) + t;/* refine approximation */ +#if defined(__APPLE__) + if (__signbitd(a)) +#else /* __APPLE__ */ + if (__signbit(a)) +#endif /* __APPLE__ */ + { + t = -t; + } + return t; +} + +__func__(double sinpi(double a)) +{ + int n; + + if (__isnan(a)) { + return a + a; + } + if (a == 0.0 || __isinf(a)) { + return sin (a); + } + if (a == floor(a)) { + return ((a / 1.0e308) / 1.0e308) / 1.0e308; + } + double twoa = a + a; + double rtwoa = round(twoa); + long long int l = (long long int)rtwoa; + n = (int)l; + a -= rtwoa * 0.5; + a = a * CUDART_PI; + if (n & 1) { + a = cos (a); + } else { + a = sin (a); + } + if (n & 2) { + a = -a; + } + return a; +} + +__func__(double cospi(double a)) +{ + 
int n; + + if (__isnan(a)) { + return a + a; + } + if (__isinf(a)) { + return cos (a); + } + if (fabs(a) > 9.0071992547409920e+015) { + a = 0.0; + } + double twoa = a + a; + double rtwoa = round(twoa); + long long int l = (long long int)rtwoa; + n = (int)l; + a -= rtwoa * 0.5; + a = a * CUDART_PI; + n++; + if (n & 1) { + a = cos (a); + } else { + a = sin (a); + } + if (n & 2) { + a = -a; + } + if (a == 0.0) { + a = fabs(a); + } + return a; +} + +__func__(void sincospi(const double a, double *sptr, double *cptr)) +{ + *sptr = sinpi(a); + *cptr = cospi(a); +} + +__func__(double erfinv(const double a)) +{ + double p, q, t, fa; + unsigned long long int l; + + fa = fabs(a); + if (fa >= 1.0) { + l = 0xfff8000000000000ULL; + memcpy(&t, &l, sizeof(double)); /* INDEFINITE */ + if (fa == 1.0) { + t = a * exp(1000.0); /* Infinity */ + } + } else if (fa >= 0.9375) { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. 
Table 59 + */ + t = log1p(-fa); + t = 1.0 / sqrt(-t); + p = 2.7834010353747001060e-3; + p = p * t + 8.6030097526280260580e-1; + p = p * t + 2.1371214997265515515e+0; + p = p * t + 3.1598519601132090206e+0; + p = p * t + 3.5780402569085996758e+0; + p = p * t + 1.5335297523989890804e+0; + p = p * t + 3.4839207139657522572e-1; + p = p * t + 5.3644861147153648366e-2; + p = p * t + 4.3836709877126095665e-3; + p = p * t + 1.3858518113496718808e-4; + p = p * t + 1.1738352509991666680e-6; + q = t + 2.2859981272422905412e+0; + q = q * t + 4.3859045256449554654e+0; + q = q * t + 4.6632960348736635331e+0; + q = q * t + 3.9846608184671757296e+0; + q = q * t + 1.6068377709719017609e+0; + q = q * t + 3.5609087305900265560e-1; + q = q * t + 5.3963550303200816744e-2; + q = q * t + 4.3873424022706935023e-3; + q = q * t + 1.3858762165532246059e-4; + q = q * t + 1.1738313872397777529e-6; + t = p / (q * t); + if (a < 0.0) t = -t; + } else if (fa >= 0.75) { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. Table 39 + */ + t = a * a - .87890625; + p = .21489185007307062000e+0; + p = p * t - .64200071507209448655e+1; + p = p * t + .29631331505876308123e+2; + p = p * t - .47644367129787181803e+2; + p = p * t + .34810057749357500873e+2; + p = p * t - .12954198980646771502e+2; + p = p * t + .25349389220714893917e+1; + p = p * t - .24758242362823355486e+0; + p = p * t + .94897362808681080020e-2; + q = t - .12831383833953226499e+2; + q = q * t + .41409991778428888716e+2; + q = q * t - .53715373448862143349e+2; + q = q * t + .33880176779595142685e+2; + q = q * t - .11315360624238054876e+2; + q = q * t + .20369295047216351160e+1; + q = q * t - .18611650627372178511e+0; + q = q * t + .67544512778850945940e-2; + p = p / q; + t = a * p; + } else { + /* Based on: J.M. Blair, C.A. Edwards, J.H. 
Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. Table 18 + */ + t = a * a - .5625; + p = - .23886240104308755900e+2; + p = p * t + .45560204272689128170e+3; + p = p * t - .22977467176607144887e+4; + p = p * t + .46631433533434331287e+4; + p = p * t - .43799652308386926161e+4; + p = p * t + .19007153590528134753e+4; + p = p * t - .30786872642313695280e+3; + q = t - .83288327901936570000e+2; + q = q * t + .92741319160935318800e+3; + q = q * t - .35088976383877264098e+4; + q = q * t + .59039348134843665626e+4; + q = q * t - .48481635430048872102e+4; + q = q * t + .18997769186453057810e+4; + q = q * t - .28386514725366621129e+3; + p = p / q; + t = a * p; + } + return t; +} + +__func__(double erfcinv(const double a)) +{ + double t; + unsigned long long int l; + + if (__isnan(a)) { + return a + a; + } + if (a <= 0.0) { + l = 0xfff8000000000000ULL; + memcpy(&t, &l, sizeof(double)); /* INDEFINITE */ + if (a == 0.0) { + t = (1.0 - a) * exp(1000.0); /* Infinity */ + } + } + else if (a >= 0.0625) { + t = erfinv (1.0 - a); + } + else if (a >= 1e-100) { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. 
Table 59 + */ + double p, q; + t = log(a); + t = 1.0 / sqrt(-t); + p = 2.7834010353747001060e-3; + p = p * t + 8.6030097526280260580e-1; + p = p * t + 2.1371214997265515515e+0; + p = p * t + 3.1598519601132090206e+0; + p = p * t + 3.5780402569085996758e+0; + p = p * t + 1.5335297523989890804e+0; + p = p * t + 3.4839207139657522572e-1; + p = p * t + 5.3644861147153648366e-2; + p = p * t + 4.3836709877126095665e-3; + p = p * t + 1.3858518113496718808e-4; + p = p * t + 1.1738352509991666680e-6; + q = t + 2.2859981272422905412e+0; + q = q * t + 4.3859045256449554654e+0; + q = q * t + 4.6632960348736635331e+0; + q = q * t + 3.9846608184671757296e+0; + q = q * t + 1.6068377709719017609e+0; + q = q * t + 3.5609087305900265560e-1; + q = q * t + 5.3963550303200816744e-2; + q = q * t + 4.3873424022706935023e-3; + q = q * t + 1.3858762165532246059e-4; + q = q * t + 1.1738313872397777529e-6; + t = p / (q * t); + } + else { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. 
Table 82 + */ + double p, q; + t = log(a); + t = 1.0 / sqrt(-t); + p = 6.9952990607058154858e-1; + p = p * t + 1.9507620287580568829e+0; + p = p * t + 8.2810030904462690216e-1; + p = p * t + 1.1279046353630280005e-1; + p = p * t + 6.0537914739162189689e-3; + p = p * t + 1.3714329569665128933e-4; + p = p * t + 1.2964481560643197452e-6; + p = p * t + 4.6156006321345332510e-9; + p = p * t + 4.5344689563209398450e-12; + q = t + 1.5771922386662040546e+0; + q = q * t + 2.1238242087454993542e+0; + q = q * t + 8.4001814918178042919e-1; + q = q * t + 1.1311889334355782065e-1; + q = q * t + 6.0574830550097140404e-3; + q = q * t + 1.3715891988350205065e-4; + q = q * t + 1.2964671850944981713e-6; + q = q * t + 4.6156017600933592558e-9; + q = q * t + 4.5344687377088206783e-12; + t = p / (q * t); + } + return t; +} + +__func__(double normcdfinv(const double a)) +{ + return -1.4142135623730951 * erfcinv(a + a); +} + +__func__(double normcdf(double a)) +{ + double ah, al, t1, t2, u1, u2, v1, v2, z; + if (fabs (a) > 38.5) a = copysign (38.5, a); + ah = a * 134217729.0; + u1 = (a - ah) + ah; + u2 = a - u1; + v1 = -7.0710678398609161e-01; + v2 = 2.7995440410322203e-09; + t1 = a * -CUDART_SQRT_HALF_HI; + t2 = (((u1 * v1 - t1) + u1 * v2) + u2 * v1) + u2 * v2; + t2 = (a * -CUDART_SQRT_HALF_LO) + t2; + ah = t1 + t2; + z = erfc (ah); + if (a < -1.0) { + al = (t1 - ah) + t2; + t1 = -2.0 * ah * z; + z = t1 * al + z; + } + return 0.5 * z; +} + +__func__(double erfcx(const double a)) +{ + double x, t1, t2, t3; + + if (__isnan(a)) { + return a + a; + } + x = fabs(a); + if (x < 32.0) { + /* + * This implementation of erfcx() is based on the algorithm in: M. M. + * Shepherd and J. G. Laframboise, "Chebyshev Approximation of (1 + 2x) + * exp(x^2)erfc x in 0 <= x < INF", Mathematics of Computation, Vol. + * 36, No. 153, January 1981, pp. 249-253. For the core approximation, + * the input domain [0,INF] is transformed via (x-k) / (x+k) where k is + * a precision-dependent constant. 
Here, we choose k = 4.0, so the input + * domain [0, 27.3] is transformed into the core approximation domain + * [-1, 0.744409]. + */ + /* + // Compute (1+2*x)*exp(x*x)*erfc(x) + */ + /* t2 = (x-4.0)/(x+4.0), transforming [0,INF] to [-1,+1] */ + t1 = x - 4.0; + t2 = x + 4.0; + t2 = t1 / t2; + /* approximate on [-1, 0.744409] */ + t1 = - 3.5602694826817400E-010; + t1 = t1 * t2 - 9.7239122591447274E-009; + t1 = t1 * t2 - 8.9350224851649119E-009; + t1 = t1 * t2 + 1.0404430921625484E-007; + t1 = t1 * t2 + 5.8806698585341259E-008; + t1 = t1 * t2 - 8.2147414929116908E-007; + t1 = t1 * t2 + 3.0956409853306241E-007; + t1 = t1 * t2 + 5.7087871844325649E-006; + t1 = t1 * t2 - 1.1231787437600085E-005; + t1 = t1 * t2 - 2.4399558857200190E-005; + t1 = t1 * t2 + 1.5062557169571788E-004; + t1 = t1 * t2 - 1.9925637684786154E-004; + t1 = t1 * t2 - 7.5777429182785833E-004; + t1 = t1 * t2 + 5.0319698792599572E-003; + t1 = t1 * t2 - 1.6197733895953217E-002; + t1 = t1 * t2 + 3.7167515553018733E-002; + t1 = t1 * t2 - 6.6330365827532434E-002; + t1 = t1 * t2 + 9.3732834997115544E-002; + t1 = t1 * t2 - 1.0103906603555676E-001; + t1 = t1 * t2 + 6.8097054254735140E-002; + t1 = t1 * t2 + 1.5379652102605428E-002; + t1 = t1 * t2 - 1.3962111684056291E-001; + t1 = t1 * t2 + 1.2329951186255526E+000; + /* + // Note: (1+2*x)*exp(x*x)*erfc(x) / (1+2*x) = exp(x*x)*erfc(x) + */ + t2 = 2.0 * x + 1.0; + t1 = t1 / t2; + } else { + /* asymptotic expansion for large aguments */ + t2 = 1.0 / x; + t3 = t2 * t2; + t1 = -29.53125; + t1 = t1 * t3 + 6.5625; + t1 = t1 * t3 - 1.875; + t1 = t1 * t3 + 0.75; + t1 = t1 * t3 - 0.5; + t1 = t1 * t3 + 1.0; + t2 = t2 * 5.6418958354775628e-001; + t1 = t1 * t2; + } + if (a < 0.0) { + /* + // Note: erfcx(x) = 2*exp(x^2) - erfcx(|x|) + */ + t2 = (static_cast(x * 16.0)) * 0.0625; + t3 = (x - t2) * (x + t2); + t3 = exp(t2 * t2) * exp(t3); + t3 = t3 + t3; + t1 = t3 - t1; + } + return t1; +} + +__func__(float rsqrtf(const float a)) +{ + return static_cast(rsqrt(static_cast(a))); +} 
+ +__func__(float rcbrtf(const float a)) +{ + return static_cast(rcbrt(static_cast(a))); +} + +__func__(float sinpif(const float a)) +{ + return static_cast(sinpi(static_cast(a))); +} + +__func__(float cospif(const float a)) +{ + return static_cast(cospi(static_cast(a))); +} + +__func__(void sincospif(const float a, float *sptr, float *cptr)) +{ + double s, c; + + sincospi(static_cast(a), &s, &c); + *sptr = static_cast(s); + *cptr = static_cast(c); +} + +__func__(float erfinvf(const float a)) +{ + return static_cast(erfinv(static_cast(a))); +} + +__func__(float erfcinvf(const float a)) +{ + return static_cast(erfcinv(static_cast(a))); +} + +__func__(float normcdfinvf(const float a)) +{ + return static_cast(normcdfinv(static_cast(a))); +} + +__func__(float normcdff(const float a)) +{ + return static_cast(normcdf(static_cast(a))); +} + +__func__(float erfcxf(const float a)) +{ + return static_cast(erfcx(static_cast(a))); +} + +#if defined(_WIN32) + +#pragma warning(default: 4211) + +#endif /* _WIN32 */ + +#endif /* !__CUDACC__ */ + +#endif /* !__MATH_FUNCTIONS_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_HPP__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.h new file mode 100644 index 0000000000000000000000000000000000000000..3a85d6374eca408010f1221b698344b2cfb1ac2c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.h @@ -0,0 +1,754 @@ +/* + * Copyright 2017-2020 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead.") +#else +#warning "crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__ +#endif + +#if !defined(__CUDA_MMA_H__) +#define __CUDA_MMA_H__ + +#include +#include + +#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 +#define __CUDA_IMMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 +#define __CUDA_SUBBYTE_IMMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 +#define __CUDA_AMPERE_MMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */ + +namespace nvcuda { +namespace wmma { + + // utility functions +#ifdef __CUDA_AMPERE_MMA__ + inline __device__ float __float_to_tf32(float in) + { + float ret; + asm("{\n .reg .b32 __$1;" + "\n cvt.rna.tf32.f32 __$1, %1;" + "\n mov.b32 %0, 
__$1;\n}\n" : "=f"(ret) : "f"(in) ); + return ret; + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // tags + // + struct row_major; + struct col_major; + struct matrix_a; + struct matrix_b; + struct accumulator; + +#ifdef __CUDA_AMPERE_MMA__ + namespace precision { + struct tf32; + } +#endif /* __CUDA_AMPERE_MMA__ */ +#ifdef __CUDA_SUBBYTE_IMMA__ + namespace experimental { + namespace precision { + struct u4; // 4-bit unsigned + struct s4; // 4-bit signed + struct b1; // 1-bit + } + enum bmmaBitOp { bmmaBitOpXOR = 1 +#ifdef __CUDA_AMPERE_MMA__ + , bmmaBitOpAND = 2 +#endif /* __CUDA_AMPERE_MMA__ */ + }; + enum bmmaAccumulateOp { bmmaAccumulateOpPOPC = 1 }; + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + // + // layout + // + enum layout_t { + mem_row_major, mem_col_major + }; + + template + struct helper_traits { + typedef T element_type; + typedef T storage_element_type; + typedef T fill_argument_type; + }; + +#ifdef __CUDA_SUBBYTE_IMMA__ + template<> struct helper_traits { + typedef experimental::precision::u4 element_type; + typedef unsigned int storage_element_type; + typedef unsigned int fill_argument_type; + }; + + template<> struct helper_traits { + typedef experimental::precision::s4 element_type; + typedef int storage_element_type; + typedef int fill_argument_type; + }; + + template<> struct helper_traits { + typedef experimental::precision::b1 element_type; + typedef unsigned int storage_element_type; + typedef unsigned int fill_argument_type; + }; +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> struct helper_traits { + typedef precision::tf32 element_type; + typedef float storage_element_type; + typedef float fill_argument_type; + }; +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // The base fragment type + // + /* note: alignment required for compiler implementation */ + template + struct __align__(8) __frag_base { + + /* Number of elements in the fragment */ + enum {num_elements = size}; + + /* Number of storage elements in the 
fragment. + + The elements of the fragment are packed together when the + fragment element type is experimental::precision::u4, + experimental::precision::s4 or experimental::precision::b1. + When elements are packed, num_storage_elements + will be smaller than num_elements. + */ + enum {num_storage_elements = packed_size}; + + /* element type of the fragment */ + typedef T element_type; + + /* element type of the storage representation. + + The mapping from element_type to storage_element_type is as follows: + experimental::precision::u4 -> unsigned (8 elements in 1 storage element) + experimental::precision::s4 -> int (8 elements in 1 storage element) + experimental::precision::b1 -> unsigned (32 elements in 1 storage element) + precision::tf32 -> float (1 element in 1 storage element) + all other types T -> T + */ + typedef typename helper_traits::storage_element_type storage_element_type; + + /* Storage for the (possibly packed) fragment elements. */ + storage_element_type x[num_storage_elements]; + }; + + template + static inline __device__ StorageType __get_storage_value(ArgType in) { return in; } + +#ifdef __CUDA_SUBBYTE_IMMA__ + template<> + __device__ inline unsigned + __get_storage_value(unsigned in) + { + /* For experimental::precision::u4 fragment element type, pack 8 elements into a single + 32-bit unsigned int storage element */ + unsigned val = in & 0xf; + return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) | + (val << 20) | (val << 24) | (val << 28)); + }; + + template<> + __device__ inline int + __get_storage_value(int in) + { + /* For experimental::precision::s4 fragment element type, pack 8 elements into a single + 32-bit signed int storage element */ + int val = in & 0xf; + return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) | + (val << 20) | (val << 24) | (val << 28)); + }; + + template<> + __device__ inline unsigned + __get_storage_value(unsigned in) + { + /* For experimental::precision::b1 fragment element type, 
pack 32 elements into a + single 32-bit unsigned int storage element */ + return (in & 0x1) ? 0xFFFFFFFFU : 0; + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + template + __CUDA_MMA_DEVICE_DECL__ void fill_fragment(__frag_base& f, + /* The mapping from fragment element type (FragEleType) to fill_argument_type is: + experimental::precision::u4 -> unsigned (only lower 4 bits taken) + experimental::precision::s4 -> int (only lower 4 bits taken) + experimental::precision::b1 -> unsigned (only lowest 1 bit taken) + precision::tf32 -> float + all other types T -> T + */ + const typename helper_traits::fill_argument_type & in) { + + /* get the (possibly packed) storage element value. See the specializations above for fragment + element types where the storage representation is packed */ + typedef typename helper_traits::storage_element_type storage_type; + storage_type v = __get_storage_value(in); +#pragma unroll + for (int i=0; i< f.num_storage_elements; i++) + f.x[i] = v; + } + + // + // Fragment template + // + template class fragment; + + // + // Fragments for 16x16x16 + // + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 8> {}; + template<> class fragment : public __frag_base {}; + +#ifdef __CUDA_IMMA__ + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_IMMA__ */ + +#ifdef 
__CUDA_AMPERE_MMA__ + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Fragments for 32x8x16 + // + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 8> {}; + template<> class fragment : public __frag_base {}; + +#ifdef __CUDA_IMMA__ + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Fragments for 8x32x16 + // + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 8> {}; + 
template<> class fragment : public __frag_base {}; + +#ifdef __CUDA_IMMA__ + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; +#endif /* __CUDA_AMPERE_MMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Fragments for 8x8x32 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + + // + // Fragments for 8x8x128 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // Fragments for 16x16x8 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + + // + // Fragments for 8x8x4 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment 
: public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // Load functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef 
__CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Load functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void 
load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Load functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void 
load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Load functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Load functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST 
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + +#ifdef __CUDA_AMPERE_MMA__ + // + // Load functions for frags of shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Load functions for frags of shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Store functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST 
+#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Store functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Store functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // Store functions for frags of shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Store functions for frags of shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, 
const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const 
fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void 
mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool 
satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const 
fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + 
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // MMA functions for shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + + + // + // MMA functions for shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, + experimental::bmmaBitOp = experimental::bmmaBitOpXOR, + experimental::bmmaAccumulateOp = experimental::bmmaAccumulateOpPOPC) __DEF_IF_HOST + +#endif /* 
__CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // MMA functions for shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + + // + // MMA functions for shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ +}; +}; + +#undef __DEF_IF_HOST +#undef __CUDA_IMMA__ +#undef __CUDA_SUBBYTE_IMMA__ +#undef __CUDA_AMPERE_MMA__ +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __CUDA_MMA_DEVICE_DECL__ + +#if defined(__CUDA_ARCH__) +#include "mma.hpp" +#endif /* defined(__CUDA_ARCH__) */ + + +#endif /* !__CUDA_MMA_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.h 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..bd34ec210fa7eb861a3aacbd27090e8c420f9c5f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.h @@ -0,0 +1,137 @@ +/* + * Copyright 2017-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + //NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__ +#endif + +#if !defined(__SM_70_RT_H__) +#define __SM_70_RT_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_70_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_70_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +/****************************************************************************** + * match * + ******************************************************************************/ +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) __DEF_IF_HOST + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int 
value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) __DEF_IF_HOST + +__SM_70_RT_DECL__ void __nanosleep(unsigned int ns) __DEF_IF_HOST + +__SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) __DEF_IF_HOST + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_70_RT_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_70_rt.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_70_RT_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__ +#endif + + +#undef EXCLUDE_FROM_RTC \ No newline at end of file diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.hpp new file mode 100644 index 0000000000000000000000000000000000000000..322496587325a1387e4280a509455e3ccc7caa1b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.hpp @@ -0,0 +1,192 @@ +/* + * 
Copyright 2017-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__ +#endif + +#if !defined(__SM_70_RT_HPP__) +#define __SM_70_RT_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_70_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_70_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* Below are implementations of SM-7.0 builtin functions which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +// +// __match_any_sync +// +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) { + return __match32_any_sync(mask, value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) { + return __match32_any_sync(mask, value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) { + return (sizeof(long) == sizeof(long long)) ? + __match64_any_sync(mask, (unsigned long long)value): + __match32_any_sync(mask, (unsigned)value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) { + return (sizeof(long) == sizeof(long long)) ? 
+ __match64_any_sync(mask, (unsigned long long)value): + __match32_any_sync(mask, (unsigned)value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) { + return __match64_any_sync(mask, value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) { + return __match64_any_sync(mask, value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) { + return __match32_any_sync(mask, __float_as_uint(value)); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) { + return __match64_any_sync(mask, __double_as_longlong(value)); +} + +// +// __match_all_sync +// +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) { + return __match32_all_sync(mask, value, pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) { + return __match32_all_sync(mask, value, pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) { + return (sizeof(long) == sizeof(long long)) ? + __match64_all_sync(mask, (unsigned long long)value, pred): + __match32_all_sync(mask, (unsigned)value, pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) { + return (sizeof(long) == sizeof(long long)) ? 
+ __match64_all_sync(mask, (unsigned long long)value, pred): + __match32_all_sync(mask, (unsigned)value, pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) { + return __match64_all_sync(mask, value, pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) { + return __match64_all_sync(mask, value, pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) { + return __match32_all_sync(mask, __float_as_uint(value), pred); +} + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) { + return __match64_all_sync(mask, __double_as_longlong(value), pred); +} + +__SM_70_RT_DECL__ void __nanosleep(unsigned int ns) { + asm volatile("nanosleep.u32 %0;" :: "r"(ns)); +} + + +extern "C" __device__ __device_builtin__ +unsigned short __usAtomicCAS(unsigned short *, unsigned short, unsigned short); + +__SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) { + return __usAtomicCAS(address, compare, val); +} + + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_70_RT_DECL__ + +#endif /* !__SM_70_RT_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..42c69291a7101eb4dfc4b2303b4580c5765cb8b6 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.h @@ -0,0 +1,162 @@ +/* + * Copyright 2017-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__ +#endif + +#if !defined(__SM_80_RT_H__) +#define __SM_80_RT_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_80_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_80_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC +/****************************************************************************** + * reduce * + ******************************************************************************/ +__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) __DEF_IF_HOST +__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) __DEF_IF_HOST +__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) __DEF_IF_HOST + +__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) __DEF_IF_HOST +__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) __DEF_IF_HOST +__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) __DEF_IF_HOST + +__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) __DEF_IF_HOST +__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) __DEF_IF_HOST +__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) __DEF_IF_HOST + +#undef 
EXCLUDE_FROM_RTC + + +extern "C" { +inline __device__ void *__nv_associate_access_property(const void *ptr, + unsigned long long property) { + extern __device__ void *__nv_associate_access_property_impl(const void *, + unsigned long long); + return __nv_associate_access_property_impl(ptr, property); +} + +inline __device__ void __nv_memcpy_async_shared_global_4(void *dst, + const void *src, + unsigned src_size) { + extern __device__ void __nv_memcpy_async_shared_global_4_impl(void *, + const void *, + unsigned); + __nv_memcpy_async_shared_global_4_impl(dst, src, src_size); +} + +inline __device__ void __nv_memcpy_async_shared_global_8(void *dst, + const void *src, + unsigned src_size) { + extern __device__ void __nv_memcpy_async_shared_global_8_impl(void *, + const void *, + unsigned); + __nv_memcpy_async_shared_global_8_impl(dst, src, src_size); +} + +inline __device__ void __nv_memcpy_async_shared_global_16(void *dst, + const void *src, + unsigned src_size) { + extern __device__ void __nv_memcpy_async_shared_global_16_impl(void *, + const void *, + unsigned); + __nv_memcpy_async_shared_global_16_impl(dst, src, src_size); +} + +} +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_80_RT_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_80_rt.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_80_RT_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.hpp new file mode 100644 index 
0000000000000000000000000000000000000000..857bd44a3bb0d8480560047a85f9059bc370b52f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.hpp @@ -0,0 +1,148 @@ +/* + * Copyright 2017-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. 
Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__ +#endif + +#if !defined(__SM_80_RT_HPP__) +#define __SM_80_RT_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_80_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_80_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* Below are implementations of SM-8.0 builtin functions which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +extern "C" { + __device_builtin__ __device__ unsigned __reduce_add_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_min_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_max_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ int __reduce_add_sync_signed_impl(unsigned, int); + __device_builtin__ __device__ int __reduce_min_sync_signed_impl(unsigned, int); + __device_builtin__ __device__ int __reduce_max_sync_signed_impl(unsigned, int); + __device_builtin__ __device__ unsigned __reduce_or_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_and_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_xor_sync_unsigned_impl(unsigned, unsigned); +} + +__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) { + return 
__reduce_add_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) { + return __reduce_min_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) { + return __reduce_max_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) { + return __reduce_add_sync_signed_impl(mask, value); +} + +__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) { + return __reduce_min_sync_signed_impl(mask, value); +} + +__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) { + return __reduce_max_sync_signed_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) { + return __reduce_and_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) { + return __reduce_or_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) { + return __reduce_xor_sync_unsigned_impl(mask, value); +} +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_80_RT_DECL__ + +#endif /* !__SM_80_RT_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..b3e3d38e42037ed31c42aa220c9c66a012dd51a1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.h @@ -0,0 
+1,259 @@ +/* + * Copyright 2022-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__ +#endif + +#if !defined(__SM_90_RT_H__) +#define __SM_90_RT_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_90_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_90_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#ifndef __CUDA_ARCH__ +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr) __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr) __DEF_IF_HOST +__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr, unsigned target_block_rank) __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr) __DEF_IF_HOST +__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr, unsigned cluster_cta_mask) __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterDim() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterIdx() __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank() __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks() __DEF_IF_HOST +__SM_90_RT_DECL__ void __cluster_barrier_arrive() __DEF_IF_HOST 
+__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed() __DEF_IF_HOST +__SM_90_RT_DECL__ void __cluster_barrier_wait() __DEF_IF_HOST +__SM_90_RT_DECL__ void __threadfence_cluster() __DEF_IF_HOST + +__SM_90_RT_DECL__ float2 atomicAdd(float2 *__address, float2 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *__address, float2 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *__address, float2 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float4 atomicAdd(float4 *__address, float4 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *__address, float4 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *__address, float4 val) __DEF_IF_HOST + +#undef EXCLUDE_FROM_RTC + +//Note: below atomic functions are templates, so cannot be represented in NVRTC +//builtins representation, so they have to be parsed on every NVRTC compilation. +//(notice 'EXCLUDE_FROM_RTC' ends above) + + +#ifndef __NV_DISABLE_128_ATOMICS +// lgen definitions for 128b atomics +extern "C" { + __device__ __device_builtin__ void __u128AtomicCAS(void *, void *, void *, void *); + __device__ __device_builtin__ void __u128AtomicCAS_block(void *, void *, void *, void *); + __device__ __device_builtin__ void __u128AtomicExch(void *, void *, void *); + __device__ __device_builtin__ void __u128AtomicExch_block(void *, void *, void *); +} + +// macro to get address of object, to workaround situations where the type overloads the "&" operator +#define __NV_ATOMIC_ADDRESSOF(__val) \ + (void *)(&(const_cast(reinterpret_cast(__val)))) + +// enable_if +template +struct __nv_atomic_enable_if { }; + +template +struct __nv_atomic_enable_if { typedef _T __type; }; + +// alignof +#if defined(__CUDACC_RTC__) +#define __NV_ATOMIC_ALIGNOF __alignof__ +#else +#define __NV_ATOMIC_ALIGNOF __alignof +#endif + +// trivially copyable +template +struct __nv_atomic_triv_cp_helper { +#if defined(__GNUC__) +#if (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3) + 
static const bool __val = true; +#elif (__GNUC__ < 5) + static const bool __val = __has_trivial_copy(_T); +#else + static const bool __val = __is_trivially_copyable(_T); +#endif +#else + static const bool __val = __is_trivially_copyable(_T); +#endif +}; +#define __NV_ATOMIC_TRIVIALLY_COPYABLE(_T) \ + __nv_atomic_triv_cp_helper<_T>::__val + +// return type +#if __cplusplus >= 202002L // C++20 or greater +#define __NV_ATOMIC_RET_TYPE(_T) _T +#else +#define __NV_ATOMIC_RET_TYPE(_T) typename \ + __nv_atomic_enable_if= 16 && \ + __NV_ATOMIC_TRIVIALLY_COPYABLE(_T), _T>::__type +#endif + +// requires +#if __cplusplus >= 202002L // C++20 or greater +#define __NV_ATOMIC_REQUIRES(_T) \ + requires(sizeof(_T) == 16 && \ + __NV_ATOMIC_ALIGNOF(_T) >= 16 && \ + __NV_ATOMIC_TRIVIALLY_COPYABLE(_T)) +#else +#define __NV_ATOMIC_REQUIRES(_T) +#endif + +// temp value and return value +#if __cplusplus >= 201103L || defined(_MSC_VER) // C++11 or greater, or MSC +#define __NV_ATOMIC_TEMP(_T) union _U \ + {_T __ret; __device__ __inline__ _U() {}}; _U __u +#define __NV_ATOMIC_RET(_T) __u.__ret +#else +#define __NV_ATOMIC_TEMP(_T) _T __ret +#define __NV_ATOMIC_RET(_T) __ret +#endif + +// templated 128-bit atomics +template +__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T) +atomicCAS(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) { + __NV_ATOMIC_TEMP(_T); + __u128AtomicCAS((void *)(__address), + __NV_ATOMIC_ADDRESSOF(__compare), + __NV_ATOMIC_ADDRESSOF(__val), + __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T))); + return __NV_ATOMIC_RET(_T); +} + +template +__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T) +atomicCAS_block(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) { + __NV_ATOMIC_TEMP(_T); + __u128AtomicCAS_block((void *)(__address), + __NV_ATOMIC_ADDRESSOF(__compare), + __NV_ATOMIC_ADDRESSOF(__val), + __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T))); + return __NV_ATOMIC_RET(_T); +} + +template +__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T) +atomicExch(_T *__address, _T __val) 
__NV_ATOMIC_REQUIRES(_T) { + __NV_ATOMIC_TEMP(_T); + __u128AtomicExch((void *)(__address), + __NV_ATOMIC_ADDRESSOF(__val), + __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T))); + return __NV_ATOMIC_RET(_T); +} + +template +__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T) +atomicExch_block(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) { + __NV_ATOMIC_TEMP(_T); + __u128AtomicExch_block((void *)(__address), + __NV_ATOMIC_ADDRESSOF(__val), + __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T))); + return __NV_ATOMIC_RET(_T); +} +#endif /* !__NV_DISABLE_128_ATOMICS */ + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_90_RT_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_90_rt.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_90_RT_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__ +#endif + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/storage_class.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/storage_class.h new file mode 100644 index 0000000000000000000000000000000000000000..1fb19bd46ebde4a53dfad866050fad9fb0cbd222 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/storage_class.h @@ -0,0 +1,142 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. 
Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__ +#endif + +#if !defined(__STORAGE_CLASS_H__) +#define __STORAGE_CLASS_H__ + +#if !defined(__var_used__) + +#define __var_used__ + +#endif /* __var_used__ */ + +#if !defined(__loc_sc__) + +#define __loc_sc__(loc, size, sc) \ + __storage##_##sc##size##loc loc + +#endif /* !__loc_sc__ */ + +#if !defined(__storage___device__) +#define __storage___device__ static __var_used__ +#endif /* __storage___device__ */ + +#if !defined(__storage_extern__device__) +#define __storage_extern__device__ static __var_used__ +#endif /* __storage_extern__device__ */ + +#if !defined(__storage_auto__device__) +#define __storage_auto__device__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__device__ */ + +#if !defined(__storage_static__device__) +#define __storage_static__device__ static __var_used__ +#endif /* __storage_static__device__ */ + +#if !defined(__storage___constant__) +#define __storage___constant__ static __var_used__ +#endif /* __storage___constant__ */ + +#if !defined(__storage_extern__constant__) +#define __storage_extern__constant__ static __var_used__ +#endif /* __storage_extern__constant__ */ + +#if !defined(__storage_auto__constant__) +#define __storage_auto__constant__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__constant__ 
*/ + +#if !defined(__storage_static__constant__) +#define __storage_static__constant__ static __var_used__ +#endif /* __storage_static__constant__ */ + +#if !defined(__storage___shared__) +#define __storage___shared__ static __var_used__ +#endif /* __storage___shared__ */ + +#if !defined(__storage_extern__shared__) +#define __storage_extern__shared__ static __var_used__ +#endif /* __storage_extern__shared__ */ + +#if !defined(__storage_auto__shared__) +#define __storage_auto__shared__ static +#endif /* __storage_auto__shared__ */ + +#if !defined(__storage_static__shared__) +#define __storage_static__shared__ static __var_used__ +#endif /* __storage_static__shared__ */ + +#if !defined(__storage__unsized__shared__) +#define __storage__unsized__shared__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage__unsized__shared__ */ + +#if !defined(__storage_extern_unsized__shared__) +#define __storage_extern_unsized__shared__ static __var_used__ +#endif /* __storage_extern_unsized__shared__ */ + +#if !defined(__storage_auto_unsized__shared__) +#define __storage_auto_unsized__shared__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto_unsized__shared__ */ + +#if !defined(__storage_static_unsized__shared__) +#define __storage_static_unsized__shared__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_static_unsized__shared__ */ + +#if !defined(__storage___text__) +#define __storage___text__ static __var_used__ +#endif /* __storage___text__ */ + +#if !defined(__storage_extern__text__) +#define __storage_extern__text__ static __var_used__ +#endif /* __storage_extern__text__ */ + +#if !defined(__storage_auto__text__) +#define __storage_auto__text__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__text__ */ + +#if !defined(__storage_static__text__) +#define __storage_static__text__ static __var_used__ +#endif /* __storage_static__text__ */ + +#if !defined(__storage___surf__) +#define __storage___surf__ static __var_used__ +#endif /* __storage___surf__ */ + +#if 
!defined(__storage_extern__surf__) +#define __storage_extern__surf__ static __var_used__ +#endif /* __storage_extern__surf__ */ + +#if !defined(__storage_auto__surf__) +#define __storage_auto__surf__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__surf__ */ + +#if !defined(__storage_static__surf__) +#define __storage_static__surf__ static __var_used__ +#endif /* __storage_static__surf__ */ + +#endif /* !__STORAGE_CLASS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/addressof.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/addressof.h new file mode 100644 index 0000000000000000000000000000000000000000..6beb6588c441e2ad2ecf35dd91dea24e266a3d5b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/addressof.h @@ -0,0 +1,96 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MEMORY_ADDRESSOF_H +#define _LIBCUDACXX___MEMORY_ADDRESSOF_H + +#ifndef __cuda_std__ +#include <__config> +#endif //__cuda_std__ + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// addressof +// NVCXX has the builtin defined but did not mark it as supported +#if defined(_LIBCUDACXX_ADDRESSOF) + +template +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 +_LIBCUDACXX_NO_CFI _LIBCUDACXX_INLINE_VISIBILITY +_Tp* +addressof(_Tp& __x) _NOEXCEPT +{ + return __builtin_addressof(__x); +} + +#else + +template +inline _LIBCUDACXX_NO_CFI _LIBCUDACXX_INLINE_VISIBILITY +_Tp* +addressof(_Tp& __x) _NOEXCEPT +{ + return reinterpret_cast<_Tp *>( + const_cast(&reinterpret_cast(__x))); +} + +#endif // defined(_LIBCUDACXX_ADDRESSOF) + +#if defined(_LIBCUDACXX_HAS_OBJC_ARC) && !defined(_LIBCUDACXX_PREDEFINED_OBJC_ARC_ADDRESSOF) +// Objective-C++ Automatic Reference Counting uses qualified pointers +// that require special addressof() signatures. When +// _LIBCUDACXX_PREDEFINED_OBJC_ARC_ADDRESSOF is defined, the compiler +// itself is providing these definitions. Otherwise, we provide them. 
+template +inline _LIBCUDACXX_INLINE_VISIBILITY +__strong _Tp* +addressof(__strong _Tp& __x) _NOEXCEPT +{ + return &__x; +} + +#ifdef _LIBCUDACXX_HAS_OBJC_ARC_WEAK +template +inline _LIBCUDACXX_INLINE_VISIBILITY +__weak _Tp* +addressof(__weak _Tp& __x) _NOEXCEPT +{ + return &__x; +} +#endif + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +__autoreleasing _Tp* +addressof(__autoreleasing _Tp& __x) _NOEXCEPT +{ + return &__x; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +__unsafe_unretained _Tp* +addressof(__unsafe_unretained _Tp& __x) _NOEXCEPT +{ + return &__x; +} +#endif + +#if !defined(_LIBCUDACXX_CXX03_LANG) +template _Tp* addressof(const _Tp&&) noexcept = delete; +#endif + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _LIBCUDACXX___MEMORY_ADDRESSOF_H diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/construct_at.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/construct_at.h new file mode 100644 index 0000000000000000000000000000000000000000..44b747efd9a7ac9e45931ec22b808b79fdf6f924 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/construct_at.h @@ -0,0 +1,209 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H +#define _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H + +#ifndef __cuda_std__ +#include <__config> +#endif //__cuda_std__ + +#include "../__assert" +#include "../__iterator/access.h" +#include "../__memory/addressof.h" +#include "../__memory/voidify.h" +#include "../__type_traits/enable_if.h" +#include "../__type_traits/is_array.h" +#include "../__type_traits/is_constant_evaluated.h" +#include "../__type_traits/is_trivially_move_assignable.h" +#include "../__type_traits/is_trivially_constructible.h" +#include "../__utility/forward.h" +#include "../__utility/move.h" + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +#if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17 // need to backfill ::std::construct_at +#ifndef _LIBCUDACXX_COMPILER_NVRTC +#include +#endif // _LIBCUDACXX_COMPILER_NVRTC + +#ifndef __cpp_lib_constexpr_dynamic_alloc +namespace std { +template ()) _Tp(_CUDA_VSTD::declval<_Args>()...))> +_LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp* construct_at(_Tp* __location, _Args&&... 
__args) { +#if defined(_LIBCUDACXX_ADDRESSOF) + return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...); +#else + return ::new (const_cast(static_cast(__location))) _Tp(_CUDA_VSTD::forward<_Args>(__args)...); +#endif +} +} // namespace std +#endif // __cpp_lib_constexpr_dynamic_alloc +#endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17 + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// There is a performance issue with placement new, where EDG based compiler insert a nullptr check that is superfluous +// Because this is a noticable performance regression, we specialize for trivially constructible types +// This is possible because we are calling ::new ignoring any user defined overloads of operator placement new + +// construct_at +#if _LIBCUDACXX_STD_VER > 17 + +_LIBCUDACXX_DISABLE_EXEC_CHECK +template ()) _Tp(_CUDA_VSTD::declval<_Args>()...))> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +__enable_if_t || + !is_trivially_move_assignable_v<_Tp>, _Tp*> +construct_at(_Tp* __location, _Args&&... __args) { + _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at"); +#if defined(__cuda_std__) + // Need to go through `std::construct_at` as that is the explicitly blessed function + if (__libcpp_is_constant_evaluated()) { + return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...); + } +#endif // __cuda_std__ + return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...); +} + +_LIBCUDACXX_DISABLE_EXEC_CHECK +template ()) _Tp(_CUDA_VSTD::declval<_Args>()...))> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +__enable_if_t && + is_trivially_move_assignable_v<_Tp>, _Tp*> +construct_at(_Tp* __location, _Args&&... 
__args) { + _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at"); +#if defined(__cuda_std__) + // Need to go through `std::construct_at` as that is the explicitly blessed function + if (__libcpp_is_constant_evaluated()) { + return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...); + } + *__location = _Tp{_CUDA_VSTD::forward<_Args>(__args)...}; + return __location; +#else // ^^^ __cuda_std__ ^^^ / vvv !__cuda_std__ vvv + // NVCC always considers construction + move assignment, other compilers are smarter using copy construction + // So rather than adding all kinds of workarounds simply fall back to the correct implementation for libcxx mode + return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...); +#endif // !__cuda_std__ +} + +#endif // _LIBCUDACXX_STD_VER > 17 + +_LIBCUDACXX_DISABLE_EXEC_CHECK +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +__enable_if_t +__construct_at(_Tp* __location, _Args&&... __args) { + _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at"); +#if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17 + // Need to go through `std::construct_at` as that is the explicitly blessed function + if (__libcpp_is_constant_evaluated()) { + return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...); + } +#endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17 + return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...); +} + +_LIBCUDACXX_DISABLE_EXEC_CHECK +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +__enable_if_t<_LIBCUDACXX_TRAIT(is_trivially_constructible, _Tp, _Args...) && _LIBCUDACXX_TRAIT(is_trivially_move_assignable, _Tp), _Tp*> +__construct_at(_Tp* __location, _Args&&... 
__args) { + _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at"); +#if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17 + // Need to go through `std::construct_at` as that is the explicitly blessed function + if (__libcpp_is_constant_evaluated()) { + return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...); + } +#endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17 + *__location = _Tp{_CUDA_VSTD::forward<_Args>(__args)...}; + return __location; +} + +// destroy_at + +// The internal functions are available regardless of the language version (with the exception of the `__destroy_at` +// taking an array). +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +_ForwardIterator __destroy(_ForwardIterator, _ForwardIterator); + +template ::value, int> = 0> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +void __destroy_at(_Tp* __loc) { + _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at"); + __loc->~_Tp(); +} + +#if _LIBCUDACXX_STD_VER > 17 +template ::value, int> = 0> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +void __destroy_at(_Tp* __loc) { + _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at"); + _CUDA_VSTD::__destroy(_CUDA_VSTD::begin(*__loc), _CUDA_VSTD::end(*__loc)); +} +#endif + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +_ForwardIterator __destroy(_ForwardIterator __first, _ForwardIterator __last) { + for (; __first != __last; ++__first) + _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__first)); + return __first; +} + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +_BidirectionalIterator __reverse_destroy(_BidirectionalIterator __first, _BidirectionalIterator __last) { + while (__last != __first) { + --__last; + _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__last)); + } + return __last; +} + +#if _LIBCUDACXX_STD_VER > 14 + +template , int> = 0> 
+_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +void destroy_at(_Tp* __loc) { + _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at"); + __loc->~_Tp(); +} + +#if _LIBCUDACXX_STD_VER > 17 +template , int> = 0> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +void destroy_at(_Tp* __loc) { + _CUDA_VSTD::__destroy_at(__loc); +} +#endif // _LIBCUDACXX_STD_VER > 17 + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +void destroy(_ForwardIterator __first, _ForwardIterator __last) { + (void)_CUDA_VSTD::__destroy(_CUDA_VSTD::move(__first), _CUDA_VSTD::move(__last)); +} + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +_ForwardIterator destroy_n(_ForwardIterator __first, _Size __n) { + for (; __n > 0; (void)++__first, --__n) + _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__first)); + return __first; +} + +#endif // _LIBCUDACXX_STD_VER > 14 + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..b3fa8b6af63cb9e59f001e0999057ecd57050667 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h @@ -0,0 +1,380 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MEMORY_POINTER_TRAITS_H +#define _LIBCUDACXX___MEMORY_POINTER_TRAITS_H + +#ifndef __cuda_std__ +#include <__config> +#endif //__cuda_std__ + +#include "../__memory/addressof.h" +#include "../__type_traits/conjunction.h" +#include "../__type_traits/conditional.h" +#include "../__type_traits/decay.h" +#include "../__type_traits/enable_if.h" +#include "../__type_traits/integral_constant.h" +#include "../__type_traits/is_class.h" +#include "../__type_traits/is_function.h" +#include "../__type_traits/is_void.h" +#include "../__type_traits/void_t.h" +#include "../__utility/declval.h" +#include "../cstddef" + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template +struct __has_element_type : false_type {}; + +template +struct __has_element_type<_Tp, + __void_t> : true_type {}; + +template ::value> +struct __pointer_traits_element_type; + +template +struct __pointer_traits_element_type<_Ptr, true> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE typename _Ptr::element_type type; +}; + +#ifndef _LIBCUDACXX_HAS_NO_VARIADICS + +template