diff --git a/.gitattributes b/.gitattributes index b9ac04c5ee17620ac3ddf9360c4b1837465370e6..2053aeffb0d0da3fceab9e381349ff461288b3e7 100644 --- a/.gitattributes +++ b/.gitattributes @@ -79,3 +79,8 @@ mingw/lib/gcc/mingw32/4.3.3/libstdc++.a filter=lfs diff=lfs merge=lfs -text mingw/bin/windres.exe filter=lfs diff=lfs merge=lfs -text mingw/libexec/gcc/mingw32/4.3.3/cc1plus.exe filter=lfs diff=lfs merge=lfs -text mingw/libexec/gcc/mingw32/4.3.3/cc1.exe filter=lfs diff=lfs merge=lfs -text +miniCUDA124/bin/cuinj64_124.dll filter=lfs diff=lfs merge=lfs -text +miniCUDA124/bin/cuobjdump.exe filter=lfs diff=lfs merge=lfs -text +miniCUDA124/bin/nppicc64_12.dll filter=lfs diff=lfs merge=lfs -text +miniCUDA124/bin/nppial64_12.dll filter=lfs diff=lfs merge=lfs -text +miniCUDA124/bin/cudafe++.exe filter=lfs diff=lfs merge=lfs -text diff --git a/miniCUDA124/bin/cudafe++.exe b/miniCUDA124/bin/cudafe++.exe new file mode 100644 index 0000000000000000000000000000000000000000..7c5a518eb5fd84625bcef3f007de09709b08cd7b --- /dev/null +++ b/miniCUDA124/bin/cudafe++.exe @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a782d452c6eff5adc291c1114e978716a8a971271f964830fabe1f5caa8d304 +size 7393280 diff --git a/miniCUDA124/bin/cuinj64_124.dll b/miniCUDA124/bin/cuinj64_124.dll new file mode 100644 index 0000000000000000000000000000000000000000..14c613c0bc12f50efbab5b400b6f62f8ce3a6852 --- /dev/null +++ b/miniCUDA124/bin/cuinj64_124.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dc11bd8c0a2c8e18b454b8e2fcee19cf1f515349d4f8fd149da25e7577a94a8 +size 1513472 diff --git a/miniCUDA124/bin/cuobjdump.exe b/miniCUDA124/bin/cuobjdump.exe new file mode 100644 index 0000000000000000000000000000000000000000..e4e34431adb830ff735afb5a185f1d4c9845d182 --- /dev/null +++ b/miniCUDA124/bin/cuobjdump.exe @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:578644883fbce889a88b5752f3c8616b846f0dac455885cea12a206989bece6a +size 11395584 diff --git a/miniCUDA124/bin/nppial64_12.dll b/miniCUDA124/bin/nppial64_12.dll new file mode 100644 index 0000000000000000000000000000000000000000..262877ff910438cc34552ea0ef144979e5d881d3 --- /dev/null +++ b/miniCUDA124/bin/nppial64_12.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2350fcd7433901e365ac532b68863120276bbb82a0db5f3913be5bb8d6377518 +size 16312832 diff --git a/miniCUDA124/bin/nppicc64_12.dll b/miniCUDA124/bin/nppicc64_12.dll new file mode 100644 index 0000000000000000000000000000000000000000..b2fa04825f3da7219931cf1c9e3f79412fb47447 --- /dev/null +++ b/miniCUDA124/bin/nppicc64_12.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b71840b2fa93878cc0a8e3c76d019436900c157427ea6d6c362ce0213fcb9e53 +size 6125056 diff --git a/miniCUDA124/include/CL/cl_gl.h b/miniCUDA124/include/CL/cl_gl.h new file mode 100644 index 0000000000000000000000000000000000000000..516cf646e1827a18c71c7ba09e457140e9eae831 --- /dev/null +++ b/miniCUDA124/include/CL/cl_gl.h @@ -0,0 +1,372 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_GL_H_ +#define OPENCL_CL_GL_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. +*/ + +#include <CL/cl.h> + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus extern "C" { +#endif + +/*************************************************************** +* cl_khr_gl_sharing +***************************************************************/ +#define cl_khr_gl_sharing 1 +#define CL_KHR_GL_SHARING_EXTENSION_NAME \ + "cl_khr_gl_sharing" + +typedef cl_uint cl_gl_context_info; + +/* Error codes */ +#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000 + +/* cl_gl_context_info */ +#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006 +#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007 + +/* Additional cl_context_properties */ +#define CL_GL_CONTEXT_KHR 0x2008 +#define CL_EGL_DISPLAY_KHR 0x2009 +#define CL_GLX_DISPLAY_KHR 0x200A +#define CL_WGL_HDC_KHR 0x200B +#define CL_CGL_SHAREGROUP_KHR 0x200C + +typedef cl_uint cl_gl_object_type; +typedef cl_uint cl_gl_texture_info; +typedef cl_uint cl_gl_platform_info; + +/* cl_gl_object_type */ +#define CL_GL_OBJECT_BUFFER 0x2000 +#define CL_GL_OBJECT_TEXTURE2D 0x2001 +#define CL_GL_OBJECT_TEXTURE3D 0x2002 +#define CL_GL_OBJECT_RENDERBUFFER 0x2003 + +#if defined(CL_VERSION_1_2) +/* cl_gl_object_type */ +#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E +#define CL_GL_OBJECT_TEXTURE1D 0x200F +#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010 +#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011 + +#endif /* defined(CL_VERSION_1_2) */ + +/* cl_gl_texture_info */ +#define CL_GL_TEXTURE_TARGET 0x2004 +#define CL_GL_MIPMAP_LEVEL 0x2005 + + +typedef cl_int (CL_API_CALL * +clGetGLContextInfoKHR_fn)( + const cl_context_properties* properties, + cl_gl_context_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +typedef cl_mem (CL_API_CALL * +clCreateFromGLBuffer_fn)( + cl_context context, + cl_mem_flags flags, + cl_GLuint bufobj, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLContextInfoKHR( + const cl_context_properties* properties, + cl_gl_context_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLBuffer( + cl_context context, + cl_mem_flags flags, + cl_GLuint
bufobj, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#if defined(CL_VERSION_1_2) + +typedef cl_mem (CL_API_CALL * +clCreateFromGLTexture_fn)( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* defined(CL_VERSION_1_2) */ + + +typedef cl_mem (CL_API_CALL * +clCreateFromGLRenderbuffer_fn)( + cl_context context, + cl_mem_flags flags, + cl_GLuint renderbuffer, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int (CL_API_CALL * +clGetGLObjectInfo_fn)( + cl_mem memobj, + cl_gl_object_type* gl_object_type, + cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int (CL_API_CALL * +clGetGLTextureInfo_fn)( + cl_mem memobj, + cl_gl_texture_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int (CL_API_CALL * +clEnqueueAcquireGLObjects_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int (CL_API_CALL * +clEnqueueReleaseGLObjects_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLRenderbuffer( + cl_context context, + cl_mem_flags flags, + cl_GLuint renderbuffer, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLObjectInfo( + cl_mem memobj, + cl_gl_object_type* gl_object_type, + cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLTextureInfo( + cl_mem memobj, + cl_gl_texture_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireGLObjects( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseGLObjects( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/* OpenCL 1.0 APIs that were deprecated in OpenCL 1.2 */ + +typedef cl_mem (CL_API_CALL * +clCreateFromGLTexture2D_fn)( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_mem (CL_API_CALL * +clCreateFromGLTexture3D_fn)( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint 
miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture2D( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture3D( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_gl_event +***************************************************************/ +#define cl_khr_gl_event 1 +#define CL_KHR_GL_EVENT_EXTENSION_NAME \ + "cl_khr_gl_event" + +typedef struct __GLsync * cl_GLsync; + +/* cl_command_type */ +#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D + + +typedef cl_event (CL_API_CALL * +clCreateEventFromGLsyncKHR_fn)( + cl_context context, + cl_GLsync sync, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateEventFromGLsyncKHR( + cl_context context, + cl_GLsync sync, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_gl_depth_images +***************************************************************/ +#define cl_khr_gl_depth_images 1 +#define CL_KHR_GL_DEPTH_IMAGES_EXTENSION_NAME \ + "cl_khr_gl_depth_images" + +#if !defined(CL_VERSION_1_2) +/* cl_channel_order - defined in CL.h for OpenCL 1.2 and newer */ +#define CL_DEPTH_STENCIL 0x10BE + +#endif /* !defined(CL_VERSION_1_2) */ + +#if !defined(CL_VERSION_1_2) +/* cl_channel_type - defined in CL.h for OpenCL 1.2 and newer */ +#define CL_UNORM_INT24 0x10DF + +#endif /* !defined(CL_VERSION_1_2) */ + +/*************************************************************** +* cl_khr_gl_msaa_sharing +***************************************************************/ +#define cl_khr_gl_msaa_sharing 1 +#define CL_KHR_GL_MSAA_SHARING_EXTENSION_NAME \ + "cl_khr_gl_msaa_sharing" + +/* cl_gl_texture_info */ +#define CL_GL_NUM_SAMPLES 0x2012 + +/*************************************************************** +* cl_intel_sharing_format_query_gl +***************************************************************/ +#define cl_intel_sharing_format_query_gl 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_GL_EXTENSION_NAME \ + "cl_intel_sharing_format_query_gl" + +/* when cl_khr_gl_sharing is supported */ + +typedef cl_int (CL_API_CALL * +clGetSupportedGLTextureFormatsINTEL_fn)( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + cl_GLenum* gl_formats, + cl_uint* num_texture_formats) ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedGLTextureFormatsINTEL( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + cl_GLenum* gl_formats, + cl_uint* num_texture_formats) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_GL_H_ */ diff --git a/miniCUDA124/include/CL/cl_gl_ext.h b/miniCUDA124/include/CL/cl_gl_ext.h new 
file mode 100644 index 0000000000000000000000000000000000000000..57a30114981183d7bf39ee1e734b641037b02d4d --- /dev/null +++ b/miniCUDA124/include/CL/cl_gl_ext.h @@ -0,0 +1,18 @@ +/******************************************************************************* + * Copyright (c) 2008-2021 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#include <CL/cl_gl.h> +#pragma message("The extensions in cl_gl_ext.h have been moved into cl_gl.h. Please include cl_gl.h directly.") diff --git a/miniCUDA124/include/CL/cl_platform.h b/miniCUDA124/include/CL/cl_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..358b374c37cc65a59c1523550617c4c2cc9b4fce --- /dev/null +++ b/miniCUDA124/include/CL/cl_platform.h @@ -0,0 +1,1478 @@ +/******************************************************************************* + * Copyright (c) 2008-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef __CL_PLATFORM_H +#define __CL_PLATFORM_H + + +#ifndef __CL_VERSION_H +#define __CL_VERSION_H + +/* Detect which version to target */ +#if !defined(CL_TARGET_OPENCL_VERSION) +#pragma message("cl_version.h: CL_TARGET_OPENCL_VERSION is not defined. Defaulting to 300 (OpenCL 3.0)") +#define CL_TARGET_OPENCL_VERSION 300 +#endif +#if CL_TARGET_OPENCL_VERSION != 100 && \ + CL_TARGET_OPENCL_VERSION != 110 && \ + CL_TARGET_OPENCL_VERSION != 120 && \ + CL_TARGET_OPENCL_VERSION != 200 && \ + CL_TARGET_OPENCL_VERSION != 210 && \ + CL_TARGET_OPENCL_VERSION != 220 && \ + CL_TARGET_OPENCL_VERSION != 300 +#pragma message("cl_version: CL_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220, 300).
Defaulting to 300 (OpenCL 3.0)") +#undef CL_TARGET_OPENCL_VERSION +#define CL_TARGET_OPENCL_VERSION 300 +#endif + + +/* OpenCL Version */ +#if CL_TARGET_OPENCL_VERSION >= 300 && !defined(CL_VERSION_3_0) +#define CL_VERSION_3_0 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 220 && !defined(CL_VERSION_2_2) +#define CL_VERSION_2_2 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 210 && !defined(CL_VERSION_2_1) +#define CL_VERSION_2_1 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 200 && !defined(CL_VERSION_2_0) +#define CL_VERSION_2_0 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 120 && !defined(CL_VERSION_1_2) +#define CL_VERSION_1_2 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 110 && !defined(CL_VERSION_1_1) +#define CL_VERSION_1_1 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 100 && !defined(CL_VERSION_1_0) +#define CL_VERSION_1_0 1 +#endif + +/* Allow deprecated APIs for older OpenCL versions. */ +#if CL_TARGET_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS) +#define CL_USE_DEPRECATED_OPENCL_2_2_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS) +#define CL_USE_DEPRECATED_OPENCL_2_1_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS) +#define CL_USE_DEPRECATED_OPENCL_2_0_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS) +#define CL_USE_DEPRECATED_OPENCL_1_2_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) +#define CL_USE_DEPRECATED_OPENCL_1_1_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS) +#define CL_USE_DEPRECATED_OPENCL_1_0_APIS +#endif + +#endif /* __CL_VERSION_H */ + + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(_WIN32) + #if !defined(CL_API_ENTRY) + #define CL_API_ENTRY + #endif + #if !defined(CL_API_CALL) + #define CL_API_CALL __stdcall + #endif + #if !defined(CL_CALLBACK) + #define CL_CALLBACK __stdcall + #endif +#else + #if !defined(CL_API_ENTRY) + #define CL_API_ENTRY + #endif + #if !defined(CL_API_CALL) + #define CL_API_CALL + #endif + #if !defined(CL_CALLBACK) + #define CL_CALLBACK + #endif +#endif + +/* + * Deprecation flags refer to the last version of the header in which the + * feature was not deprecated. + * + * E.g. VERSION_1_1_DEPRECATED means the feature is present in 1.1 without + * deprecation but is deprecated in versions later than 1.1. 
+ */ + +#ifndef CL_API_SUFFIX_USER +#define CL_API_SUFFIX_USER +#endif + +#ifndef CL_API_PREFIX_USER +#define CL_API_PREFIX_USER +#endif + +#define CL_API_SUFFIX_COMMON CL_API_SUFFIX_USER +#define CL_API_PREFIX_COMMON CL_API_PREFIX_USER + +#define CL_API_SUFFIX__VERSION_1_0 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_1_1 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_1_2 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_2_0 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_2_1 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_2_2 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_3_0 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__EXPERIMENTAL CL_API_SUFFIX_COMMON + + +#ifdef __GNUC__ + #define CL_API_SUFFIX_DEPRECATED __attribute__((deprecated)) + #define CL_API_PREFIX_DEPRECATED +#elif defined(_WIN32) + #define CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX_DEPRECATED __declspec(deprecated) +#else + #define CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS + #define CL_API_SUFFIX__VERSION_1_0_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_1_0_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_1_0_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_1_0_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS + #define CL_API_SUFFIX__VERSION_1_1_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_1_1_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_1_1_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_1_1_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_2_APIS + #define CL_API_SUFFIX__VERSION_1_2_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_1_2_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_1_2_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_1_2_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED + #endif + +#ifdef CL_USE_DEPRECATED_OPENCL_2_0_APIS + #define CL_API_SUFFIX__VERSION_2_0_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_2_0_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_2_0_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_2_0_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_2_1_APIS + #define CL_API_SUFFIX__VERSION_2_1_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_2_1_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_2_1_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_2_1_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_2_2_APIS + #define CL_API_SUFFIX__VERSION_2_2_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_2_2_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_2_2_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_2_2_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#if (defined (_WIN32) && defined(_MSC_VER)) + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wlanguage-extension-token" +#endif + +/* intptr_t is used in cl.h and provided by 
stddef.h in Visual C++, but not in clang */ +/* stdint.h was missing before Visual Studio 2010, include it for later versions and for clang */ +#if defined(__clang__) || _MSC_VER >= 1600 + #include <stdint.h> +#endif + +/* scalar types */ +typedef signed __int8 cl_char; +typedef unsigned __int8 cl_uchar; +typedef signed __int16 cl_short; +typedef unsigned __int16 cl_ushort; +typedef signed __int32 cl_int; +typedef unsigned __int32 cl_uint; +typedef signed __int64 cl_long; +typedef unsigned __int64 cl_ulong; + +typedef unsigned __int16 cl_half; +typedef float cl_float; +typedef double cl_double; + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + +/* Macro names and corresponding values defined by OpenCL */ +#define CL_CHAR_BIT 8 +#define CL_SCHAR_MAX 127 +#define CL_SCHAR_MIN (-127-1) +#define CL_CHAR_MAX CL_SCHAR_MAX +#define CL_CHAR_MIN CL_SCHAR_MIN +#define CL_UCHAR_MAX 255 +#define CL_SHRT_MAX 32767 +#define CL_SHRT_MIN (-32767-1) +#define CL_USHRT_MAX 65535 +#define CL_INT_MAX 2147483647 +#define CL_INT_MIN (-2147483647-1) +#define CL_UINT_MAX 0xffffffffU +#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) +#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) +#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) + +#define CL_FLT_DIG 6 +#define CL_FLT_MANT_DIG 24 +#define CL_FLT_MAX_10_EXP +38 +#define CL_FLT_MAX_EXP +128 +#define CL_FLT_MIN_10_EXP -37 +#define CL_FLT_MIN_EXP -125 +#define CL_FLT_RADIX 2 +#define CL_FLT_MAX 340282346638528859811704183484516925440.0f +#define CL_FLT_MIN 1.175494350822287507969e-38f +#define CL_FLT_EPSILON 1.1920928955078125e-7f + +#define CL_HALF_DIG 3 +#define CL_HALF_MANT_DIG 11 +#define CL_HALF_MAX_10_EXP +4 +#define CL_HALF_MAX_EXP +16 +#define CL_HALF_MIN_10_EXP -4 +#define CL_HALF_MIN_EXP -13 +#define CL_HALF_RADIX 2 +#define CL_HALF_MAX 65504.0f +#define CL_HALF_MIN 6.103515625e-05f +#define CL_HALF_EPSILON 9.765625e-04f + +#define CL_DBL_DIG 15 +#define CL_DBL_MANT_DIG 53 +#define CL_DBL_MAX_10_EXP +308 +#define CL_DBL_MAX_EXP +1024 +#define CL_DBL_MIN_10_EXP -307 +#define CL_DBL_MIN_EXP -1021 +#define CL_DBL_RADIX 2 +#define CL_DBL_MAX 1.7976931348623158e+308 +#define CL_DBL_MIN 2.225073858507201383090e-308 +#define CL_DBL_EPSILON 2.220446049250313080847e-16 + +#define CL_M_E 2.7182818284590452354 +#define CL_M_LOG2E 1.4426950408889634074 +#define CL_M_LOG10E 0.43429448190325182765 +#define CL_M_LN2 0.69314718055994530942 +#define CL_M_LN10 2.30258509299404568402 +#define CL_M_PI 3.14159265358979323846 +#define CL_M_PI_2 1.57079632679489661923 +#define CL_M_PI_4 0.78539816339744830962 +#define CL_M_1_PI 0.31830988618379067154 +#define CL_M_2_PI 0.63661977236758134308 +#define CL_M_2_SQRTPI 1.12837916709551257390 +#define CL_M_SQRT2 1.41421356237309504880 +#define CL_M_SQRT1_2 0.70710678118654752440 + +#define CL_M_E_F 2.718281828f +#define CL_M_LOG2E_F 1.442695041f +#define CL_M_LOG10E_F 0.434294482f +#define CL_M_LN2_F 0.693147181f +#define CL_M_LN10_F 2.302585093f +#define CL_M_PI_F 3.141592654f +#define CL_M_PI_2_F 1.570796327f +#define CL_M_PI_4_F 0.785398163f +#define CL_M_1_PI_F 0.318309886f +#define CL_M_2_PI_F 0.636619772f +#define CL_M_2_SQRTPI_F 1.128379167f +#define CL_M_SQRT2_F 1.414213562f +#define CL_M_SQRT1_2_F 0.707106781f + +#define CL_NAN (CL_INFINITY - CL_INFINITY) +#define CL_HUGE_VALF ((cl_float) 1e50) +#define CL_HUGE_VAL ((cl_double) 1e500) +#define CL_MAXFLOAT CL_FLT_MAX +#define CL_INFINITY CL_HUGE_VALF + +#else + +#include <stdint.h> + +/* scalar types */ +typedef int8_t cl_char; +typedef uint8_t cl_uchar;
+typedef int16_t cl_short; +typedef uint16_t cl_ushort; +typedef int32_t cl_int; +typedef uint32_t cl_uint; +typedef int64_t cl_long; +typedef uint64_t cl_ulong; + +typedef uint16_t cl_half; +typedef float cl_float; +typedef double cl_double; + +/* Macro names and corresponding values defined by OpenCL */ +#define CL_CHAR_BIT 8 +#define CL_SCHAR_MAX 127 +#define CL_SCHAR_MIN (-127-1) +#define CL_CHAR_MAX CL_SCHAR_MAX +#define CL_CHAR_MIN CL_SCHAR_MIN +#define CL_UCHAR_MAX 255 +#define CL_SHRT_MAX 32767 +#define CL_SHRT_MIN (-32767-1) +#define CL_USHRT_MAX 65535 +#define CL_INT_MAX 2147483647 +#define CL_INT_MIN (-2147483647-1) +#define CL_UINT_MAX 0xffffffffU +#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) +#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) +#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) + +#define CL_FLT_DIG 6 +#define CL_FLT_MANT_DIG 24 +#define CL_FLT_MAX_10_EXP +38 +#define CL_FLT_MAX_EXP +128 +#define CL_FLT_MIN_10_EXP -37 +#define CL_FLT_MIN_EXP -125 +#define CL_FLT_RADIX 2 +#define CL_FLT_MAX 340282346638528859811704183484516925440.0f +#define CL_FLT_MIN 1.175494350822287507969e-38f +#define CL_FLT_EPSILON 1.1920928955078125e-7f + +#define CL_HALF_DIG 3 +#define CL_HALF_MANT_DIG 11 +#define CL_HALF_MAX_10_EXP +4 +#define CL_HALF_MAX_EXP +16 +#define CL_HALF_MIN_10_EXP -4 +#define CL_HALF_MIN_EXP -13 +#define CL_HALF_RADIX 2 +#define CL_HALF_MAX 65504.0f +#define CL_HALF_MIN 6.103515625e-05f +#define CL_HALF_EPSILON 9.765625e-04f + +#define CL_DBL_DIG 15 +#define CL_DBL_MANT_DIG 53 +#define CL_DBL_MAX_10_EXP +308 +#define CL_DBL_MAX_EXP +1024 +#define CL_DBL_MIN_10_EXP -307 +#define CL_DBL_MIN_EXP -1021 +#define CL_DBL_RADIX 2 +#define CL_DBL_MAX 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0 +#define CL_DBL_MIN 2.225073858507201383090e-308 +#define CL_DBL_EPSILON 2.220446049250313080847e-16 + +#define CL_M_E 2.7182818284590452354 +#define CL_M_LOG2E 1.4426950408889634074 +#define CL_M_LOG10E 0.43429448190325182765 +#define CL_M_LN2 0.69314718055994530942 +#define CL_M_LN10 2.30258509299404568402 +#define CL_M_PI 3.14159265358979323846 +#define CL_M_PI_2 1.57079632679489661923 +#define CL_M_PI_4 0.78539816339744830962 +#define CL_M_1_PI 0.31830988618379067154 +#define CL_M_2_PI 0.63661977236758134308 +#define CL_M_2_SQRTPI 1.12837916709551257390 +#define CL_M_SQRT2 1.41421356237309504880 +#define CL_M_SQRT1_2 0.70710678118654752440 + +#define CL_M_E_F 2.718281828f +#define CL_M_LOG2E_F 1.442695041f +#define CL_M_LOG10E_F 0.434294482f +#define CL_M_LN2_F 0.693147181f +#define CL_M_LN10_F 2.302585093f +#define CL_M_PI_F 3.141592654f +#define CL_M_PI_2_F 1.570796327f +#define CL_M_PI_4_F 0.785398163f +#define CL_M_1_PI_F 0.318309886f +#define CL_M_2_PI_F 0.636619772f +#define CL_M_2_SQRTPI_F 1.128379167f +#define CL_M_SQRT2_F 1.414213562f +#define CL_M_SQRT1_2_F 0.707106781f + +#if defined( __GNUC__ ) + #define CL_HUGE_VALF __builtin_huge_valf() + #define CL_HUGE_VAL __builtin_huge_val() + #define CL_NAN __builtin_nanf( "" ) +#else + #define CL_HUGE_VALF ((cl_float) 1e50) + #define CL_HUGE_VAL ((cl_double) 1e500) + float nanf( const char * ); + #define CL_NAN nanf( "" ) +#endif +#define CL_MAXFLOAT CL_FLT_MAX +#define CL_INFINITY CL_HUGE_VALF + +#endif + +#include <stddef.h> + +/* Mirror types to
GL types. Mirror types allow us to avoid deciding which headers to load based on whether we are using GL or GLES here. */ +typedef unsigned int cl_GLuint; +typedef int cl_GLint; +typedef unsigned int cl_GLenum; + +/* + * Vector types + * + * Note: OpenCL requires that all types be naturally aligned. + * This means that vector types must be naturally aligned. + * For example, a vector of four floats must be aligned to + * a 16 byte boundary (calculated as 4 * the natural 4-byte + * alignment of the float). The alignment qualifiers here + * will only function properly if your compiler supports them + * and if you don't actively work to defeat them. For example, + * in order for a cl_float4 to be 16 byte aligned in a struct, + * the start of the struct must itself be 16-byte aligned. + * + * Maintaining proper alignment is the user's responsibility. + */ + +/* Define basic vector types */ +#if defined( __VEC__ ) + #if !defined(__clang__) + #include <altivec.h> /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */ + #endif + typedef __vector unsigned char __cl_uchar16; + typedef __vector signed char __cl_char16; + typedef __vector unsigned short __cl_ushort8; + typedef __vector signed short __cl_short8; + typedef __vector unsigned int __cl_uint4; + typedef __vector signed int __cl_int4; + typedef __vector float __cl_float4; + #define __CL_UCHAR16__ 1 + #define __CL_CHAR16__ 1 + #define __CL_USHORT8__ 1 + #define __CL_SHORT8__ 1 + #define __CL_UINT4__ 1 + #define __CL_INT4__ 1 + #define __CL_FLOAT4__ 1 +#endif + +#if defined( __SSE__ ) + #if defined( __MINGW64__ ) + #include <intrin.h> + #else + #include <xmmintrin.h> + #endif + #if defined( __GNUC__ ) + typedef float __cl_float4 __attribute__((vector_size(16))); + #else + typedef __m128 __cl_float4; + #endif + #define __CL_FLOAT4__ 1 +#endif + +#if defined( __SSE2__ ) + #if defined( __MINGW64__ ) + #include <intrin.h> + #else + #include <emmintrin.h> + #endif + #if defined( __GNUC__ ) + typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16))); + typedef cl_char __cl_char16 __attribute__((vector_size(16))); + typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16))); + typedef cl_short __cl_short8 __attribute__((vector_size(16))); + typedef cl_uint __cl_uint4 __attribute__((vector_size(16))); + typedef cl_int __cl_int4 __attribute__((vector_size(16))); + typedef cl_ulong __cl_ulong2 __attribute__((vector_size(16))); + typedef cl_long __cl_long2 __attribute__((vector_size(16))); + typedef cl_double __cl_double2 __attribute__((vector_size(16))); + #else + typedef __m128i __cl_uchar16; + typedef __m128i __cl_char16; + typedef __m128i __cl_ushort8; + typedef __m128i __cl_short8; + typedef __m128i __cl_uint4; + typedef __m128i __cl_int4; + typedef __m128i __cl_ulong2; + typedef __m128i __cl_long2; + typedef __m128d __cl_double2; + #endif + #define __CL_UCHAR16__ 1 + #define __CL_CHAR16__ 1 + #define __CL_USHORT8__ 1 + #define __CL_SHORT8__ 1 + #define __CL_INT4__ 1 + #define __CL_UINT4__ 1 + #define __CL_ULONG2__ 1 + #define __CL_LONG2__ 1 + #define __CL_DOUBLE2__ 1 +#endif + +#if defined( __MMX__ ) + #include <mmintrin.h> + #if defined( __GNUC__ ) + typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8))); + typedef cl_char __cl_char8 __attribute__((vector_size(8))); + typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8))); + typedef cl_short __cl_short4 __attribute__((vector_size(8))); + typedef cl_uint __cl_uint2 __attribute__((vector_size(8))); + typedef cl_int __cl_int2 __attribute__((vector_size(8))); + typedef cl_ulong __cl_ulong1
__attribute__((vector_size(8))); + typedef cl_long __cl_long1 __attribute__((vector_size(8))); + typedef cl_float __cl_float2 __attribute__((vector_size(8))); + #else + typedef __m64 __cl_uchar8; + typedef __m64 __cl_char8; + typedef __m64 __cl_ushort4; + typedef __m64 __cl_short4; + typedef __m64 __cl_uint2; + typedef __m64 __cl_int2; + typedef __m64 __cl_ulong1; + typedef __m64 __cl_long1; + typedef __m64 __cl_float2; + #endif + #define __CL_UCHAR8__ 1 + #define __CL_CHAR8__ 1 + #define __CL_USHORT4__ 1 + #define __CL_SHORT4__ 1 + #define __CL_INT2__ 1 + #define __CL_UINT2__ 1 + #define __CL_ULONG1__ 1 + #define __CL_LONG1__ 1 + #define __CL_FLOAT2__ 1 +#endif + +#if defined( __AVX__ ) + #if defined( __MINGW64__ ) + #include <intrin.h> + #else + #include <immintrin.h> + #endif + #if defined( __GNUC__ ) + typedef cl_float __cl_float8 __attribute__((vector_size(32))); + typedef cl_double __cl_double4 __attribute__((vector_size(32))); + #else + typedef __m256 __cl_float8; + typedef __m256d __cl_double4; + #endif + #define __CL_FLOAT8__ 1 + #define __CL_DOUBLE4__ 1 +#endif + +/* Define capabilities for anonymous struct members. */ +#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ +#elif defined(_WIN32) && defined(_MSC_VER) && !defined(__STDC__) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ +#elif defined(__GNUC__) && ! defined(__STRICT_ANSI__) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ __extension__ +#elif defined(__clang__) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ __extension__ +#else +#define __CL_HAS_ANON_STRUCT__ 0 +#define __CL_ANON_STRUCT__ +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + /* Disable warning C4201: nonstandard extension used : nameless struct/union */ + #pragma warning( push ) + #pragma warning( disable : 4201 ) +#endif + +/* Define alignment keys */ +#if defined( __GNUC__ ) || defined(__INTEGRITY) + #define CL_ALIGNED(_x) __attribute__ ((aligned(_x))) +#elif defined( _WIN32) && (_MSC_VER) + /* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */ + /* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */ + /* #include <crtdefs.h> */ + /* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */ + #define CL_ALIGNED(_x) +#else + #warning Need to implement some method to align data here + #define CL_ALIGNED(_x) +#endif + +/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */ +#if __CL_HAS_ANON_STRUCT__ + /* .xyzw and .s0123...{f|F} are supported */ + #define CL_HAS_NAMED_VECTOR_FIELDS 1 + /* .hi and .lo are supported */ + #define CL_HAS_HI_LO_VECTOR_FIELDS 1 +#endif + +/* Define cl_vector types */ + +/* ---- cl_charn ---- */ +typedef union +{ + cl_char CL_ALIGNED(2) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_char lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2; +#endif +}cl_char2; + +typedef union +{ + cl_char CL_ALIGNED(4) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_char2 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[2]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4; +#endif +}cl_char4; + +/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5.
*/ +typedef cl_char4 cl_char3; + +typedef union +{ + cl_char CL_ALIGNED(8) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_char4 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[4]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4[2]; +#endif +#if defined( __CL_CHAR8__ ) + __cl_char8 v8; +#endif +}cl_char8; + +typedef union +{ + cl_char CL_ALIGNED(16) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_char8 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[8]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4[4]; +#endif +#if defined( __CL_CHAR8__ ) + __cl_char8 v8[2]; +#endif +#if defined( __CL_CHAR16__ ) + __cl_char16 v16; +#endif +}cl_char16; + + +/* ---- cl_ucharn ---- */ +typedef union +{ + cl_uchar CL_ALIGNED(2) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_uchar lo, hi; }; +#endif +#if defined( __cl_uchar2__) + __cl_uchar2 v2; +#endif +}cl_uchar2; + +typedef union +{ + cl_uchar CL_ALIGNED(4) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_uchar2 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[2]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4; +#endif +}cl_uchar4; + +/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. 
*/ +typedef cl_uchar4 cl_uchar3; + +typedef union +{ + cl_uchar CL_ALIGNED(8) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_uchar4 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[4]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4[2]; +#endif +#if defined( __CL_UCHAR8__ ) + __cl_uchar8 v8; +#endif +}cl_uchar8; + +typedef union +{ + cl_uchar CL_ALIGNED(16) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_uchar8 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[8]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4[4]; +#endif +#if defined( __CL_UCHAR8__ ) + __cl_uchar8 v8[2]; +#endif +#if defined( __CL_UCHAR16__ ) + __cl_uchar16 v16; +#endif +}cl_uchar16; + + +/* ---- cl_shortn ---- */ +typedef union +{ + cl_short CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_short lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2; +#endif +}cl_short2; + +typedef union +{ + cl_short CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_short2 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[2]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4; +#endif +}cl_short4; + +/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. 
*/ +typedef cl_short4 cl_short3; + +typedef union +{ + cl_short CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_short4 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[4]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4[2]; +#endif +#if defined( __CL_SHORT8__ ) + __cl_short8 v8; +#endif +}cl_short8; + +typedef union +{ + cl_short CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_short8 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[8]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4[4]; +#endif +#if defined( __CL_SHORT8__ ) + __cl_short8 v8[2]; +#endif +#if defined( __CL_SHORT16__ ) + __cl_short16 v16; +#endif +}cl_short16; + + +/* ---- cl_ushortn ---- */ +typedef union +{ + cl_ushort CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_ushort lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2; +#endif +}cl_ushort2; + +typedef union +{ + cl_ushort CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_ushort2 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[2]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4; +#endif +}cl_ushort4; + +/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. 
*/ +typedef cl_ushort4 cl_ushort3; + +typedef union +{ + cl_ushort CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_ushort4 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[4]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4[2]; +#endif +#if defined( __CL_USHORT8__ ) + __cl_ushort8 v8; +#endif +}cl_ushort8; + +typedef union +{ + cl_ushort CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_ushort8 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[8]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4[4]; +#endif +#if defined( __CL_USHORT8__ ) + __cl_ushort8 v8[2]; +#endif +#if defined( __CL_USHORT16__ ) + __cl_ushort16 v16; +#endif +}cl_ushort16; + + +/* ---- cl_halfn ---- */ +typedef union +{ + cl_half CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_half lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2; +#endif +}cl_half2; + +typedef union +{ + cl_half CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_half2 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[2]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4; +#endif +}cl_half4; + +/* cl_half3 is identical in size, alignment and behavior to cl_half4. See section 6.1.5. 
*/ +typedef cl_half4 cl_half3; + +typedef union +{ + cl_half CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_half4 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[4]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4[2]; +#endif +#if defined( __CL_HALF8__ ) + __cl_half8 v8; +#endif +}cl_half8; + +typedef union +{ + cl_half CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_half8 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[8]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4[4]; +#endif +#if defined( __CL_HALF8__ ) + __cl_half8 v8[2]; +#endif +#if defined( __CL_HALF16__ ) + __cl_half16 v16; +#endif +}cl_half16; + +/* ---- cl_intn ---- */ +typedef union +{ + cl_int CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_int lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2; +#endif +}cl_int2; + +typedef union +{ + cl_int CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_int2 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[2]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4; +#endif +}cl_int4; + +/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. 
*/ +typedef cl_int4 cl_int3; + +typedef union +{ + cl_int CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_int4 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[4]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4[2]; +#endif +#if defined( __CL_INT8__ ) + __cl_int8 v8; +#endif +}cl_int8; + +typedef union +{ + cl_int CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_int8 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[8]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4[4]; +#endif +#if defined( __CL_INT8__ ) + __cl_int8 v8[2]; +#endif +#if defined( __CL_INT16__ ) + __cl_int16 v16; +#endif +}cl_int16; + + +/* ---- cl_uintn ---- */ +typedef union +{ + cl_uint CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_uint lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2; +#endif +}cl_uint2; + +typedef union +{ + cl_uint CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_uint2 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[2]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4; +#endif +}cl_uint4; + +/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. 
*/ +typedef cl_uint4 cl_uint3; + +typedef union +{ + cl_uint CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_uint4 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[4]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4[2]; +#endif +#if defined( __CL_UINT8__ ) + __cl_uint8 v8; +#endif +}cl_uint8; + +typedef union +{ + cl_uint CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_uint8 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[8]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4[4]; +#endif +#if defined( __CL_UINT8__ ) + __cl_uint8 v8[2]; +#endif +#if defined( __CL_UINT16__ ) + __cl_uint16 v16; +#endif +}cl_uint16; + +/* ---- cl_longn ---- */ +typedef union +{ + cl_long CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_long lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2; +#endif +}cl_long2; + +typedef union +{ + cl_long CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_long2 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[2]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4; +#endif +}cl_long4; + +/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. 
*/ +typedef cl_long4 cl_long3; + +typedef union +{ + cl_long CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_long4 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[4]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4[2]; +#endif +#if defined( __CL_LONG8__ ) + __cl_long8 v8; +#endif +}cl_long8; + +typedef union +{ + cl_long CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_long8 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[8]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4[4]; +#endif +#if defined( __CL_LONG8__ ) + __cl_long8 v8[2]; +#endif +#if defined( __CL_LONG16__ ) + __cl_long16 v16; +#endif +}cl_long16; + + +/* ---- cl_ulongn ---- */ +typedef union +{ + cl_ulong CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_ulong lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2; +#endif +}cl_ulong2; + +typedef union +{ + cl_ulong CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_ulong2 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[2]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4; +#endif +}cl_ulong4; + +/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. 
*/ +typedef cl_ulong4 cl_ulong3; + +typedef union +{ + cl_ulong CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_ulong4 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[4]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4[2]; +#endif +#if defined( __CL_ULONG8__ ) + __cl_ulong8 v8; +#endif +}cl_ulong8; + +typedef union +{ + cl_ulong CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_ulong8 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[8]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4[4]; +#endif +#if defined( __CL_ULONG8__ ) + __cl_ulong8 v8[2]; +#endif +#if defined( __CL_ULONG16__ ) + __cl_ulong16 v16; +#endif +}cl_ulong16; + + +/* --- cl_floatn ---- */ + +typedef union +{ + cl_float CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_float lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2; +#endif +}cl_float2; + +typedef union +{ + cl_float CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_float2 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[2]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4; +#endif +}cl_float4; + +/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. 
*/ +typedef cl_float4 cl_float3; + +typedef union +{ + cl_float CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_float4 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[4]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4[2]; +#endif +#if defined( __CL_FLOAT8__ ) + __cl_float8 v8; +#endif +}cl_float8; + +typedef union +{ + cl_float CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_float8 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[8]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4[4]; +#endif +#if defined( __CL_FLOAT8__ ) + __cl_float8 v8[2]; +#endif +#if defined( __CL_FLOAT16__ ) + __cl_float16 v16; +#endif +}cl_float16; + +/* --- cl_doublen ---- */ + +typedef union +{ + cl_double CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_double lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2; +#endif +}cl_double2; + +typedef union +{ + cl_double CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_double2 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[2]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4; +#endif +}cl_double4; + +/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. */ +typedef cl_double4 cl_double3; + +typedef union +{ + cl_double CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_double4 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[4]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4[2]; +#endif +#if defined( __CL_DOUBLE8__ ) + __cl_double8 v8; +#endif +}cl_double8; + +typedef union +{ + cl_double CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_double8 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[8]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4[4]; +#endif +#if defined( __CL_DOUBLE8__ ) + __cl_double8 v8[2]; +#endif +#if defined( __CL_DOUBLE16__ ) + __cl_double16 v16; +#endif +}cl_double16; + +/* Macro to facilitate debugging + * Usage: + * Place CL_PROGRAM_STRING_DEBUG_INFO on the line before the first line of your source. 
+ * The first line ends with: CL_PROGRAM_STRING_DEBUG_INFO \" + * Each line thereafter of OpenCL C source must end with: \n\ + * The last line ends in "; + * + * Example: + * + * const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO "\ + * kernel void foo( int a, float * b ) \n\ + * { \n\ + * // my comment \n\ + * *b[ get_global_id(0)] = a; \n\ + * } \n\ + * "; + * + * This should correctly set up the line, (column) and file information for your source + * string so you can do source level debugging. + */ +#define __CL_STRINGIFY( _x ) # _x +#define _CL_STRINGIFY( _x ) __CL_STRINGIFY( _x ) +#define CL_PROGRAM_STRING_DEBUG_INFO "#line " _CL_STRINGIFY(__LINE__) " \"" __FILE__ "\" \n\n" + +#ifdef __cplusplus +} +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + #pragma warning( pop ) +#endif + +#endif /* __CL_PLATFORM_H */ diff --git a/miniCUDA124/include/CL/cl_version.h b/miniCUDA124/include/CL/cl_version.h new file mode 100644 index 0000000000000000000000000000000000000000..fcaa84efa39fd3283321186bdc5307112d52e5d3 --- /dev/null +++ b/miniCUDA124/include/CL/cl_version.h @@ -0,0 +1,81 @@ +/******************************************************************************* + * Copyright (c) 2018-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef __CL_VERSION_H +#define __CL_VERSION_H + +/* Detect which version to target */ +#if !defined(CL_TARGET_OPENCL_VERSION) +#pragma message("cl_version.h: CL_TARGET_OPENCL_VERSION is not defined. Defaulting to 300 (OpenCL 3.0)") +#define CL_TARGET_OPENCL_VERSION 300 +#endif +#if CL_TARGET_OPENCL_VERSION != 100 && \ + CL_TARGET_OPENCL_VERSION != 110 && \ + CL_TARGET_OPENCL_VERSION != 120 && \ + CL_TARGET_OPENCL_VERSION != 200 && \ + CL_TARGET_OPENCL_VERSION != 210 && \ + CL_TARGET_OPENCL_VERSION != 220 && \ + CL_TARGET_OPENCL_VERSION != 300 +#pragma message("cl_version: CL_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220, 300). Defaulting to 300 (OpenCL 3.0)") +#undef CL_TARGET_OPENCL_VERSION +#define CL_TARGET_OPENCL_VERSION 300 +#endif + + +/* OpenCL Version */ +#if CL_TARGET_OPENCL_VERSION >= 300 && !defined(CL_VERSION_3_0) +#define CL_VERSION_3_0 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 220 && !defined(CL_VERSION_2_2) +#define CL_VERSION_2_2 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 210 && !defined(CL_VERSION_2_1) +#define CL_VERSION_2_1 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 200 && !defined(CL_VERSION_2_0) +#define CL_VERSION_2_0 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 120 && !defined(CL_VERSION_1_2) +#define CL_VERSION_1_2 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 110 && !defined(CL_VERSION_1_1) +#define CL_VERSION_1_1 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 100 && !defined(CL_VERSION_1_0) +#define CL_VERSION_1_0 1 +#endif + +/* Allow deprecated APIs for older OpenCL versions. 
*/
+#if CL_TARGET_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_2_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_1_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_0_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_1_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_0_APIS
+#endif
+
+#endif /* __CL_VERSION_H */
diff --git a/miniCUDA124/include/CL/opencl.h b/miniCUDA124/include/CL/opencl.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d9419287fb2b9cb4e521905b1e589dce643d0e2
--- /dev/null
+++ b/miniCUDA124/include/CL/opencl.h
@@ -0,0 +1,32 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2021 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+#ifndef __OPENCL_H
+#define __OPENCL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <CL/cl.h>
+#include <CL/cl_gl.h>
+#include <CL/cl_ext.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_H */
diff --git a/miniCUDA124/include/cooperative_groups/memcpy_async.h b/miniCUDA124/include/cooperative_groups/memcpy_async.h
new file mode 100644
index 0000000000000000000000000000000000000000..91d428b25b8d1e92a5d86bbe8148fe659533a96c
--- /dev/null
+++ b/miniCUDA124/include/cooperative_groups/memcpy_async.h
@@ -0,0 +1,62 @@
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _COOPERATIVE_GROUPS_MEMCPY_ASYNC +#define _COOPERATIVE_GROUPS_MEMCPY_ASYNC + +#include "../cooperative_groups.h" +#include "details/info.h" + +#ifdef _CG_CPP11_FEATURES +# include "details/async.h" +#else +# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \ + -std=c++11 compiler option. +#endif + +#endif // _COOPERATIVE_GROUPS_MEMCPY_ASYNC diff --git a/miniCUDA124/include/cooperative_groups/reduce.h b/miniCUDA124/include/cooperative_groups/reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..68335edb92acc7b538866e6445cc6542410835bf --- /dev/null +++ b/miniCUDA124/include/cooperative_groups/reduce.h @@ -0,0 +1,63 @@ + /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _COOPERATIVE_GROUPS_REDUCE_H +#define _COOPERATIVE_GROUPS_REDUCE_H + +#include "../cooperative_groups.h" +#include "details/info.h" + +#ifdef _CG_CPP11_FEATURES +# include "details/reduce.h" +#else +# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \ + -std=c++11 compiler option. +#endif + + +#endif //_COOPERATIVE_GROUPS_REDUCE_H diff --git a/miniCUDA124/include/cooperative_groups/scan.h b/miniCUDA124/include/cooperative_groups/scan.h new file mode 100644 index 0000000000000000000000000000000000000000..8452963748b9c4f6d8be3488a0bc355300b85a4f --- /dev/null +++ b/miniCUDA124/include/cooperative_groups/scan.h @@ -0,0 +1,63 @@ +/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _COOPERATIVE_GROUPS_SCAN_H +#define _COOPERATIVE_GROUPS_SCAN_H + +#include "../cooperative_groups.h" +#include "details/info.h" + +#ifdef _CG_CPP11_FEATURES +# include "details/scan.h" +#else +# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \ + -std=c++11 compiler option. +#endif + + +#endif //_COOPERATIVE_GROUPS_SCAN_H diff --git a/miniCUDA124/include/crt/common_functions.h b/miniCUDA124/include/crt/common_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..a919d9adf8fc24782fb43e22937e9450ad174f2b --- /dev/null +++ b/miniCUDA124/include/crt/common_functions.h @@ -0,0 +1,310 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__ +#endif + +#if !defined(__COMMON_FUNCTIONS_H__) +#define __COMMON_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#include "builtin_types.h" +#include "host_defines.h" + +#define __CUDACC_VER__ "__CUDACC_VER__ is no longer supported. Use __CUDACC_VER_MAJOR__, __CUDACC_VER_MINOR__, and __CUDACC_VER_BUILD__ instead." 
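+
+/* Usage sketch (illustrative; the 12.4 threshold is an arbitrary example value):
+ * a version check that previously read __CUDACC_VER__ should be phrased with the
+ * replacement macros instead, e.g.
+ *
+ *   #if __CUDACC_VER_MAJOR__ > 12 || \
+ *       (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 4)
+ *   // code that requires nvcc 12.4 or newer
+ *   #endif
+ */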
+
+#ifndef __CUDA_API_VER_MAJOR__
+#define __CUDA_API_VER_MAJOR__ __CUDACC_VER_MAJOR__
+#endif /* __CUDA_API_VER_MAJOR__ */
+
+#ifndef __CUDA_API_VER_MINOR__
+#define __CUDA_API_VER_MINOR__ __CUDACC_VER_MINOR__
+#endif /* __CUDA_API_VER_MINOR__ */
+
+#if !defined(__CUDACC_RTC__)
+#include <string.h>
+#include <time.h>
+
+extern "C"
+{
+#endif /* !__CUDACC_RTC__ */
+extern _CRTIMP __host__ __device__ __device_builtin__ __cudart_builtin__ clock_t __cdecl clock(void)
+#if defined(__QNX__)
+asm("clock32")
+#endif
+__THROW;
+extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memset(void*, int, size_t) __THROW;
+extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memcpy(void*, const void*, size_t) __THROW;
+#if !defined(__CUDACC_RTC__)
+}
+#endif /* !__CUDACC_RTC__ */
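+
+/* Usage sketch (illustrative; the kernel name and launch shape are examples, not
+ * part of this header): because memset and memcpy are declared as __device__
+ * builtins above, a kernel may call them directly on device memory.
+ *
+ * __global__ void copy_and_pad(char *dst, const char *src, size_t n, size_t total)
+ * {
+ *     if (blockIdx.x == 0 && threadIdx.x == 0) {
+ *         memcpy(dst, src, n);            // device-side memcpy
+ *         memset(dst + n, 0, total - n);  // zero the tail with device-side memset
+ *     }
+ * }
+ */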
+
+#if defined(__CUDA_ARCH__)
+
+#if defined(__CUDACC_RTC__)
+inline __host__ __device__ void* operator new(size_t, void *p) { return p; }
+inline __host__ __device__ void* operator new[](size_t, void *p) { return p; }
+inline __host__ __device__ void operator delete(void*, void*) { }
+inline __host__ __device__ void operator delete[](void*, void*) { }
+#else /* !__CUDACC_RTC__ */
+#ifndef __CUDA_INTERNAL_SKIP_CPP_HEADERS__
+#include <new>
+#endif
+
+#if defined (__GNUC__)
+
+#define STD \
+ std::
+
+#else /* __GNUC__ */
+
+#define STD
+
+#endif /* __GNUC__ */
+
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, void*) throw();
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, void*) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, void*) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, void*) throw();
+# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__)
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw();
+#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */
+#endif /* __CUDACC_RTC__ */
+
+#if !defined(__CUDACC_RTC__)
+#include <stdio.h>
+#include <stdlib.h>
+#endif /* !__CUDACC_RTC__ */
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+extern "C"
+{
+extern
+#if !defined(_MSC_VER) || _MSC_VER < 1900
+_CRTIMP
+#endif
+
+#if defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) )
+__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...)
+__THROW;
+#else /* newer glibc */
+__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...);
+#endif /* defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) ) */
+
+
+extern _CRTIMP __host__ __device__ __cudart_builtin__ void* __cdecl malloc(size_t) __THROW;
+extern _CRTIMP __host__ __device__ __cudart_builtin__ void __cdecl free(void*) __THROW;
+
+#if defined(_MSC_VER)
+extern __host__ __device__ __cudart_builtin__ void* __cdecl _alloca(size_t);
+#endif
+
+#if defined(__QNX__)
+#undef alloca
+#define alloca(__S) __builtin_alloca(__S)
+#endif
+}
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
+
+#if !defined(__CUDACC_RTC__)
+#include <assert.h>
+#endif /* !__CUDACC_RTC__ */
+
+extern "C"
+{
+#if defined(__CUDACC_RTC__)
+extern __host__ __device__ void __assertfail(const char * __assertion,
+                                             const char *__file,
+                                             unsigned int __line,
+                                             const char *__function,
+                                             size_t charsize);
+#elif defined(__APPLE__)
+#define __builtin_expect(exp,c) (exp)
+extern __host__ __device__ __cudart_builtin__ void __assert_rtn(
+  const char *, const char *, int, const char *);
+#elif defined(__ANDROID__)
+extern __host__ __device__ __cudart_builtin__ void __assert2(
+  const char *, int, const char *, const char *);
+#elif defined(__QNX__)
+#if !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+extern __host__ __device__ __cudart_builtin__ void __assert(
+  const char *, const char *, unsigned int, const char *);
+#if !defined(_LIBCPP_VERSION)
+}
+#endif
+#elif defined(__HORIZON__)
+extern __host__ __device__ __cudart_builtin__ void __assert_fail(
+  const char *, const char *, int, const char *);
+#elif defined(__GNUC__)
+extern __host__ __device__ __cudart_builtin__ void __assert_fail(
+  const char *, const char *, unsigned int, const char *)
+  __THROW;
+#elif defined(_WIN32)
+extern __host__ __device__ __cudart_builtin__ _CRTIMP void __cdecl _wassert(
+  const wchar_t *, const wchar_t *, unsigned);
+#endif
+}
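+
+/* Usage sketch (illustrative; the kernel name is an example, not part of this
+ * header): given the declarations above, assert() and printf() are usable inside
+ * device code; a failed device assert stops the kernel and is reported to the
+ * host as a launch error.
+ *
+ * __global__ void check_positive(const float *v, int n)
+ * {
+ *     int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *     if (i < n) {
+ *         assert(v[i] > 0.0f);              // device-side assert
+ *         printf("v[%d] = %f\n", i, v[i]);  // device-side printf
+ *     }
+ * }
+ */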
+
+#if defined(__CUDACC_RTC__)
+#ifdef NDEBUG
+#define assert(e) (static_cast<void>(0))
+#else /* !NDEBUG */
+#define __ASSERT_STR_HELPER(x) #x
+#define assert(e) ((e) ? static_cast<void>(0)\
+                 : __assertfail(__ASSERT_STR_HELPER(e), __FILE__,\
+                                __LINE__, __PRETTY_FUNCTION__,\
+                                sizeof(char)))
+#endif /* NDEBUG */
+__host__ __device__ void* operator new(size_t);
+__host__ __device__ void* operator new[](size_t);
+__host__ __device__ void operator delete(void*);
+__host__ __device__ void operator delete[](void*);
+# if __cplusplus >= 201402L
+__host__ __device__ void operator delete(void*, size_t);
+__host__ __device__ void operator delete[](void*, size_t);
+#endif /* __cplusplus >= 201402L */
+
+#if __cplusplus >= 201703L
+namespace std { enum class align_val_t : size_t {}; }
+__host__ __device__ void* __cdecl operator new(size_t sz, std::align_val_t) noexcept;
+__host__ __device__ void* __cdecl operator new[](size_t sz, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete(void* ptr, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete[](void* ptr, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete(void* ptr, size_t, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete[](void* ptr, size_t, std::align_val_t) noexcept;
+#endif /* __cplusplus >= 201703L */
+
+#else /* !__CUDACC_RTC__ */
+#if defined (__GNUC__)
+
+#define __NV_GLIBCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+
+#if (__cplusplus >= 201103L) && ((!(defined(__QNX__) && defined(_LIBCPP_VERSION))) || (defined(__QNX__) && __NV_GLIBCXX_VERSION >= 80300))
+#define THROWBADALLOC
+#else
+#if defined(__ANDROID__) && !defined(_LIBCPP_VERSION) && (defined(__BIONIC__) || __NV_GLIBCXX_VERSION < 40900)
+#define THROWBADALLOC
+#else
+#define THROWBADALLOC throw(STD bad_alloc)
+#endif
+#endif
+#define __DELETE_THROW throw()
+
+#undef __NV_GLIBCXX_VERSION
+
+#else /* __GNUC__ */
+
+#define THROWBADALLOC throw(...)
+ +#endif /* __GNUC__ */ + +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t) THROWBADALLOC; +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t) THROWBADALLOC; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*) throw(); +# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw(); +#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */ + +#if __cpp_aligned_new +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, std::align_val_t); +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, std::align_val_t); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, std::align_val_t) noexcept; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, std::align_val_t) noexcept; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t, std::align_val_t) noexcept; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t, std::align_val_t) noexcept; +#endif /* __cpp_aligned_new */ + +#undef THROWBADALLOC +#undef STD +#endif /* __CUDACC_RTC__ */ + +#endif /* __CUDA_ARCH__ */ + +#endif /* __cplusplus && __CUDACC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC_RTC__) && (__CUDA_ARCH__ >= 350) +#include "cuda_device_runtime_api.h" +#endif + +#include "math_functions.h" + +#endif /* !__COMMON_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__ +#endif diff --git a/miniCUDA124/include/crt/cudacc_ext.h b/miniCUDA124/include/crt/cudacc_ext.h new file mode 100644 index 0000000000000000000000000000000000000000..a16e8df8a8ac3139af404b585e038acfd7ff0da8 --- /dev/null +++ b/miniCUDA124/include/crt/cudacc_ext.h @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__ +#endif + + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__ +#endif diff --git a/miniCUDA124/include/crt/device_double_functions.h b/miniCUDA124/include/crt/device_double_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..c66359cbd777623fa41a1b19b06b24d42358d7dc --- /dev/null +++ b/miniCUDA124/include/crt/device_double_functions.h @@ -0,0 +1,1192 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__
+#endif
+
+#if !defined(__DEVICE_DOUBLE_FUNCTIONS_H__)
+#define __DEVICE_DOUBLE_FUNCTIONS_H__
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#if defined(__CUDACC_RTC__)
+#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__
+#else
+#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__
+#endif /* __CUDACC_RTC__ */
+
+#include "builtin_types.h"
+#include "device_types.h"
+#include "host_defines.h"
+
+//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
+#define EXCLUDE_FROM_RTC
+
+extern "C"
+{
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in a double as a 64-bit signed integer.
+ *
+ * Reinterpret the bits in the double-precision floating-point value \p x
+ * as a signed 64-bit integer.
+ * \return Returns reinterpreted value.
+ */
+extern __device__ __device_builtin__ long long int __double_as_longlong(double x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in a 64-bit signed integer as a double.
+ *
+ * Reinterpret the bits in the 64-bit signed integer value \p x as
+ * a double-precision floating-point value.
+ * \return Returns reinterpreted value.
+ */
+extern __device__ __device_builtin__ double __longlong_as_double(long long int x);
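+
+/* Usage sketch: these two cast intrinsics enable bit-level tricks such as the
+ * compare-and-swap loop for a double-precision atomic add shown in the CUDA C++
+ * Programming Guide (the helper name atomicAddDouble is illustrative; hardware
+ * atomicAdd(double*, double) should be preferred where available):
+ *
+ * __device__ double atomicAddDouble(double *addr, double val)
+ * {
+ *     unsigned long long int *p = (unsigned long long int *)addr;
+ *     unsigned long long int old = *p, assumed;
+ *     do {
+ *         assumed = old;
+ *         old = atomicCAS(p, assumed,
+ *                         __double_as_longlong(val + __longlong_as_double(assumed)));
+ *     } while (assumed != old);  // retry if another thread updated *addr meanwhile
+ *     return __longlong_as_double(old);
+ * }
+ */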
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation
+ * in round-to-nearest-even mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single
+ * ternary operation, rounding the result once in round-to-nearest-even mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly
+ * as a single operation.
+ * - __fma_rn(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - __fma_rn(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - __fma_rn(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - __fma_rn(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_rn(double x, double y, double z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation
+ * in round-towards-zero mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single
+ * ternary operation, rounding the result once in round-towards-zero mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly
+ * as a single operation.
+ * - __fma_rz(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - __fma_rz(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - __fma_rz(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - __fma_rz(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_rz(double x, double y, double z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation
+ * in round-up mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single
+ * ternary operation, rounding the result once in round-up (to positive infinity) mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly
+ * as a single operation.
+ * - __fma_ru(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - __fma_ru(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - __fma_ru(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - __fma_ru(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_ru(double x, double y, double z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation
+ * in round-down mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single
+ * ternary operation, rounding the result once in round-down (to negative infinity) mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly
+ * as a single operation.
+ * - __fma_rd(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - __fma_rd(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - __fma_rd(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - __fma_rd(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_rd(double x, double y, double z);
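+
+/* Usage sketch (the helper name fma_bounds is illustrative, not part of this
+ * header): the _ru/_rd variants round toward positive/negative infinity, so a
+ * pair of calls brackets the exact result without touching any global rounding
+ * state.
+ *
+ * __device__ void fma_bounds(double x, double y, double z, double *lo, double *hi)
+ * {
+ *     *lo = __fma_rd(x, y, z);  // lower bound: round toward negative infinity
+ *     *hi = __fma_ru(x, y, z);  // upper bound: round toward positive infinity
+ *     // the exact value of x * y + z lies in [*lo, *hi]
+ * }
+ */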
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Add two floating-point values in round-to-nearest-even mode.
+ *
+ * Adds two floating-point values \p x and \p y in round-to-nearest-even mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dadd_rn(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Add two floating-point values in round-towards-zero mode.
+ * + * Adds two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-up mode. + * + * Adds two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Add two floating-point values in round-down mode. + * + * Adds two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dadd_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-to-nearest-even mode. + * + * Subtracts two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-towards-zero mode. + * + * Subtracts two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-up mode. + * + * Subtracts two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Subtract two floating-point values in round-down mode. + * + * Subtracts two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dsub_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-to-nearest-even mode. + * + * Multiplies two floating-point values \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rn(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-towards-zero mode. + * + * Multiplies two floating-point values \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rz(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-up mode. + * + * Multiplies two floating-point values \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x * \p y. 
+ * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_ru(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_DOUBLE + * \brief Multiply two floating-point values in round-down mode. + * + * Multiplies two floating-point values \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_double + * \note_nofma + */ +extern __device__ __device_builtin__ double __dmul_rd(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-up mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a float in round-down mode. + * + * Convert the double-precision floating-point value \p x to a single-precision + * floating-point value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ float __double2float_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-down mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ int __double2int_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-up mode. 
+ * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned int __double2uint_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-down mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ long long int __double2ll_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-to-nearest-even mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rn(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-up mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_ru(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-down mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ unsigned long long int __double2ull_rd(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed int to a double. + * + * Convert the signed integer value \p x to a double-precision floating-point value. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __int2double_rn(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned int to a double. + * + * Convert the unsigned integer value \p x to a double-precision floating-point value. + * \return Returns converted value. 
+ */ +extern __device__ __device_builtin__ double __uint2double_rn(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rn(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-towards-zero mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rz(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-up mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_ru(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed 64-bit int to a double in round-down mode. + * + * Convert the signed 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ll2double_rd(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-to-nearest-even mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-to-nearest-even mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rn(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-towards-zero mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-towards-zero mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rz(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-up mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_ru(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned 64-bit int to a double in round-down mode. + * + * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point + * value in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +extern __device__ __device_builtin__ double __ull2double_rd(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high 32 bits in a double as a signed integer. + * + * Reinterpret the high 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. 
+ */ +extern __device__ __device_builtin__ int __double2hiint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret low 32 bits in a double as a signed integer. + * + * Reinterpret the low 32 bits in the double-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ int __double2loint(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret high and low 32-bit integer values as a double. + * + * Reinterpret the integer value of \p hi as the high 32 bits of a + * double-precision floating-point value and the integer value of \p lo + * as the low 32 bits of the same double-precision floating-point value. + * \return Returns reinterpreted value. + */ +extern __device__ __device_builtin__ double __hiloint2double(int hi, int lo); +} + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode); + +#undef EXCLUDE_FROM_RTC + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode = cudaRoundZero); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode = cudaRoundNearest); + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode = cudaRoundNearest); + +#undef __DEVICE_DOUBLE_FUNCTIONS_DECL__ + + +#endif /* __cplusplus && __CUDACC__ */ + +#if !defined(__CUDACC_RTC__) +#include "device_double_functions.hpp" +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__DEVICE_DOUBLE_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__ +#endif diff --git a/miniCUDA124/include/crt/device_double_functions.hpp b/miniCUDA124/include/crt/device_double_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f5fd809468ee33efa0f3c5994f66aa61c551a60e --- /dev/null +++ b/miniCUDA124/include/crt/device_double_functions.hpp @@ -0,0 +1,197 @@ +/* + * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__ +#endif + +#if !defined(__DEVICE_DOUBLE_FUNCTIONS_HPP__) +#define __DEVICE_DOUBLE_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC_RTC__) +#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__ +#else +#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __fma_rz(a, b, c) : + mode == cudaRoundPosInf ? __fma_ru(a, b, c) : + mode == cudaRoundMinInf ? __fma_rd(a, b, c) : + __fma_rn(a, b, c); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __dmul_rz(a, b) : + mode == cudaRoundPosInf ? __dmul_ru(a, b) : + mode == cudaRoundMinInf ? __dmul_rd(a, b) : + __dmul_rn(a, b); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __dadd_rz(a, b) : + mode == cudaRoundPosInf ? __dadd_ru(a, b) : + mode == cudaRoundMinInf ? __dadd_rd(a, b) : + __dadd_rn(a, b); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __dsub_rz(a, b) : + mode == cudaRoundPosInf ? __dsub_ru(a, b) : + mode == cudaRoundMinInf ? __dsub_rd(a, b) : + __dsub_rn(a, b); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2int_rn(a) : + mode == cudaRoundPosInf ? __double2int_ru(a) : + mode == cudaRoundMinInf ? __double2int_rd(a) : + __double2int_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2uint_rn(a) : + mode == cudaRoundPosInf ? __double2uint_ru(a) : + mode == cudaRoundMinInf ? __double2uint_rd(a) : + __double2uint_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2ll_rn(a) : + mode == cudaRoundPosInf ? __double2ll_ru(a) : + mode == cudaRoundMinInf ? __double2ll_rd(a) : + __double2ll_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode) +{ + return mode == cudaRoundNearest ? __double2ull_rn(a) : + mode == cudaRoundPosInf ? __double2ull_ru(a) : + mode == cudaRoundMinInf ? __double2ull_rd(a) : + __double2ull_rz(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __ll2double_rz(a) : + mode == cudaRoundPosInf ? __ll2double_ru(a) : + mode == cudaRoundMinInf ? 
__ll2double_rd(a) : + __ll2double_rn(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode) +{ + return mode == cudaRoundZero ? __ull2double_rz(a) : + mode == cudaRoundPosInf ? __ull2double_ru(a) : + mode == cudaRoundMinInf ? __ull2double_rd(a) : + __ull2double_rn(a); +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode) +{ + return (double)a; +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode) +{ + return (double)a; +} + +__DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode) +{ + return (double)a; +} + +#undef __DEVICE_DOUBLE_FUNCTIONS_DECL__ + +#endif /* __cplusplus && __CUDACC__ */ + +#endif /* !__DEVICE_DOUBLE_FUNCTIONS_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__ +#endif diff --git a/miniCUDA124/include/crt/device_functions.h b/miniCUDA124/include/crt/device_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..6eddb8a8947cdb3713cfc7fad48b89269e8e59f3 --- /dev/null +++ b/miniCUDA124/include/crt/device_functions.h @@ -0,0 +1,3696 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 
227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H__ +#endif + +#if !defined(__DEVICE_FUNCTIONS_H__) +#define __DEVICE_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#define __DEVICE_FUNCTIONS_DECL__ __device__ __cudart_builtin__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ __device__ __cudart_builtin__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ __device__ __host__ __cudart_builtin__ +#else +#define __DEVICE_FUNCTIONS_DECL__ __device__ __cudart_builtin__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __cudart_builtin__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __host__ __cudart_builtin__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +extern "C" +{ +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 32 bits of the product of the two 32-bit integers. + * + * Calculate the most significant 32 bits of the 64-bit product \p x * \p y, where \p x and \p y + * are 32-bit integers. + * + * \return Returns the most significant 32 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __mulhi(int x, int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 32 bits of the product of the two 32-bit unsigned integers. + * + * Calculate the most significant 32 bits of the 64-bit product \p x * \p y, where \p x and \p y + * are 32-bit unsigned integers. + * + * \return Returns the most significant 32 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __umulhi(unsigned int x, unsigned int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 64 bits of the product of the two 64-bit integers. + * + * Calculate the most significant 64 bits of the 128-bit product \p x * \p y, where \p x and \p y + * are 64-bit integers. + * + * \return Returns the most significant 64 bits of the product \p x * \p y. 
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __mul64hi(long long int x, long long int y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Calculate the most significant 64 bits of the product of the two 64-bit unsigned integers.
+ *
+ * Calculate the most significant 64 bits of the 128-bit product \p x * \p y, where \p x and \p y
+ * are 64-bit unsigned integers.
+ *
+ * \return Returns the most significant 64 bits of the product \p x * \p y.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __umul64hi(unsigned long long int x, unsigned long long int y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in an integer as a float.
+ *
+ * Reinterpret the bits in the signed integer value \p x as a single-precision
+ * floating-point value.
+ * \return Returns reinterpreted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int_as_float(int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in a float as a signed integer.
+ *
+ * Reinterpret the bits in the single-precision floating-point value \p x
+ * as a signed integer.
+ * \return Returns reinterpreted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float_as_int(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in an unsigned integer as a float.
+ *
+ * Reinterpret the bits in the unsigned integer value \p x as a single-precision
+ * floating-point value.
+ * \return Returns reinterpreted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint_as_float(unsigned int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in a float as an unsigned integer.
+ *
+ * Reinterpret the bits in the single-precision floating-point value \p x
+ * as an unsigned integer.
+ * \return Returns reinterpreted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float_as_uint(float x);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __syncthreads(void);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __prof_trigger(int);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __threadfence(void);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __threadfence_block(void);
+__DEVICE_FUNCTIONS_DECL__
+#if defined(__GNUC__) || defined(__CUDACC_RTC__)
+__attribute__((__noreturn__))
+#elif defined(_MSC_VER)
+__declspec(noreturn)
+#endif /* defined(__GNUC__) || defined(__CUDACC_RTC__) */
+__device_builtin__ void __trap(void);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __brkpt();
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Clamp the input argument to [+0.0, 1.0].
+ *
+ * Clamp the input argument \p x to be within the interval [+0.0, 1.0].
+ * \return
+ * - __saturatef(\p x) returns 0 if \p x < 0.
+ * - __saturatef(\p x) returns 1 if \p x > 1.
+ * - __saturatef(\p x) returns \p x if \latexonly $0 \le x \le 1$ \endlatexonly.
+ * - __saturatef(NaN) returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __saturatef(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Calculate \latexonly $|x - y| + z$ \endlatexonly, the sum of absolute difference.
+ *
+ * Calculate \latexonly $|x - y| + z$ \endlatexonly, the 32-bit sum of the third
+ * argument \p z and the absolute value of the difference between the first
+ * argument, \p x, and the second argument, \p y.
+ *
+ * Inputs \p x and \p y are signed 32-bit integers, input \p z is
+ * a 32-bit unsigned integer.
+ *
+ * \return Returns \latexonly $|x - y| + z$ \endlatexonly.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __sad(int x, int y, unsigned int z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Calculate \latexonly $|x - y| + z$ \endlatexonly, the sum of absolute difference.
+ *
+ * Calculate \latexonly $|x - y| + z$ \endlatexonly, the 32-bit sum of the third
+ * argument \p z and the absolute value of the difference between the first
+ * argument, \p x, and the second argument, \p y.
+ *
+ * Inputs \p x, \p y, and \p z are unsigned 32-bit integers.
+ *
+ * \return Returns \latexonly $|x - y| + z$ \endlatexonly.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __usad(unsigned int x, unsigned int y, unsigned int z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Calculate the least significant 32 bits of the product of the least significant 24 bits of two integers.
+ *
+ * Calculate the least significant 32 bits of the product of the least significant 24 bits of \p x and \p y.
+ * The high order 8 bits of \p x and \p y are ignored.
+ *
+ * \return Returns the least significant 32 bits of the product \p x * \p y.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __mul24(int x, int y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Calculate the least significant 32 bits of the product of the least significant 24 bits of two unsigned integers.
+ *
+ * Calculate the least significant 32 bits of the product of the least significant 24 bits of \p x and \p y.
+ * The high order 8 bits of \p x and \p y are ignored.
+ *
+ * \return Returns the least significant 32 bits of the product \p x * \p y.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __umul24(unsigned int x, unsigned int y);
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Divide two floating-point values.
+ *
+ * Compute \p x divided by \p y. If --use_fast_math is specified,
+ * use ::__fdividef() for higher performance, otherwise use normal division.
+ *
+ * \return Returns \p x / \p y.
+ *
+ * \note_accuracy_single
+ * \note_fastmath
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fdividef(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate division of the input arguments.
+ *
+ * Calculate the fast approximate division of \p x by \p y.
+ *
+ * \return Returns \p x / \p y.
+ * - __fdividef(\latexonly $\infty$ \endlatexonly, \p y) returns NaN for
+ *   \latexonly $2^{126} < |y| < 2^{128}$ \endlatexonly.
+ * - __fdividef(\p x, \p y) returns 0 for
+ *   \latexonly $2^{126} < |y| < 2^{128}$ \endlatexonly and finite \p x.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdividef(float x, float y);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fdivide(double x, double y);
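/*
 * Editor's illustration (not part of the original header): a minimal sketch of the
 * accuracy/speed trade-off of the fast-division intrinsic declared above. The
 * kernel and buffer names are hypothetical; only plain division and __fdividef()
 * come from the declarations above.
 */
__global__ void div_compare(const float *x, const float *y, float *exact, float *fast, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        exact[i] = x[i] / y[i];            /* IEEE-rounded division */
        fast[i]  = __fdividef(x[i], y[i]); /* approximate; wrong when 2^126 < |y| < 2^128 */
    }
}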
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate sine of the input argument.
+ *
+ * Calculate the fast approximate sine of the input argument \p x, measured in radians.
+ *
+ * \return Returns the approximate sine of \p x.
+ *
+ * \note_accuracy_single_intrinsic
+ * \note Output in the denormal range is flushed to sign-preserving 0.0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __sinf(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate cosine of the input argument.
+ *
+ * Calculate the fast approximate cosine of the input argument \p x, measured in radians.
+ *
+ * \return Returns the approximate cosine of \p x.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __cosf(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate tangent of the input argument.
+ *
+ * Calculate the fast approximate tangent of the input argument \p x, measured in radians.
+ *
+ * \return Returns the approximate tangent of \p x.
+ *
+ * \note_accuracy_single_intrinsic
+ * \note The result is computed as the fast divide of ::__sinf()
+ * by ::__cosf(). Denormal output is flushed to sign-preserving 0.0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __tanf(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate sine and cosine of the first input argument.
+ *
+ * Calculate the fast approximate sine and cosine of the first input argument \p x
+ * (measured in radians). The result for sine is written into the second argument,
+ * \p sptr, and the result for cosine into the third argument, \p cptr.
+ *
+ * \return
+ * - none
+ *
+ * \note_accuracy_single_intrinsic
+ * \note Denormal input/output is flushed to sign-preserving 0.0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ void __sincosf(float x, float *sptr, float *cptr) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate base \latexonly $e$ \endlatexonly exponential of the input argument.
+ *
+ * Calculate the fast approximate base \latexonly $e$ \endlatexonly exponential of the
+ * input argument \p x, \latexonly $e^x$ \endlatexonly.
+ *
+ * \return Returns an approximation to \latexonly $e^x$ \endlatexonly.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __expf(float x) __THROW;
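/*
 * Editor's illustration (not part of the original header): a minimal sketch of the
 * fast single-precision approximations above. Names are hypothetical; __sincosf()
 * produces both results in one call, which is the usual reason to prefer it over
 * separate __sinf()/__cosf() calls.
 */
__global__ void phase_demo(const float *theta, float *s, float *c, float *gain, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        __sincosf(theta[i], &s[i], &c[i]); /* fast approximate sine and cosine */
        gain[i] = __expf(-theta[i]);       /* fast approximate e^(-theta) */
    }
}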
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate base 10 exponential of the input argument.
+ *
+ * Calculate the fast approximate base 10 exponential of the input argument \p x,
+ * \latexonly $10^x$ \endlatexonly.
+ *
+ * \return Returns an approximation to \latexonly $10^x$ \endlatexonly.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __exp10f(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate base 2 logarithm of the input argument.
+ *
+ * Calculate the fast approximate base 2 logarithm of the input argument \p x.
+ *
+ * \return Returns an approximation to \latexonly $\log_2(x)$ \endlatexonly.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __log2f(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate base 10 logarithm of the input argument.
+ *
+ * Calculate the fast approximate base 10 logarithm of the input argument \p x.
+ *
+ * \return Returns an approximation to \latexonly $\log_{10}(x)$ \endlatexonly.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __log10f(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate base \latexonly $e$ \endlatexonly logarithm of the input argument.
+ *
+ * Calculate the fast approximate base \latexonly $e$ \endlatexonly logarithm of the
+ * input argument \p x.
+ *
+ * \return Returns an approximation to \latexonly $\log_e(x)$ \endlatexonly.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __logf(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Calculate the fast approximate of \latexonly $x^y$ \endlatexonly.
+ *
+ * Calculate the fast approximate of \p x, the first input argument,
+ * raised to the power of \p y, the second input argument,
+ * \latexonly $x^y$ \endlatexonly.
+ *
+ * \return Returns an approximation to \latexonly $x^y$ \endlatexonly.
+ *
+ * \note_accuracy_single_intrinsic
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __powf(float x, float y) __THROW;
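/*
 * Editor's illustration (not part of the original header): the four explicit
 * rounding modes of the float-to-int conversions declared below, applied to the
 * same input. Kernel and array names are hypothetical.
 */
__global__ void rounding_demo(const float *x, int *rn, int *rz, int *ru, int *rd, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        rn[i] = __float2int_rn(x[i]); /* nearest-even: 2.5f -> 2, 3.5f -> 4 */
        rz[i] = __float2int_rz(x[i]); /* toward zero, same as (int)x[i] */
        ru[i] = __float2int_ru(x[i]); /* toward +infinity (ceiling) */
        rd[i] = __float2int_rd(x[i]); /* toward -infinity (floor) */
    }
}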
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to a signed integer in round-to-nearest-even mode.
+ *
+ * Convert the single-precision floating-point value \p x to a signed integer
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_rn(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to a signed integer in round-towards-zero mode.
+ *
+ * Convert the single-precision floating-point value \p x to a signed integer
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_rz(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to a signed integer in round-up mode.
+ *
+ * Convert the single-precision floating-point value \p x to a signed integer
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_ru(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to a signed integer in round-down mode.
+ *
+ * Convert the single-precision floating-point value \p x to a signed integer
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned integer in round-to-nearest-even mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned integer
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_rn(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned integer in round-towards-zero mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned integer
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_rz(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned integer in round-up mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned integer
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_ru(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned integer in round-down mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned integer
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed integer to a float in round-to-nearest-even mode.
+ *
+ * Convert the signed integer value \p x to a single-precision floating-point value
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_rn(int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed integer to a float in round-towards-zero mode.
+ *
+ * Convert the signed integer value \p x to a single-precision floating-point value
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_rz(int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed integer to a float in round-up mode.
+ * + * Convert the signed integer value \p x to a single-precision floating-point value + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_ru(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed integer to a float in round-down mode. + * + * Convert the signed integer value \p x to a single-precision floating-point value + * in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_rd(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-to-nearest-even mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_rn(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-towards-zero mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_rz(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-up mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_ru(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-down mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_rd(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-to-nearest-even mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-towards-zero mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-up mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_ru(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-down mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-down (to negative infinity) mode. + * \return Returns converted value. 
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned 64-bit integer in round-to-nearest-even mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_rn(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned 64-bit integer in round-towards-zero mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_rz(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned 64-bit integer in round-up mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_ru(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned 64-bit integer in round-down mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-to-nearest-even mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_rn(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-towards-zero mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_rz(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-up mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_ru(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-down mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_rd(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-to-nearest-even mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_rn(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-towards-zero mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_rz(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-up mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_ru(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-down mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_rd(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Add two floating-point values in round-to-nearest-even mode.
+ *
+ * Compute the sum of \p x and \p y in round-to-nearest-even rounding mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_rn(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Add two floating-point values in round-towards-zero mode.
+ *
+ * Compute the sum of \p x and \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_rz(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Add two floating-point values in round-up mode.
+ *
+ * Compute the sum of \p x and \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_ru(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Add two floating-point values in round-down mode.
+ *
+ * Compute the sum of \p x and \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_rd(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Subtract two floating-point values in round-to-nearest-even mode.
+ *
+ * Compute the difference of \p x and \p y in round-to-nearest-even rounding mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_rn(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Subtract two floating-point values in round-towards-zero mode.
+ *
+ * Compute the difference of \p x and \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_rz(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Subtract two floating-point values in round-up mode.
+ *
+ * Compute the difference of \p x and \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_ru(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Subtract two floating-point values in round-down mode.
+ *
+ * Compute the difference of \p x and \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_rd(float x, float y);
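/*
 * Editor's illustration (not part of the original header): directed rounding is
 * what makes interval arithmetic possible on the GPU. Adding the lower bounds in
 * round-down mode and the upper bounds in round-up mode guarantees the true sum
 * lies in [*lo, *hi]. The helper name is hypothetical.
 */
__device__ void interval_add(float alo, float ahi, float blo, float bhi,
                             float *lo, float *hi)
{
    *lo = __fadd_rd(alo, blo); /* rounded toward -infinity: cannot overestimate */
    *hi = __fadd_ru(ahi, bhi); /* rounded toward +infinity: cannot underestimate */
}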
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Multiply two floating-point values in round-to-nearest-even mode.
+ *
+ * Compute the product of \p x and \p y in round-to-nearest-even mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_rn(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Multiply two floating-point values in round-towards-zero mode.
+ *
+ * Compute the product of \p x and \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_rz(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Multiply two floating-point values in round-up mode.
+ *
+ * Compute the product of \p x and \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_ru(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Multiply two floating-point values in round-down mode.
+ *
+ * Compute the product of \p x and \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_single
+ * \note_nofma
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_rd(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation, in round-to-nearest-even mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single ternary
+ * operation, rounding the result once in round-to-nearest-even mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly as a single operation.
+ * - fmaf(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - fmaf(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_rn(float x, float y, float z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation, in round-towards-zero mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single ternary
+ * operation, rounding the result once in round-towards-zero mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly as a single operation.
+ * - fmaf(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - fmaf(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_rz(float x, float y, float z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation, in round-up mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single ternary
+ * operation, rounding the result once in round-up (to positive infinity) mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly as a single operation.
+ * - fmaf(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - fmaf(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_ru(float x, float y, float z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $x \times y + z$ \endlatexonly as a single operation, in round-down mode.
+ *
+ * Computes the value of \latexonly $x \times y + z$ \endlatexonly as a single ternary
+ * operation, rounding the result once in round-down (to negative infinity) mode.
+ *
+ * \return Returns the rounded value of \latexonly $x \times y + z$ \endlatexonly as a single operation.
+ * - fmaf(\latexonly $\pm \infty$ \endlatexonly, \latexonly $\pm 0$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\latexonly $\pm 0$ \endlatexonly, \latexonly $\pm \infty$ \endlatexonly, \p z) returns NaN.
+ * - fmaf(\p x, \p y, \latexonly $-\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $+\infty$ \endlatexonly.
+ * - fmaf(\p x, \p y, \latexonly $+\infty$ \endlatexonly) returns NaN if \latexonly $x \times y$ \endlatexonly is an exact \latexonly $-\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_rd(float x, float y, float z);
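/*
 * Editor's illustration (not part of the original header): the single rounding of
 * __fmaf_rn() is what makes it preferable to a separate multiply and add, e.g. in
 * Horner-style polynomial evaluation. The helper name and coefficient layout are
 * hypothetical.
 */
__device__ float poly3(float x, float c0, float c1, float c2, float c3)
{
    float r = __fmaf_rn(c3, x, c2); /* c3*x + c2, rounded once */
    r = __fmaf_rn(r, x, c1);        /* (c3*x + c2)*x + c1, rounded once */
    return __fmaf_rn(r, x, c0);     /* full cubic, one rounding per step */
}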
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\frac{1}{x}$ \endlatexonly in round-to-nearest-even mode.
+ *
+ * Compute the reciprocal of \p x in round-to-nearest-even mode.
+ *
+ * \return Returns \latexonly $\frac{1}{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_rn(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\frac{1}{x}$ \endlatexonly in round-towards-zero mode.
+ *
+ * Compute the reciprocal of \p x in round-towards-zero mode.
+ *
+ * \return Returns \latexonly $\frac{1}{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_rz(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\frac{1}{x}$ \endlatexonly in round-up mode.
+ *
+ * Compute the reciprocal of \p x in round-up (to positive infinity) mode.
+ *
+ * \return Returns \latexonly $\frac{1}{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_ru(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\frac{1}{x}$ \endlatexonly in round-down mode.
+ *
+ * Compute the reciprocal of \p x in round-down (to negative infinity) mode.
+ *
+ * \return Returns \latexonly $\frac{1}{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\sqrt{x}$ \endlatexonly in round-to-nearest-even mode.
+ *
+ * Compute the square root of \p x in round-to-nearest-even mode.
+ *
+ * \return Returns \latexonly $\sqrt{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rn(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\sqrt{x}$ \endlatexonly in round-towards-zero mode.
+ *
+ * Compute the square root of \p x in round-towards-zero mode.
+ *
+ * \return Returns \latexonly $\sqrt{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rz(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\sqrt{x}$ \endlatexonly in round-up mode.
+ *
+ * Compute the square root of \p x in round-up (to positive infinity) mode.
+ *
+ * \return Returns \latexonly $\sqrt{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_ru(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $\sqrt{x}$ \endlatexonly in round-down mode.
+ *
+ * Compute the square root of \p x in round-down (to negative infinity) mode.
+ *
+ * \return Returns \latexonly $\sqrt{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rd(float x);
+ *
+ * \return Returns \latexonly $\sqrt{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Compute \latexonly $1/\sqrt{x}$ \endlatexonly in round-to-nearest-even mode.
+ *
+ * Compute the reciprocal square root of \p x in round-to-nearest-even mode.
+ *
+ * \return Returns \latexonly $1/\sqrt{x}$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frsqrt_rn(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Divide two floating-point values in round-to-nearest-even mode.
+ *
+ * Divides the floating-point value \p x by \p y in round-to-nearest-even mode.
+ *
+ * \return Returns \p x / \p y.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_rn(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Divide two floating-point values in round-towards-zero mode.
+ *
+ * Divides the floating-point value \p x by \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x / \p y.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_rz(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Divide two floating-point values in round-up mode.
+ *
+ * Divides the floating-point value \p x by \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x / \p y.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_ru(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Divide two floating-point values in round-down mode.
+ *
+ * Divides the floating-point value \p x by \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x / \p y.
+ *
+ * \note_accuracy_single
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_rd(float x, float y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Return the number of consecutive high-order zero bits in a 32-bit integer.
+ *
+ * Count the number of consecutive leading zero bits, starting at the most significant bit (bit 31) of \p x.
+ *
+ * \return Returns a value between 0 and 32 inclusive representing the number of zero bits.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __clz(int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Find the position of the least significant bit set to 1 in a 32-bit integer.
+ *
+ * Find the position of the first (least significant) bit set to 1 in \p x, where the least significant
+ * bit position is 1.
+ *
+ * \return Returns a value between 0 and 32 inclusive representing the position of the first bit set.
+ * - __ffs(0) returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __ffs(int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Count the number of bits that are set to 1 in a 32-bit integer.
+ *
+ * Count the number of bits that are set to 1 in \p x.
+ *
+ * \return Returns a value between 0 and 32 inclusive representing the number of set bits.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __popc(unsigned int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Reverse the bit order of a 32-bit unsigned integer.
+ *
+ * Reverses the bit order of the 32-bit unsigned integer \p x.
+ *
+ * \return Returns the bit-reversed value of \p x, i.e. bit N of the return value corresponds to bit 31-N of \p x.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __brev(unsigned int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Count the number of consecutive high-order zero bits in a 64-bit integer.
+ *
+ * Count the number of consecutive leading zero bits, starting at the most significant bit (bit 63) of \p x.
+ *
+ * \return Returns a value between 0 and 64 inclusive representing the number of zero bits.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __clzll(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Find the position of the least significant bit set to 1 in a 64-bit integer.
+ *
+ * Find the position of the first (least significant) bit set to 1 in \p x, where the least significant
+ * bit position is 1.
+ *
+ * \return Returns a value between 0 and 64 inclusive representing the position of the first bit set.
+ * - __ffsll(0) returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __ffsll(long long int x);
+
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Count the number of bits that are set to 1 in a 64-bit integer.
+ *
+ * Count the number of bits that are set to 1 in \p x.
+ *
+ * \return Returns a value between 0 and 64 inclusive representing the number of set bits.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __popcll(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Reverse the bit order of a 64-bit unsigned integer.
+ *
+ * Reverses the bit order of the 64-bit unsigned integer \p x.
+ *
+ * \return Returns the bit-reversed value of \p x, i.e. bit N of the return value corresponds to bit 63-N of \p x.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __brevll(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Return selected bytes from two 32-bit unsigned integers.
+ *
+ * \return Returns a 32-bit integer consisting of four bytes from eight input bytes provided in the two
+ * input integers \p x and \p y, as specified by a selector, \p s.
+ *
+ * Create 8-byte source
+ * - uint64_t \p tmp64 = ((uint64_t)\p y << 32) | \p x;
+ *
+ * Extract selector bits
+ * - \p selector0 = (\p s >> 0) & 0x7;
+ * - \p selector1 = (\p s >> 4) & 0x7;
+ * - \p selector2 = (\p s >> 8) & 0x7;
+ * - \p selector3 = (\p s >> 12) & 0x7;
+ *
+ * Return 4 selected bytes from 8-byte source:
+ * - \p res[07:00] = \p tmp64[\p selector0];
+ * - \p res[15:08] = \p tmp64[\p selector1];
+ * - \p res[23:16] = \p tmp64[\p selector2];
+ * - \p res[31:24] = \p tmp64[\p selector3];
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __byte_perm(unsigned int x, unsigned int y, unsigned int s);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Compute average of signed input arguments, avoiding overflow
+ * in the intermediate sum.
+ *
+ * Compute average of signed input arguments \p x and \p y
+ * as ( \p x + \p y ) >> 1, avoiding overflow in the intermediate sum.
+ *
+ * \return Returns a signed integer value representing the signed
+ * average value of the two inputs.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __hadd(int x, int y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Compute rounded average of signed input arguments, avoiding
+ * overflow in the intermediate sum.
+ * + * Compute average of signed input arguments \p x and \p y + * as ( \p x + \p y + 1 ) >> 1, avoiding overflow in the intermediate + * sum. + * + * \return Returns a signed integer value representing the signed + * rounded average value of the two inputs. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __rhadd(int x, int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Compute average of unsigned input arguments, avoiding overflow + * in the intermediate sum. + * + * Compute average of unsigned input arguments \p x and \p y + * as ( \p x + \p y ) >> 1, avoiding overflow in the intermediate sum. + * + * \return Returns an unsigned integer value representing the unsigned + * average value of the two inputs. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __uhadd(unsigned int x, unsigned int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Compute rounded average of unsigned input arguments, avoiding + * overflow in the intermediate sum. + * + * Compute average of unsigned input arguments \p x and \p y + * as ( \p x + \p y + 1 ) >> 1, avoiding overflow in the intermediate + * sum. + * + * \return Returns an unsigned integer value representing the unsigned + * rounded average value of the two inputs. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __urhadd(unsigned int x, unsigned int y); + +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __double2int_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __double2uint_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __double2ll_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-towards-zero mode. + * \return Returns converted value. 
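+ *
+ * Round-towards-zero truncates the fractional part, for positive and negative
+ * inputs alike; a small sketch (the function name is hypothetical):
+ * \code
+ * __device__ void rz_examples(void)
+ * {
+ *     long long int a = __double2ll_rz(2.9);            // a == 2
+ *     long long int b = __double2ll_rz(-2.9);           // b == -2 (towards zero, not downwards)
+ *     unsigned long long int c = __double2ull_rz(2.9);  // c == 2
+ * }
+ * \endcode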
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __double2ull_rz(double x);
+/* Performance-monitor counter reads (these correspond to the PTX special
+   registers %pm0..%pm3). */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm0(void);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm1(void);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm2(void);
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm3(void);
+
+/*******************************************************************************
+ *                                                                             *
+ *                            FP16 SIMD functions                              *
+ *                                                                             *
+ *******************************************************************************/
+
+ // #include "fp16.h"
+
+
+/*******************************************************************************
+ *                                                                             *
+ *                              SIMD functions                                 *
+ *                                                                             *
+ *******************************************************************************/
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword absolute value.
+ *
+ * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes,
+ * then computes absolute value for each of parts.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabs2(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword absolute value with signed saturation.
+ *
+ * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes,
+ * then computes absolute value with signed saturation for each of parts.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsss2(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed addition, with wrap-around: a + b
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then performs unsigned addition on corresponding parts.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vadd2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword addition with signed saturation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then performs addition with signed saturation on corresponding parts.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddss2 (unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword addition with unsigned saturation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then performs addition with unsigned saturation on corresponding parts.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddus2 (unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed rounded average computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then computes signed rounded average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
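+ *
+ * A worked example, assuming the same rounded-average convention
+ * ( (x + y + 1) >> 1 ) that __hadd()/__rhadd() document for scalars:
+ * \code
+ * __device__ void avg2_example(void)
+ * {
+ *     // High halfwords: (1 + 3 + 1) >> 1 = 2; low halfwords: (2 + 5 + 1) >> 1 = 4.
+ *     unsigned int r = __vavgs2(0x00010002u, 0x00030005u);
+ *     // r == 0x00020004
+ * }
+ * \endcode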
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgs2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned rounded average computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then computes unsigned rounded average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned average computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then computes unsigned average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vhaddu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if they are equal, and 0000 otherwise.
+ * For example __vcmpeq2(0x1234aba5, 0x1234aba6) returns 0xffff0000.
+ * \return Returns 0xffff if a = b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpeq2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison: a >= b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part >= 'b' part, and 0000 otherwise.
+ * For example __vcmpges2(0x1234aba5, 0x1234aba6) returns 0xffff0000.
+ * \return Returns 0xffff if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpges2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison: a >= b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part >= 'b' part, and 0000 otherwise.
+ * For example __vcmpgeu2(0x1234aba5, 0x1234aba6) returns 0xffff0000.
+ * \return Returns 0xffff if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgeu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison: a > b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part > 'b' part, and 0000 otherwise.
+ * For example __vcmpgts2(0x1234aba5, 0x1234aba6) returns 0x00000000.
+ * \return Returns 0xffff if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgts2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison: a > b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part > 'b' part, and 0000 otherwise.
+ * For example __vcmpgtu2(0x1234aba5, 0x1234aba6) returns 0x00000000.
+ * \return Returns 0xffff if a > b, else returns 0.
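+ *
+ * The per-halfword ffff/0000 masks compose directly with bitwise operations;
+ * a hedged selection sketch (the helper name is hypothetical):
+ * \code
+ * __device__ unsigned int max_u16x2(unsigned int a, unsigned int b)
+ * {
+ *     unsigned int m = __vcmpgtu2(a, b);  // ffff where the 'a' halfword is greater
+ *     return (a & m) | (b & ~m);          // per-halfword unsigned maximum via the mask
+ * }
+ * \endcode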
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgtu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison: a <= b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part <= 'b' part, and 0000 otherwise.
+ * For example __vcmples2(0x1234aba5, 0x1234aba6) returns 0xffffffff.
+ * \return Returns 0xffff if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmples2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison: a <= b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part <= 'b' part, and 0000 otherwise.
+ * For example __vcmpleu2(0x1234aba5, 0x1234aba6) returns 0xffffffff.
+ * \return Returns 0xffff if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpleu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison: a < b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part < 'b' part, and 0000 otherwise.
+ * For example __vcmplts2(0x1234aba5, 0x1234aba6) returns 0x0000ffff.
+ * \return Returns 0xffff if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmplts2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison: a < b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part < 'b' part, and 0000 otherwise.
+ * For example __vcmpltu2(0x1234aba5, 0x1234aba6) returns 0x0000ffff.
+ * \return Returns 0xffff if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpltu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison: a != b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part != 'b' part, and 0000 otherwise.
+ * For example __vcmpne2(0x1234aba5, 0x1234aba6) returns 0x0000ffff.
+ * \return Returns 0xffff if a != b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpne2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword absolute difference of unsigned integer computation: |a - b|
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes absolute difference. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed maximum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes signed maximum. Partial results
+ * are recombined and returned as unsigned int.
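+ *
+ * An equivalent scalar reference for the semantics described above (a sketch;
+ * the helper is hypothetical and not part of this header):
+ * \code
+ * unsigned int vmaxs2_reference(unsigned int a, unsigned int b)
+ * {
+ *     short ah = (short)(a >> 16), al = (short)(a & 0xffffu);
+ *     short bh = (short)(b >> 16), bl = (short)(b & 0xffffu);
+ *     unsigned int rh = (unsigned short)(ah > bh ? ah : bh);
+ *     unsigned int rl = (unsigned short)(al > bl ? al : bl);
+ *     return (rh << 16) | rl;
+ * }
+ * \endcode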
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxs2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned maximum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes unsigned maximum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed minimum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes signed minimum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmins2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned minimum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes unsigned minimum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vminu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part == 'b' part.
+ * If both equalities are satisfied, function returns 1.
+ * \return Returns 1 if a = b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vseteq2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part >= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetges2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part >= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgeu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part > 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgts2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part > 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgtu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part <= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetles2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part <= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetleu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part < 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetlts2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part < 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetltu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part != 'b' part.
+ * If both conditions are satisfied, function returns 1.
+ * \return Returns 1 if a != b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetne2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword sum of abs diff of unsigned.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes absolute differences and returns
+ * sum of those differences.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsadu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed subtraction, with wrap-around.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs subtraction. Partial results
+ * are recombined and returned as unsigned int.
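+ *
+ * Wrap-around stays confined to each halfword; a worked example (values
+ * computed by hand from the description above):
+ * \code
+ * __device__ void sub2_example(void)
+ * {
+ *     // Low halfwords: 0x0000 - 0x0001 wraps to 0xffff; no borrow reaches
+ *     // the high halfwords, where 0x0002 - 0x0001 = 0x0001.
+ *     unsigned int r = __vsub2(0x00020000u, 0x00010001u);
+ *     // r == 0x0001ffff
+ * }
+ * \endcode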
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsub2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword subtraction with signed saturation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs subtraction with signed saturation.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubss2 (unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword subtraction with unsigned saturation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs subtraction with unsigned saturation.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubus2 (unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword negation.
+ *
+ * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes.
+ * For each part function computes negation. Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vneg2(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword negation with signed saturation.
+ *
+ * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes.
+ * For each part function computes negation with signed saturation. Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vnegss2(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword absolute difference of signed integer.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes absolute difference.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffs2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword sum of absolute difference of signed.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes absolute differences and sums them up.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsads2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte absolute value.
+ *
+ * Splits argument by bytes. Computes absolute value of each byte.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabs4(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte absolute value with signed saturation.
+ *
+ * Splits 4 bytes of argument into 4 parts, each consisting of 1 byte,
+ * then computes absolute value with signed saturation for each of parts.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsss4(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte (un)signed addition.
+ *
+ * Splits 'a' into 4 bytes, then performs unsigned addition on each of these
+ * bytes with the corresponding byte from 'b', ignoring overflow.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vadd4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte addition with signed saturation.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte,
+ * then performs addition with signed saturation on corresponding parts.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddss4 (unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte addition with unsigned saturation.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte,
+ * then performs addition with unsigned saturation on corresponding parts.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddus4 (unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte signed rounded average.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte,
+ * then computes signed rounded average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgs4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned rounded average.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte,
+ * then computes unsigned rounded average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte unsigned average.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte,
+ * then computes unsigned average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vhaddu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if they are equal, and 00 otherwise.
+ * For example __vcmpeq4(0x1234aba5, 0x1234aba6) returns 0xffffff00.
+ * \return Returns 0xff if a = b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpeq4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part >= 'b' part, and 00 otherwise.
+ * For example __vcmpges4(0x1234aba5, 0x1234aba6) returns 0xffffff00.
+ * \return Returns 0xff if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpges4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part >= 'b' part, and 00 otherwise.
+ * For example __vcmpgeu4(0x1234aba5, 0x1234aba6) returns 0xffffff00.
+ * \return Returns 0xff if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgeu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part > 'b' part, and 00 otherwise.
+ * For example __vcmpgts4(0x1234aba5, 0x1234aba6) returns 0x00000000.
+ * \return Returns 0xff if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgts4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part > 'b' part, and 00 otherwise.
+ * For example __vcmpgtu4(0x1234aba5, 0x1234aba6) returns 0x00000000.
+ * \return Returns 0xff if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgtu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part <= 'b' part, and 00 otherwise.
+ * For example __vcmples4(0x1234aba5, 0x1234aba6) returns 0xffffffff.
+ * \return Returns 0xff if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmples4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part <= 'b' part, and 00 otherwise.
+ * For example __vcmpleu4(0x1234aba5, 0x1234aba6) returns 0xffffffff.
+ * \return Returns 0xff if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpleu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part < 'b' part, and 00 otherwise.
+ * For example __vcmplts4(0x1234aba5, 0x1234aba6) returns 0x000000ff.
+ * \return Returns 0xff if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmplts4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part < 'b' part, and 00 otherwise.
+ * For example __vcmpltu4(0x1234aba5, 0x1234aba6) returns 0x000000ff.
+ * \return Returns 0xff if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpltu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts result is ff if 'a' part != 'b' part, and 00 otherwise.
+ * For example __vcmpne4(0x1234aba5, 0x1234aba6) returns 0x000000ff.
+ * \return Returns 0xff if a != b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpne4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte absolute difference of unsigned integer.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes absolute difference. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte signed maximum.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes signed maximum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxs4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte unsigned maximum.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes unsigned maximum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte signed minimum.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes signed minimum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmins4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte unsigned minimum.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes unsigned minimum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vminu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part == 'b' part.
+ * If all four equalities are satisfied, function returns 1.
+ * \return Returns 1 if a = b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vseteq4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part <= 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetles4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part <= 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetleu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part < 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetlts4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part < 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetltu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part >= 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetges4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part >= 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgeu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part > 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgts4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part > 'b' part.
+ * If all four inequalities are satisfied, function returns 1.
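+ *
+ * Per the description above, the __vset* variants reduce all four per-byte
+ * comparisons to a single 0/1 value, whereas the __vcmp* variants return
+ * per-byte ff/00 masks; a hedged illustration:
+ * \code
+ * __device__ void gt4_example(unsigned int a, unsigned int b)
+ * {
+ *     unsigned int all_gt = __vsetgtu4(a, b);  // 1 only if every byte of a is greater
+ *     unsigned int mask   = __vcmpgtu4(a, b);  // ff/00 mask, one result per byte
+ * }
+ * \endcode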
+ * \return Returns 1 if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgtu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs comparison 'a' part != 'b' part.
+ * If all four conditions are satisfied, function returns 1.
+ * \return Returns 1 if a != b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetne4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte sum of abs difference of unsigned.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes absolute differences and returns
+ * sum of those differences.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsadu4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte subtraction.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs subtraction. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsub4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte subtraction with signed saturation.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs subtraction with signed saturation.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubss4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte subtraction with unsigned saturation.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function performs subtraction with unsigned saturation.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubus4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte negation.
+ *
+ * Splits 4 bytes of argument into 4 parts, each consisting of 1 byte.
+ * For each part function computes negation. Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vneg4(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-byte negation with signed saturation.
+ *
+ * Splits 4 bytes of argument into 4 parts, each consisting of 1 byte.
+ * For each part function computes negation with signed saturation. Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vnegss4(unsigned int a);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte absolute difference of signed integer.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes absolute difference.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffs4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-byte sum of abs difference of signed.
+ *
+ * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte.
+ * For corresponding parts function computes absolute differences and sums them up.
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsads4(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(max(a, b), 0)
+ *
+ * Calculates the maximum of two signed ints \p a and \p b; if this is less than \p 0 then \p 0 is returned.
+ * \return Returns computed value.
+ */
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax_s32_relu(const int a, const int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(max(a, b), 0)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs a max with relu ( = max(a_part, b_part, 0) ). Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax_s16x2_relu(const unsigned int a, const unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(min(a, b), 0)
+ *
+ * Calculates the minimum of two signed ints \p a and \p b; if this is less than \p 0 then \p 0 is returned.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin_s32_relu(const int a, const int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(min(a, b), 0)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs a min with relu ( = max(min(a_part, b_part), 0) ). Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin_s16x2_relu(const unsigned int a, const unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(max(a, b), c)
+ *
+ * Calculates the 3-way max of signed integers \p a, \p b and \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(max(a, b), c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs a 3-way max ( = max(max(a_part, b_part), c_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(max(a, b), c)
+ *
+ * Calculates the 3-way max of unsigned integers \p a, \p b and \p c.
+ * \return Returns computed value.
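+ *
+ * For instance (worked out from the definition above):
+ * \code
+ * __device__ void max3_example(void)
+ * {
+ *     unsigned int m = __vimax3_u32(1u, 7u, 4u);  // max(max(1, 7), 4) == 7
+ * }
+ * \endcode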
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u32(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(max(a, b), c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as unsigned shorts.
+ * For corresponding parts function performs a 3-way max ( = max(max(a_part, b_part), c_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes min(min(a, b), c)
+ *
+ * Calculates the 3-way min of signed integers \p a, \p b and \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword min(min(a, b), c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs a 3-way min ( = min(min(a_part, b_part), c_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes min(min(a, b), c)
+ *
+ * Calculates the 3-way min of unsigned integers \p a, \p b and \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u32(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword min(min(a, b), c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as unsigned shorts.
+ * For corresponding parts function performs a 3-way min ( = min(min(a_part, b_part), c_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(max(max(a, b), c), 0)
+ *
+ * Calculates the maximum of three signed ints; if this is less than \p 0 then \p 0 is returned.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32_relu(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(max(max(a, b), c), 0)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs a three-way max with relu ( = max(a_part, b_part, c_part, 0) ).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
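+ *
+ * A worked example of the packed relu behaviour (values computed by hand from
+ * the definition above; -1 and -2 are 0xffff and 0xfffe as signed shorts):
+ * \code
+ * __device__ void relu_example(void)
+ * {
+ *     // High halfwords: max(-1, -2, 0, 0) = 0; low halfwords: max(3, 5, 0, 0) = 5.
+ *     unsigned int r = __vimax3_s16x2_relu(0xffff0003u, 0xfffe0005u, 0x00000000u);
+ *     // r == 0x00000005
+ * }
+ * \endcode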
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(min(min(a, b), c), 0)
+ *
+ * Calculates the minimum of three signed ints; if this is less than \p 0 then \p 0 is returned.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32_relu(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(min(min(a, b), c), 0)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs a three-way min with relu ( = max(min(a_part, b_part, c_part), 0) ).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(a + b, c)
+ *
+ * Calculates the sum of signed integers \p a and \p b and takes the max with \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(a + b, c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts function performs an add and compare: max(a_part + b_part, c_part)
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(a + b, c)
+ *
+ * Calculates the sum of unsigned integers \p a and \p b and takes the max with \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u32(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(a + b, c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as unsigned shorts.
+ * For corresponding parts function performs an add and compare: max(a_part + b_part, c_part)
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes min(a + b, c)
+ *
+ * Calculates the sum of signed integers \p a and \p b and takes the min with \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword min(a + b, c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts the function performs an add and compare: min(a_part + b_part, c_part).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes min(a + b, c)
+ *
+ * Calculates the sum of unsigned integers \p a and \p b and takes the min with \p c.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u32(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword min(a + b, c)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as unsigned shorts.
+ * For corresponding parts the function performs an add and compare: min(a_part + b_part, c_part).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u16x2(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(max(a + b, c), 0)
+ *
+ * Calculates the sum of signed integers \p a and \p b and takes the max with \p c.
+ * If the result is less than \p 0 then \p 0 is returned.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32_relu(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(max(a + b, c), 0)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts the function performs an add, followed by a max with relu: max(max(a_part + b_part, c_part), 0).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(min(a + b, c), 0)
+ *
+ * Calculates the sum of signed integers \p a and \p b and takes the min with \p c.
+ * If the result is less than \p 0 then \p 0 is returned.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32_relu(const int a, const int b, const int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(min(a + b, c), 0)
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts the function performs an add, followed by a min with relu: max(min(a_part + b_part, c_part), 0).
+ * Partial results are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(a, b), also sets the value pointed to by pred to (a >= b).
+ *
+ * Calculates the maximum of two signed ints \p a and \p b. Also sets the value pointed to by \p pred to the value (a >= b).
+ * \return Returns computed values.
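+ *
+ * For example, __vibmax_s32(7, 3, &p) returns 7 and sets the bool pointed to
+ * by \p pred to true, since 7 >= 3.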
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmax_s32(const int a, const int b, bool* const pred);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes max(a, b), also sets the value pointed to by pred to (a >= b).
+ *
+ * Calculates the maximum of two unsigned ints \p a and \p b. Also sets the value pointed to by \p pred to the value (a >= b).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u32(const unsigned int a, const unsigned int b, bool* const pred);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes min(a, b), also sets the value pointed to by pred to (a <= b).
+ *
+ * Calculates the minimum of two signed ints \p a and \p b. Also sets the value pointed to by \p pred to the value (a <= b).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmin_s32(const int a, const int b, bool* const pred);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes min(a, b), also sets the value pointed to by pred to (a <= b).
+ *
+ * Calculates the minimum of two unsigned ints \p a and \p b. Also sets the value pointed to by \p pred to the value (a <= b).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u32(const unsigned int a, const unsigned int b, bool* const pred);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a >= b).
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts the function performs a maximum ( = max(a_part, b_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * Sets the value pointed to by \p pred_hi to the value (a_high_part >= b_high_part).
+ * Sets the value pointed to by \p pred_lo to the value (a_low_part >= b_low_part).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword max(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a >= b).
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as unsigned shorts.
+ * For corresponding parts the function performs a maximum ( = max(a_part, b_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * Sets the value pointed to by \p pred_hi to the value (a_high_part >= b_high_part).
+ * Sets the value pointed to by \p pred_lo to the value (a_low_part >= b_low_part).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword min(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a <= b).
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as signed shorts.
+ * For corresponding parts the function performs a minimum ( = min(a_part, b_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * Sets the value pointed to by \p pred_hi to the value (a_high_part <= b_high_part).
+ * Sets the value pointed to by \p pred_lo to the value (a_low_part <= b_low_part).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword min(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a <= b).
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * These 2 byte parts are interpreted as unsigned shorts.
+ * For corresponding parts the function performs a minimum ( = min(a_part, b_part) ).
+ * Partial results are recombined and returned as unsigned int.
+ * Sets the value pointed to by \p pred_hi to the value (a_high_part <= b_high_part).
+ * Sets the value pointed to by \p pred_lo to the value (a_low_part <= b_low_part).
+ * \return Returns computed values.
+ */
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo);
+
+/*******************************************************************************
+ *                                                                             *
+ *                            END SIMD functions                               *
+ *                                                                             *
+ *******************************************************************************/
+} //extern "C"
+#undef EXCLUDE_FROM_RTC
+
+#undef __DEVICE_FUNCTIONS_DECL__
+#undef __DEVICE_FUNCTIONS_STATIC_DECL__
+#undef __DEVICE_HOST_FUNCTIONS_STATIC_DECL__
+
+#endif /* __cplusplus && __CUDACC__ */
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if !defined(__CUDACC_RTC__)
+#include "device_functions.hpp"
+#endif /* !defined(__CUDACC_RTC__) */
+
+#include "device_atomic_functions.h"
+#include "device_double_functions.h"
+#include "sm_20_atomic_functions.h"
+#include "sm_32_atomic_functions.h"
+#include "sm_35_atomic_functions.h"
+#include "sm_60_atomic_functions.h"
+#include "sm_20_intrinsics.h"
+#include "sm_30_intrinsics.h"
+#include "sm_32_intrinsics.h"
+#include "sm_35_intrinsics.h"
+#include "sm_61_intrinsics.h"
+#include "sm_70_rt.h"
+#include "sm_80_rt.h"
+#include "sm_90_rt.h"
+#ifndef __CUDACC_RTC_MINIMAL__
+#include "texture_indirect_functions.h"
+#include "surface_indirect_functions.h"
+#endif /* !__CUDACC_RTC_MINIMAL__ */
+#include "cudacc_ext.h"
+
+#ifdef __CUDACC__
+extern "C" __host__ __device__ unsigned CUDARTAPI __cudaPushCallConfiguration(dim3 gridDim,
+                                                                              dim3 blockDim,
+                                                                              size_t sharedMem = 0,
+                                                                              struct CUstream_st *stream = 0);
+
+#if !defined(__CUDACC_RTC__) && !defined(__NV_LEGACY_LAUNCH)
+extern "C" cudaError_t CUDARTAPI __cudaGetKernel(cudaKernel_t *, const void *);
+
+extern "C" cudaError_t CUDARTAPI __cudaLaunchKernel(
+    cudaKernel_t kernel,
+    dim3 gridDim,
+    dim3 blockDim,
+    void **args,
+    size_t sharedMem,
+    cudaStream_t stream
+);
+
+extern "C" cudaError_t CUDARTAPI __cudaLaunchKernel_ptsz(
+    cudaKernel_t kernel,
+    dim3 gridDim,
+    dim3 blockDim,
+    void **args,
+    size_t sharedMem,
+    cudaStream_t stream
+);
+
+// referenced from compiler-generated kernel launch code
+static inline cudaError_t __cudaLaunchKernel_helper(
+    cudaKernel_t kernel,
+    dim3 gridDim,
+    dim3 blockDim,
+    void **args,
+    size_t sharedMem,
+    cudaStream_t stream)
+{
+#if defined(__CUDART_API_PER_THREAD_DEFAULT_STREAM)
+  return __cudaLaunchKernel_ptsz(kernel, gridDim,
blockDim, args, sharedMem, + stream); +#else /* !__CUDART_API_PER_THREAD_DEFAULT_STREAM */ + return __cudaLaunchKernel(kernel, gridDim, blockDim, args, sharedMem, + stream); +#endif /* __CUDART_API_PER_THREAD_DEFAULT_STREAM */ +} +#endif /* !defined(__CUDACC_RTC__) && !defined(__NV_LEGACY_LAUNCH) */ + + + +#endif /* __CUDACC__ */ + +#endif /* !__DEVICE_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H__ +#endif + diff --git a/miniCUDA124/include/crt/device_functions.hpp b/miniCUDA124/include/crt/device_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..33280f56a78c6163b43a297a283ff2593bf60387 --- /dev/null +++ b/miniCUDA124/include/crt/device_functions.hpp @@ -0,0 +1,1197 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__ +#endif + +#if !defined(__DEVICE_FUNCTIONS_HPP__) +#define __DEVICE_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#define __DEVICE_FUNCTIONS_DECL__ __device__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ __device__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ __device__ __host__ __cudart_builtin__ +#else +#define __DEVICE_FUNCTIONS_DECL__ __device__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __host__ __cudart_builtin__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#undef __DEVICE_FUNCTIONS_DECL__ +#undef __DEVICE_FUNCTIONS_STATIC_DECL__ + +#endif /* __cplusplus && __CUDACC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#ifdef __CUDACC__ +# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) +#define __CUDA_AND_AT_LEAST_SM_90__ +#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) */ +# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) +#define __CUDA_AND_AT_LEAST_SM_70__ +#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) */ +# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) +#define __CUDA_AND_AT_LEAST_SM_75__ +#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) */ +#endif /* __CUDACC__ */ + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax_s32_relu(const int a, const int b){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm("{max.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); + return res; +#else + // Host and older architecture code + int ans = max(a, b); + + return (ans > 0) ? 
ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax_s16x2_relu(const unsigned int a, const unsigned int b){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm("{max.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); +#elif defined(__CUDA_ARCH__) + res = __vmaxs2(__vmaxs2(a, b), 0U); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)max(aS_lo, bS_lo); + short ansS_hi = (short)max(aS_hi, bS_hi); + + // relu + if(ansS_lo < 0){ ansS_lo = 0; } + if(ansS_hi < 0){ ansS_hi = 0; } + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin_s32_relu(const int a, const int b){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm("{min.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); + return res; +#else + // Host and older architecture code + int ans = min(a, b); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin_s16x2_relu(const unsigned int a, const unsigned int b){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm("{min.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); +#elif defined(__CUDA_ARCH__) + res = __vmaxs2(__vmins2(a, b), 0U); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)min(aS_lo, bS_lo); + short ansS_hi = (short)min(aS_hi, bS_hi); + + // relu + if(ansS_lo < 0){ ansS_lo = 0; } + if(ansS_hi < 0){ ansS_hi = 0; } + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "max.s32 t1, %1, %2; \n\t" + "max.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(max(a, b), c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + // Future asm code (naming/syntax may change): + asm ("{.reg .b32 t1; \n\t" + "max.s16x2 t1, %1, %2; \n\t" + "max.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif 
defined(__CUDA_AND_AT_LEAST_SM_70__)
+    res = __vmaxs2(__vmaxs2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate our high and low bit:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    //cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    short cS_lo = *(short*)& cU_lo;
+    short cS_hi = *(short*)& cU_hi;
+
+    // Get answer
+    short ansS_lo = (short)max(max(aS_lo, bS_lo), cS_lo);
+    short ansS_hi = (short)max(max(aS_hi, bS_hi), cS_hi);
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u32(const unsigned int a, const unsigned int b, const unsigned int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int res;
+    asm ("{.reg .u32 t1; \n\t"
+         "max.u32 t1, %1, %2; \n\t"
+         "max.u32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return max(max(a, b), c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "max.u16x2 t1, %1, %2; \n\t"
+         "max.u16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vmaxu2(__vmaxu2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate our high and low bit:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Get answer
+    unsigned short ansU_lo = (unsigned short)max(max(aU_lo, bU_lo), cU_lo);
+    unsigned short ansU_hi = (unsigned short)max(max(aU_hi, bU_hi), cU_hi);
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32(const int a, const int b, const int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int res;
+    asm ("{.reg .s32 t1; \n\t"
+         "min.s32 t1, %1, %2; \n\t"
+         "min.s32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return min(min(a, b), c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "min.s16x2 t1, %1, %2; \n\t"
+         "min.s16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_AND_AT_LEAST_SM_70__)
+    res = __vmins2(__vmins2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate our high and low bit:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    //cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    short cS_lo = *(short*)& cU_lo;
+    short cS_hi = *(short*)& cU_hi;
+
+    // Get answer
+    short ansS_lo = (short)min(min(aS_lo, bS_lo), cS_lo);
+    short ansS_hi = (short)min(min(aS_hi, bS_hi), cS_hi);
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u32(const unsigned int a, const unsigned int b, const unsigned int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int res;
+    asm ("{.reg .u32 t1; \n\t"
+         "min.u32 t1, %1, %2; \n\t"
+         "min.u32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return min(min(a, b), c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "min.u16x2 t1, %1, %2; \n\t"
+         "min.u16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vminu2(__vminu2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate our high and low bit:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Get answer
+    unsigned short ansU_lo = (unsigned short)min(min(aU_lo, bU_lo), cU_lo);
+    unsigned short ansU_hi = (unsigned short)min(min(aU_hi, bU_hi), cU_hi);
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32_relu(const int a, const int b, const int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int res;
+    asm ("{.reg .s32 t1; \n\t"
+         "max.s32.relu t1, %1, %2; \n\t"
+         "max.s32.relu %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    int ans = max(max(a, b), c);
+
+    return (ans > 0) ?
ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "max.s16x2.relu t1, %1, %2; \n\t" + "max.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_AND_AT_LEAST_SM_75__) + res = __vimax_s16x2_relu(__vmaxs2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max(max(aS_lo, bS_lo), cS_lo); + short ansS_hi = (short)max(max(aS_hi, bS_hi), cS_hi); + + // relu + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32_relu(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "min.s32.relu t1, %1, %2; \n\t" + "min.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = min(min(a, b), c); + + return (ans > 0) ? 
ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "min.s16x2.relu t1, %1, %2; \n\t" + "min.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_AND_AT_LEAST_SM_75__) + res = __vimin_s16x2_relu(__vmins2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)min(min(aS_lo, bS_lo), cS_lo); + short ansS_hi = (short)min(min(aS_hi, bS_hi), cS_hi); + + // relu + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "max.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "max.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmaxs2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u32(const unsigned int a, const unsigned int b, const unsigned int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + 
unsigned int res; + asm ("{.reg .u32 t1; \n\t" + "add.u32 t1, %1, %2; \n\t" + "max.u32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.u16x2 t1, %1, %2; \n\t" + "max.u16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmaxu2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)max((unsigned short)(aU_lo + bU_lo), cU_lo); + unsigned short ansU_hi = (unsigned short)max((unsigned short)(aU_hi + bU_hi), cU_hi); + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "min.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return min(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "min.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmins2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u32(const unsigned int a, const unsigned int b, const unsigned int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int res; + asm ("{.reg .u32 t1; \n\t" + "add.u32 t1, %1, %2; \n\t" + "min.u32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code 
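+    // Note: the unsigned 32-bit add wraps modulo 2^32 on overflow, matching
+    // the wrapping behavior of the add.u32 instruction in the SM_90 path above.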
+ return min(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.u16x2 t1, %1, %2; \n\t" + "min.u16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vminu2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)min((unsigned short)(aU_lo + bU_lo), cU_lo); + unsigned short ansU_hi = (unsigned short)min((unsigned short)(aU_hi + bU_hi), cU_hi); + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32_relu(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "max.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = max(a + b, c); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "max.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vimax_s16x2_relu(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi); + + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32_relu(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "min.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = min(a + b, c); + + return (ans > 0) ? 
ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "min.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vimin_s16x2_relu(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi); + + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +// vimax vimin with predicate +// *pred gets set to '(a >= b)' +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmax_s32(const int a, const int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.ge.s32 __$temp1, %2, %3;\n\t" + " selp.s32 %0, %2, %3, __$temp1;\n\t" + " selp.s32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + int ans = max(a, b); + + *pred = (a >= b); + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u32(const unsigned int a, const unsigned int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.ge.u32 __$temp1, %2, %3;\n\t" + " selp.u32 %0, %2, %3, __$temp1;\n\t" + " selp.u32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + unsigned int ans = max(a, b); + + *pred = (a >= b); + return ans; +#endif +} + +// *pred gets set to '(a <= b)' +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmin_s32(const int a, const int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.le.s32 __$temp1, %2, %3;\n\t" + " selp.s32 %0, %2, %3, __$temp1;\n\t" + " selp.s32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + int ans = min(a, b); + + *pred = (a <= b); + return ans; +#endif +} + +// *pred gets set to '(a <= b)' +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u32(const unsigned int a, const unsigned int b, bool* const pred){ +#ifdef 
__CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.le.u32 __$temp1, %2, %3;\n\t" + " selp.u32 %0, %2, %3, __$temp1;\n\t" + " selp.u32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + unsigned int ans = min(a, b); + + *pred = (a <= b); + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .s16 rs0, rs1, rs2, rs3; \n\t" + "max.s16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.s16 pv, rs0, rs2; \n\t" + "setp.eq.s16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)max(aS_lo, bS_lo); + short ansS_hi = (short)max(aS_hi, bS_hi); + + *pred_hi = (aS_hi >= bS_hi); + *pred_lo = (aS_lo >= bS_lo); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .u16 rs0, rs1, rs2, rs3; \n\t" + "max.u16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.u16 pv, rs0, rs2; \n\t" + "setp.eq.u16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)max(aU_lo, bU_lo); + unsigned short ansU_hi = (unsigned short)max(aU_hi, bU_hi); + + *pred_hi = (aU_hi >= bU_hi); + *pred_lo = (aU_lo >= bU_lo); + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + 
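+/*
+ * Illustrative usage sketch (not taken from the original header; the variable
+ * names are hypothetical): packing two signed 16-bit values per operand and
+ * reading back the per-halfword maxima and predicates.
+ *
+ *   unsigned int a = ((unsigned int)(unsigned short)(-2) << 16)   // a_hi = -2
+ *                  | (unsigned int)(unsigned short)7;             // a_lo =  7
+ *   unsigned int b = ((unsigned int)(unsigned short)3 << 16)      // b_hi =  3
+ *                  | (unsigned int)(unsigned short)5;             // b_lo =  5
+ *   bool hi_ge, lo_ge;
+ *   unsigned int m = __vibmax_s16x2(a, b, &hi_ge, &lo_ge);
+ *   // m packs {max(-2, 3), max(7, 5)} = {3, 7};
+ *   // hi_ge = (-2 >= 3) = false, lo_ge = (7 >= 5) = true.
+ */
+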
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .u16 rs0, rs1, rs2, rs3; \n\t" + "min.s16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.s16 pv, rs0, rs2; \n\t" + "setp.eq.s16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)min(aS_lo, bS_lo); + short ansS_hi = (short)min(aS_hi, bS_hi); + + *pred_hi = (aS_hi <= bS_hi); + *pred_lo = (aS_lo <= bS_lo); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .u16 rs0, rs1, rs2, rs3; \n\t" + "min.u16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.u16 pv, rs0, rs2; \n\t" + "setp.eq.u16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)min(aU_lo, bU_lo); + unsigned short ansU_hi = (unsigned short)min(aU_hi, bU_hi); + + *pred_hi = (aU_hi <= bU_hi); + *pred_lo = (aU_lo <= bU_lo); + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + +#ifdef __CUDA_AND_AT_LEAST_SM_90__ +#undef __CUDA_AND_AT_LEAST_SM_90__ +#endif + +#undef __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* !__DEVICE_FUNCTIONS_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__) +#undef 
__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__ +#endif diff --git a/miniCUDA124/include/crt/func_macro.h b/miniCUDA124/include/crt/func_macro.h new file mode 100644 index 0000000000000000000000000000000000000000..64dcf066023d3630a3d32b04752d9a64133749ac --- /dev/null +++ b/miniCUDA124/include/crt/func_macro.h @@ -0,0 +1,57 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__ +#endif + +#if !defined(__FUNC_MACRO_H__) +#define __FUNC_MACRO_H__ + +#if !defined(__CUDA_INTERNAL_COMPILATION__) + +#error -- incorrect inclusion of a cudart header file + +#endif /* !__CUDA_INTERNAL_COMPILATION__ */ + +#if defined(__GNUC__) + +#define __func__(decl) \ + inline decl + +#define __device_func__(decl) \ + static __attribute__((__unused__)) decl + +#elif defined(_WIN32) + +#define __func__(decl) \ + static inline decl + +#define __device_func__(decl) \ + static decl + +#endif /* __GNUC__ */ + +#endif /* __FUNC_MACRO_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__ +#endif diff --git a/miniCUDA124/include/crt/host_config.h b/miniCUDA124/include/crt/host_config.h new file mode 100644 index 0000000000000000000000000000000000000000..868d0b8cc21deb9f3363257567825185fc4838f2 --- /dev/null +++ b/miniCUDA124/include/crt/host_config.h @@ -0,0 +1,310 @@ +/* + * Copyright 1993-2024 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__ +#endif + +#if !defined(__HOST_CONFIG_H__) +#define __HOST_CONFIG_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) + +#define _CRTIMP +#define __THROW + +#else /* __CUDACC_RTC__ */ + +/* check for host compilers that are compatible with nvcc */ +#if !defined(__GNUC__) && !defined(_WIN32) + +#error --- !!! UNSUPPORTED COMPILER !!! --- + +#endif /* !__GNUC__ && !_WIN32 */ + +/* check invalid configurations */ +#if defined(__PGIC__) +#if !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__) +#error -- unsupported pgc++ configuration! pgc++ is supported only on Linux x86_64! +#endif /* !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__) */ +#endif /* defined(__PGIC__) */ + +#if defined(__powerpc__) +#if !defined(__powerpc64__) || !defined(__LITTLE_ENDIAN__) +#error -- unsupported PPC platform! Only 64-bit little endian PPC is supported! +#endif /* !__powerpc64__ || !__LITTLE_ENDIAN__ */ +#endif /* __powerpc__ */ + +#if defined(__APPLE__) && defined(__MACH__) && !defined(__clang__) +#error -- clang and clang++ are the only supported host compilers on Mac OS X! 
+#endif /* __APPLE__ && __MACH__ && !__clang__ */
+
+
+/* check host compiler version */
+#if !__NV_NO_HOST_COMPILER_CHECK
+
+#if defined(__ICC)
+
+#if (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && !(__ICC >= 1900 && __ICC <= 2021)) || !defined(__GNUC__) || !defined(__LP64__)
+
+#error -- unsupported ICC configuration! Only ICC 15.0, ICC 16.0, ICC 17.0, ICC 18.0, ICC 19.x and 20.x on Linux x86_64 are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && !(__ICC >= 1900 && __ICC <= 2021)) || !__GNUC__ || !__LP64__ */
+
+#endif /* __ICC */
+
+#if defined(__GRCO_CLANG_COMPILER__)
+#if (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17))
+#error -- unsupported Grace clang version! The version must be 16.x to 17.x. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+#endif /* (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17)) */
+
+#endif /* __GRCO_CLANG_COMPILER__ */
+
+#if defined(__INTEL_CLANG_COMPILER)
+#error -- unsupported Intel ICX compiler! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+#endif /* __INTEL_CLANG_COMPILER */
+
+#if defined(__powerpc__)
+
+#if defined(__ibmxl_vrm__) && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) && \
+    !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000)
+
+#error -- unsupported xlC version! Only xlC 13.1 and 16.1 are supported. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* __ibmxl_vrm__ && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) &&
+          !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000) */
+
+#endif /* __powerpc__ */
+
+#if defined(__GNUC__)
+
+#if __GNUC__ > 13
+
+#error -- unsupported GNU version! gcc versions later than 13 are not supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* __GNUC__ > 13 */
+
+
+#if defined(__HORIZON__)
+#if (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3))
+#error -- unsupported HOS clang version! The version must be less than 18 and greater than 3.2. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+#endif /* (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) */
+#endif /* __HORIZON__ */
+
+#if defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__)
+
+#if (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3))
+#error -- unsupported clang version! clang version must be less than 18 and greater than 3.2. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) */
+
+#endif /* defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__) */
+
+
+#endif /* __GNUC__ */
+
+#if defined(_WIN32)
+
+#if _MSC_VER < 1910 || _MSC_VER >= 1950
+
+#error -- unsupported Microsoft Visual Studio version! Only the versions between 2017 and 2022 (inclusive) are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#elif _MSC_VER >= 1910 && _MSC_VER < 1910 /* the deprecation window is currently empty, so this branch never fires */
+
+#pragma message("support for this version of Microsoft Visual Studio has been deprecated! Only the versions between 2017 and 2022 (inclusive) are supported!")
+
+#endif /* (_MSC_VER < 1910 || _MSC_VER >= 1950) || (_MSC_VER >= 1910 && _MSC_VER < 1910) */
+
+#endif /* _WIN32 */
+#endif /* !__NV_NO_HOST_COMPILER_CHECK */
+
+
+/* configure host compiler */
+#if defined(__APPLE__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#if defined(__BLOCKS__) /* nvcc does not support closures */
+
+#undef __BLOCKS__
+
+#endif /* __BLOCKS__ */
+
+#elif defined(__ANDROID__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#elif defined(__QNX__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#elif defined(__HORIZON__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#elif defined(__GNUC__)
+
+#define _CRTIMP
+#define _ACRTIMP
+
+#include <features.h> /* for __THROW */
+
+#elif defined(_WIN32)
+
+#if _MSC_VER >= 1500
+
+#undef _USE_DECLSPECS_FOR_SAL
+#define _USE_DECLSPECS_FOR_SAL \
+        1
+
+#endif /* _MSC_VER >= 1500 */
+
+#if !defined(_CRT_NONSTDC_NO_WARNINGS)
+
+#define _CRT_NONSTDC_NO_WARNINGS /* to suppress warnings */
+
+#endif /* !_CRT_NONSTDC_NO_WARNINGS */
+
+#if !defined(_CRT_SECURE_NO_WARNINGS)
+
+#define _CRT_SECURE_NO_WARNINGS /* to suppress warnings */
+
+#endif /* !_CRT_SECURE_NO_WARNINGS */
+
+#if !defined(NOMINMAX)
+
+#define NOMINMAX /* min and max are part of cuda runtime */
+
+#endif /* !NOMINMAX */
+
+#include <crtdefs.h> /* for _CRTIMP */
+#if _MSC_VER >= 1900
+#include <corecrt.h> /* for _ACRTIMP */
+#endif /* _MSC_VER >= 1900 */
+
+#define __THROW
+
+#endif /* __APPLE__ */
+
+#endif /* __CUDACC_RTC__ */
+
+
+#if defined(__cplusplus) && defined(__CUDA_ARCH__) && (defined(__PGIC__) || defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER)))
+
+#if __CUDACC_RTC__
+typedef char *va_list;
+#else /* !__CUDACC_RTC__ */
+#include <stdarg.h> /* for the host compiler's va_list */
+#endif /* __CUDACC_RTC__ */
+
+
+#undef va_start
+#undef va_end
+#undef va_arg
+
+#ifdef __PGIC__
+
+#undef __builtin_va_end
+
+#define va_start(v,l) __builtin_alt_va_start(v,l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v,l) __builtin_alt_va_arg(v,l)
+
+#if (__cplusplus >= 201103L)
+#undef va_copy
+#define va_copy(d,s) __builtin_va_copy(d,s)
+#endif
+
+#else /* !__PGIC__ */
+
+
+#define va_start(ap, x) (__cu_va_start(&ap, x))
+#define va_end(ap) (__cu_va_end(&ap))
+#define va_arg(ap, t) (*((t *)__cu_va_arg(&ap, (t *)0)))
+
+#if (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L))
+#undef va_copy
+#define va_copy(apd, aps) (__cu_va_copy(&(apd), &(aps)))
+#endif /* (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L)) */
+#endif /* __PGIC__ */
+
+#endif /* defined(__cplusplus) && defined(__CUDA_ARCH__) && (defined(__PGIC__) || defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER))) */
+
+
+
+#endif /* __CUDACC__ */
+
+#endif /* !__HOST_CONFIG_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__
+#endif
diff --git a/miniCUDA124/include/crt/host_defines.h b/miniCUDA124/include/crt/host_defines.h
new file mode 100644
index 0000000000000000000000000000000000000000..667989ee04dabf4ed1178bd8bbd56b6add1b0bee
--- /dev/null
+++ b/miniCUDA124/include/crt/host_defines.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright 1993-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__
+#endif
+
+#if !defined(__HOST_DEFINES_H__)
+#define __HOST_DEFINES_H__
+
+#if defined(__CUDACC__) && !defined(__CUDACC_RTC__) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_ALLOW_UNSUPPORTED_LIBCPP)
+#include <cstddef> /* any standard C++ header defines _LIBCPP_VERSION when libc++ is in use */
+#if ((defined(_MSC_VER ) && (defined(_M_X64) || defined(_M_AMD64))) ||\
+     (defined(__x86_64__) || defined(__amd64__))) && defined(_LIBCPP_VERSION) && !(defined(__HORIZON__) || defined(__ANDROID__) || defined(__QNX__))
+#error "libc++ is not supported on x86 systems"
+#endif
+#endif
+
+/* CUDA JIT mode (__CUDACC_RTC__) also uses GNU style attributes */
+#if defined(__GNUC__) || (defined(__PGIC__) && defined(__linux__)) || defined(__CUDA_LIBDEVICE__) || defined(__CUDACC_RTC__)
+
+#if defined(__CUDACC_RTC__)
+#define __volatile__ volatile
+#endif /* __CUDACC_RTC__ */
+
+#define __no_return__ \
+        __attribute__((noreturn))
+
+#if defined(__CUDACC__) || defined(__CUDA_ARCH__) || defined(__CUDA_LIBDEVICE__)
+/* gcc allows users to define attributes with underscores,
+   e.g., __attribute__((__noinline__)).
+   Consider a non-CUDA source file (e.g. .cpp) that has the
+   above attribute specification, and includes this header file. In that case,
+   defining __noinline__ as below would cause a gcc compilation error.
+   Hence, only define __noinline__ when the code is being processed
+   by a CUDA compiler component.
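+
+   For example (an illustrative sketch), a host-only translation unit may
+   legitimately declare
+
+       void helper(void) __attribute__((__noinline__));
+
+   which compiles fine under plain gcc, but would expand to the invalid
+   __attribute__((__attribute__((noinline)))) if __noinline__ were defined
+   unconditionally here.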
+*/
+#define __noinline__ \
+        __attribute__((noinline))
+#endif /* __CUDACC__ || __CUDA_ARCH__ || __CUDA_LIBDEVICE__ */
+
+#undef __forceinline__
+#define __forceinline__ \
+        __inline__ __attribute__((always_inline))
+#define __inline_hint__ \
+        __attribute__((nv_inline_hint))
+#define __align__(n) \
+        __attribute__((aligned(n)))
+#define __maxnreg__(a) \
+        __attribute__((maxnreg(a)))
+#define __thread__ \
+        __thread
+#define __import__
+#define __export__
+#define __cdecl
+#define __annotate__(a) \
+        __attribute__((a))
+#define __location__(a) \
+        __annotate__(a)
+#define CUDARTAPI
+#define CUDARTAPI_CDECL
+
+#elif defined(_MSC_VER)
+
+#if _MSC_VER >= 1400
+
+#define __restrict__ \
+        __restrict
+
+#else /* _MSC_VER >= 1400 */
+
+#define __restrict__
+
+#endif /* _MSC_VER >= 1400 */
+
+#define __inline__ \
+        __inline
+#define __no_return__ \
+        __declspec(noreturn)
+#define __noinline__ \
+        __declspec(noinline)
+#define __forceinline__ \
+        __forceinline
+#define __inline_hint__ \
+        __declspec(nv_inline_hint)
+#define __align__(n) \
+        __declspec(align(n))
+#define __maxnreg__(n) \
+        __declspec(maxnreg(n))
+#define __thread__ \
+        __declspec(thread)
+#define __import__ \
+        __declspec(dllimport)
+#define __export__ \
+        __declspec(dllexport)
+#define __annotate__(a) \
+        __declspec(a)
+#define __location__(a) \
+        __annotate__(__##a##__)
+#define CUDARTAPI \
+        __stdcall
+#define CUDARTAPI_CDECL \
+        __cdecl
+
+#else /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */
+
+#define __inline__
+
+#if !defined(__align__)
+
+#error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for '__align__' !!! ---
+
+#endif /* !__align__ */
+
+#if !defined(CUDARTAPI)
+
+#error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for 'CUDARTAPI' !!! ---
+
+#endif /* !CUDARTAPI */
+
+#endif /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */
+
+#if (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !defined(__clang__)))) || \
+    (defined(_MSC_VER) && _MSC_VER < 1900) || \
+    (!defined(__GNUC__) && !defined(_MSC_VER))
+
+#define __specialization_static \
+        static
+
+#else /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) ||
+         (_MSC_VER && _MSC_VER < 1900) ||
+         (!__GNUC__ && !_MSC_VER) */
+
+#define __specialization_static
+
+#endif /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) ||
+          (_MSC_VER && _MSC_VER < 1900) ||
+          (!__GNUC__ && !_MSC_VER) */
+
+#if !defined(__CUDACC__) && !defined(__CUDA_LIBDEVICE__)
+
+#undef __annotate__
+#define __annotate__(a)
+
+#else /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */
+
+#define __launch_bounds__(...) \
+        __annotate__(launch_bounds(__VA_ARGS__))
+
+#endif /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */
+
+#if defined(__CUDACC__) || defined(__CUDA_LIBDEVICE__) || \
+    defined(__GNUC__) || defined(_WIN64)
+
+#define __builtin_align__(a) \
+        __align__(a)
+
+#else /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */
+
+#define __builtin_align__(a)
+
+#endif /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */
+
+#if defined(__CUDACC__) || !defined(__grid_constant__)
+#define __grid_constant__ \
+        __location__(grid_constant)
+#endif /* defined(__CUDACC__) || !defined(__grid_constant__) */
+
+#if defined(__CUDACC__) || !defined(__host__)
+#define __host__ \
+        __location__(host)
+#endif /* defined(__CUDACC__) || !defined(__host__) */
+#if defined(__CUDACC__) || !defined(__device__)
+#define __device__ \
+        __location__(device)
+#endif /* defined(__CUDACC__) || !defined(__device__) */
+#if defined(__CUDACC__) || !defined(__global__)
+#define __global__ \
+        __location__(global)
+#endif /* defined(__CUDACC__) || !defined(__global__) */
+#if defined(__CUDACC__) || !defined(__shared__)
+#define __shared__ \
+        __location__(shared)
+#endif /* defined(__CUDACC__) || !defined(__shared__) */
+#if defined(__CUDACC__) || !defined(__constant__)
+#define __constant__ \
+        __location__(constant)
+#endif /* defined(__CUDACC__) || !defined(__constant__) */
+#if defined(__CUDACC__) || !defined(__managed__)
+#define __managed__ \
+        __location__(managed)
+#endif /* defined(__CUDACC__) || !defined(__managed__) */
+
+#if !defined(__CUDACC__)
+#define __device_builtin__
+#define __device_builtin_texture_type__
+#define __device_builtin_surface_type__
+#define __cudart_builtin__
+#else /* defined(__CUDACC__) */
+#define __device_builtin__ \
+        __location__(device_builtin)
+#define __device_builtin_texture_type__ \
+        __location__(device_builtin_texture_type)
+#define __device_builtin_surface_type__ \
+        __location__(device_builtin_surface_type)
+#define __cudart_builtin__ \
+        __location__(cudart_builtin)
+#endif /* !defined(__CUDACC__) */
+
+#if defined(__CUDACC__) || !defined(__cluster_dims__)
+#if defined(_MSC_VER)
+#define __cluster_dims__(...) \
+        __declspec(__cluster_dims__(__VA_ARGS__))
+
+#else /* !defined(_MSC_VER) */
+#define __cluster_dims__(...) \
+        __attribute__((cluster_dims(__VA_ARGS__)))
+#endif /* defined(_MSC_VER) */
+#endif /* defined(__CUDACC__) || !defined(__cluster_dims__) */
+
+#define __CUDA_ARCH_HAS_FEATURE__(_FEAT) __CUDA_ARCH_FEAT_##_FEAT
+
+#endif /* !__HOST_DEFINES_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__
+#endif
diff --git a/miniCUDA124/include/crt/host_runtime.h b/miniCUDA124/include/crt/host_runtime.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe64f5d2ac28d6967fa77b2b2793819173d279d8
--- /dev/null
+++ b/miniCUDA124/include/crt/host_runtime.h
@@ -0,0 +1,306 @@
+/*
+ * NVIDIA_COPYRIGHT_BEGIN
+ *
+ * Copyright (c) 2008-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ *
+ * NVIDIA_COPYRIGHT_END
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/host_runtime.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/host_runtime.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
+#endif
+
+#if !defined(__CUDA_INTERNAL_COMPILATION__)
+
+#define __CUDA_INTERNAL_COMPILATION__
+#define __text__
+#define __surf__
+#define __name__shadow_var(c, cpp) \
+        #c
+#define __name__text_var(c, cpp) \
+        #cpp
+#define __host__shadow_var(c, cpp) \
+        cpp
+#define __text_var(c, cpp) \
+        cpp
+#define __device_fun(fun) \
+        #fun
+#define __device_var(var) \
+        #var
+#define __device__text_var(c, cpp) \
+        #c
+#define __device__shadow_var(c, cpp) \
+        #c
+
+#if defined(_WIN32) && !defined(_WIN64)
+
+#define __pad__(f) \
+        f
+
+#else /* _WIN32 && !_WIN64 */
+
+#define __pad__(f)
+
+#endif /* _WIN32 && !_WIN64 */
+
+#include "builtin_types.h"
+#include "storage_class.h"
+
+#else /* !__CUDA_INTERNAL_COMPILATION__ */
+
+/* returns the address of val even when T overloads operator& */
+template <class T>
+static inline T *__cudaAddressOf(T &val)
+{
+    return (T *)((void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(val)))));
+}
+
+#define __cudaRegisterBinary(X) \
+        __cudaFatCubinHandle = __cudaRegisterFatBinary((void*)&__fatDeviceText); \
+        { void (*callback_fp)(void **) = (void (*)(void **))(X); (*callback_fp)(__cudaFatCubinHandle); __cudaRegisterFatBinaryEnd(__cudaFatCubinHandle); }\
+        atexit(__cudaUnregisterBinaryUtil)
+
+#define __cudaRegisterVariable(handle, var, ext, size, constant, global) \
+        __cudaRegisterVar(handle, (char*)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
+#define __cudaRegisterManagedVariable(handle, var, ext, size, constant, global) \
+        __cudaRegisterManagedVar(handle, (void **)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
+
+#define __cudaRegisterGlobalTexture(handle, tex, dim, norm, ext) \
+        __cudaRegisterTexture(handle, (const struct textureReference*)&tex, (const void**)(void*)__device##tex, __name##tex, dim, norm, ext)
+#define __cudaRegisterGlobalSurface(handle, surf, dim, ext) \
+        __cudaRegisterSurface(handle, (const struct surfaceReference*)&surf, (const void**)(void*)__device##surf, __name##surf, dim, ext)
+#define __cudaRegisterEntry(handle, funptr, fun, thread_limit) \
+        __cudaRegisterFunction(handle, (const char*)funptr, (char*)__device_fun(fun), #fun, -1, (uint3*)0, (uint3*)0, (dim3*)0, (dim3*)0, (int*)0)
+
+extern "C" cudaError_t CUDARTAPI __cudaPopCallConfiguration(
+        dim3 *gridDim,
+        dim3 *blockDim,
+        size_t *sharedMem,
+        void *stream
+);
+
+#define __cudaLaunchPrologue(size) \
+        void * __args_arr[size]; \
+        int __args_idx = 0
+
+#define __cudaSetupArg(arg, offset) \
+        __args_arr[__args_idx] = (void *)__cudaAddressOf(arg); ++__args_idx
+
+#define __cudaSetupArgSimple(arg, offset) \
+        __args_arr[__args_idx] = (void *)(char *)&arg; ++__args_idx
+
+#if defined(__GNUC__)
+#define __NV_ATTR_UNUSED_FOR_LAUNCH __attribute__((unused))
+#else /* !__GNUC__ */
+#define __NV_ATTR_UNUSED_FOR_LAUNCH
+#endif /* __GNUC__ */
+
+#ifdef __NV_LEGACY_LAUNCH
+/* the use of __args_idx in the expression below avoids host compiler warning about it being an
+   unused variable when the launch has no arguments */
+#define __cudaLaunch(fun) \
+        { volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
+        dim3 __gridDim, __blockDim;\
+        size_t __sharedMem; \
+        cudaStream_t __stream; \
+        if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
+          return; \
+        if (__args_idx == 0) {\
+          (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
+        } else { \
+          (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
+        }\
+        }
+#else /* !__NV_LEGACY_LAUNCH */
+#define __cudaLaunch(fun) \
+        { volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
+        static cudaKernel_t __handle = 0; \
+        volatile static bool __tmp __NV_ATTR_UNUSED_FOR_LAUNCH = (__cudaGetKernel(&__handle, (const void *)fun) == cudaSuccess); \
+        dim3 __gridDim, __blockDim;\
+        size_t __sharedMem; \
+        cudaStream_t __stream; \
+        if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
+          return; \
+        if (__args_idx == 0) {\
+          (void)__cudaLaunchKernel_helper(__handle, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
+        } else { \
+          (void)__cudaLaunchKernel_helper(__handle, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
+        }\
+        }
+#endif /* __NV_LEGACY_LAUNCH */
+
+#if defined(__GNUC__)
+#define __nv_dummy_param_ref(param) \
+        { volatile static void **__ref __attribute__((unused)); __ref = (volatile void **)param; }
+#else /* __GNUC__ */
+#define __nv_dummy_param_ref(param) \
+        { volatile static void **__ref; __ref = (volatile void **)param; }
+#endif /* __GNUC__ */
+
+static void ____nv_dummy_param_ref(void *param) __nv_dummy_param_ref(param)
+
+#define __REGISTERFUNCNAME_CORE(X) __cudaRegisterLinkedBinary##X
+#define __REGISTERFUNCNAME(X) __REGISTERFUNCNAME_CORE(X)
+
+extern "C" {
+void __REGISTERFUNCNAME( __NV_MODULE_ID ) ( void (*)(void **), void *, void *, void (*)(void *));
+}
+
+#define __TO_STRING_CORE(X) #X
+#define __TO_STRING(X) __TO_STRING_CORE(X)
+
+extern "C" {
+#if defined(_WIN32)
+#pragma data_seg("__nv_module_id")
+  static const __declspec(allocate("__nv_module_id")) unsigned char __module_id_str[] = __TO_STRING(__NV_MODULE_ID);
+#pragma data_seg()
+#elif defined(__APPLE__)
+  static const unsigned char __module_id_str[] __attribute__((section ("__NV_CUDA,__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
+#else
+  static const unsigned char __module_id_str[] __attribute__((section ("__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
+#endif
+
+#undef __FATIDNAME_CORE
+#undef __FATIDNAME
+#define __FATIDNAME_CORE(X) __fatbinwrap##X
+#define __FATIDNAME(X) __FATIDNAME_CORE(X)
+
+#define ____cudaRegisterLinkedBinary(X) \
+{ __REGISTERFUNCNAME(__NV_MODULE_ID) (( void (*)(void **))(X), (void *)&__FATIDNAME(__NV_MODULE_ID), (void *)&__module_id_str, (void (*)(void *))&____nv_dummy_param_ref); }
+
+}
+
+extern "C" {
+extern void** CUDARTAPI __cudaRegisterFatBinary(
+        void *fatCubin
+);
+
+extern void CUDARTAPI __cudaRegisterFatBinaryEnd(
+        void **fatCubinHandle
+);
+
+extern void CUDARTAPI __cudaUnregisterFatBinary(
+        void **fatCubinHandle
+);
+
+extern void CUDARTAPI __cudaRegisterVar(
+        void **fatCubinHandle,
+        char *hostVar,
+        char *deviceAddress,
+        const char *deviceName,
+        int ext,
+        size_t size,
+        int constant,
+        int global
+);
+
+extern void CUDARTAPI __cudaRegisterManagedVar(
+        void **fatCubinHandle,
+        void **hostVarPtrAddress,
+        char *deviceAddress,
+        const char *deviceName,
+        int ext,
+        size_t size,
+        int constant,
+        int global
+);
+
+extern char CUDARTAPI __cudaInitModule(
+        void **fatCubinHandle
+);
+
+extern void CUDARTAPI __cudaRegisterTexture(
+        void **fatCubinHandle,
+        const struct textureReference *hostVar,
+        const void **deviceAddress,
+        const char *deviceName,
+        int dim,
+        int norm,
+        int ext
+);
+
+extern void CUDARTAPI __cudaRegisterSurface(
+        void **fatCubinHandle,
+        const struct surfaceReference *hostVar,
+        const void **deviceAddress,
+        const char *deviceName,
+        int dim,
+        int ext
+);
+
+extern void CUDARTAPI __cudaRegisterFunction(
+        void **fatCubinHandle,
+        const char *hostFun,
+        char *deviceFun,
+        const char *deviceName,
+        int thread_limit,
+        uint3 *tid,
+        uint3 *bid,
+        dim3 *bDim,
+        dim3 *gDim,
+        int *wSize
+);
+
+#if defined(__APPLE__)
+extern "C" int atexit(void (*)(void));
+
+#elif defined(__GNUC__) && !defined(__ANDROID__) && !defined(__HORIZON__)
+extern int atexit(void(*)(void)) throw();
+
+#elif defined(__HORIZON__)
+
+// __TEMP_WAR__ 200132570 HOS : Disable atexit call until it works
+#define atexit(p)
+
+#else /* __GNUC__ && !__ANDROID__ */
+extern int __cdecl atexit(void(__cdecl *)(void));
+#endif
+
+}
+
+static void **__cudaFatCubinHandle;
+
+static void __cdecl __cudaUnregisterBinaryUtil(void)
+{
+  ____nv_dummy_param_ref((void *)&__cudaFatCubinHandle);
+  __cudaUnregisterFatBinary(__cudaFatCubinHandle);
+}
+
+static char __nv_init_managed_rt_with_module(void **handle)
+{
+  return __cudaInitModule(handle);
+}
+
+#include "common_functions.h"
+
+#pragma pack()
+
+#if defined(_WIN32)
+
+#pragma warning(disable: 4099)
+
+#if !defined(_WIN64)
+
+#pragma warning(disable: 4408)
+
+#endif /* !_WIN64 */
+
+#endif /* _WIN32 */
+
+#endif /* !__CUDA_INTERNAL_COMPILATION__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
+#endif
diff --git a/miniCUDA124/include/crt/math_functions.h b/miniCUDA124/include/crt/math_functions.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed7d0a783afa1feb1a7b44162099c273cbbbb6f6
--- /dev/null
+++ b/miniCUDA124/include/crt/math_functions.h
@@ -0,0 +1,12208 @@
+/*
+ * Copyright 1993-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/math_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/math_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H__
+#endif
+
+#if !defined(__MATH_FUNCTIONS_H__)
+#define __MATH_FUNCTIONS_H__
+
+#if defined(__QNX__) && (__GNUC__ >= 5) && defined(__CUDACC__)
+#if __has_include(<__config>)
+#include <__config>
+#endif
+#endif
+
+/**
+ * \defgroup CUDA_MATH Mathematical Functions
+ *
+ * CUDA mathematical functions are always available in device code.
+ *
+ * Host implementations of the common mathematical functions are mapped
+ * in a platform-specific way to standard math library functions, provided
+ * by the host compiler and respective host libm where available.
+ * Some functions, not available with the host compilers, are implemented
+ * in the crt/math_functions.hpp header file.
+ * For example, see ::erfinv(). Other, less common functions,
+ * like ::rhypot(), ::cyl_bessel_i0() are only available in device code.
+ *
+ * Note that many floating-point and integer function names are
+ * overloaded for different argument types. For example, the ::log()
+ * function has the following prototypes:
+ * \code
+ * double log(double x);
+ * float log(float x);
+ * float logf(float x);
+ * \endcode
+ *
+ * Note also that due to implementation constraints, certain math functions
+ * from the std:: namespace may be callable in device code even via explicitly
+ * qualified std:: names. However, such use is discouraged, since this
+ * capability is unsupported, unverified, undocumented, not portable, and
+ * may change without notice.
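+ *
+ * For example (an illustrative sketch), the overloads let one spelling
+ * serve both precisions, and the global-namespace names are the supported
+ * way to reach them from device code:
+ * \code
+ * __device__ float hypotenuse(float a, float b)
+ * {
+ *   return sqrt(a * a + b * b);  // resolves to the float overload of ::sqrt
+ * }
+ * \endcode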
+ */
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#include "builtin_types.h"
+#include "host_defines.h"
+
+//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
+#define EXCLUDE_FROM_RTC
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+extern "C"
+{
+
+/* Define math function DOXYGEN toplevel groups, functions will
+   be added to these groups later.
+*/
+/**
+ * \defgroup CUDA_MATH_SINGLE Single Precision Mathematical Functions
+ * This section describes single precision mathematical functions.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ * \defgroup CUDA_MATH_DOUBLE Double Precision Mathematical Functions
+ * This section describes double precision mathematical functions.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ * \defgroup CUDA_MATH_INT Integer Mathematical Functions
+ * This section describes integer mathematical functions.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ * \defgroup CUDA_MATH_INTRINSIC_SINGLE Single Precision Intrinsics
+ * This section describes single precision intrinsic functions that are
+ * only supported in device code.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ * \defgroup CUDA_MATH_INTRINSIC_DOUBLE Double Precision Intrinsics
+ * This section describes double precision intrinsic functions that are
+ * only supported in device code.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ * \defgroup CUDA_MATH_INTRINSIC_INT Integer Intrinsics
+ * This section describes integer intrinsic functions that are
+ * only supported in device code.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ * \defgroup CUDA_MATH_INTRINSIC_CAST Type Casting Intrinsics
+ * This section describes type casting intrinsic functions that are
+ * only supported in device code.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+/**
+ *
+ * \defgroup CUDA_MATH_INTRINSIC_SIMD SIMD Intrinsics
+ * This section describes SIMD intrinsic functions that are
+ * only supported in device code.
+ * To use these functions you do not need to include any additional
+ * header files in your program.
+ */
+
+
+/**
+ * @}
+ */
+#define __DEVICE_FUNCTIONS_DECL__ __host__ __device__
+#if !defined(_MSC_VER)
+#define __CUDA_MATH_CRTIMP
+#else
+#if _MSC_VER < 1900
+#define __CUDA_MATH_CRTIMP _CRTIMP
+#else
+#define __CUDA_MATH_CRTIMP _ACRTIMP
+#endif
+#endif
+
+#if defined(__ANDROID__) && (__ANDROID_API__ <= 20) && !defined(__aarch64__)
+static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ int abs(int);
+static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long int labs(long int);
+static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long long int llabs(long long int);
+#else /* __ANDROID__ */
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the absolute value of the input \p int argument.
+ *
+ * Calculate the absolute value of the input argument \p a.
+ *
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ int __cdecl abs(int a) __THROW;
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the absolute value of the input \p long \p int argument.
+ *
+ * Calculate the absolute value of the input argument \p a.
+ *
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long int __cdecl labs(long int a) __THROW;
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the absolute value of the input \p long \p long \p int argument.
+ *
+ * Calculate the absolute value of the input argument \p a.
+ *
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ long long int llabs(long long int a) __THROW;
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+}
+#endif
+#endif /* __ANDROID__ */
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+/* put all math functions in std */
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the absolute value of the input argument.
+ *
+ * Calculate the absolute value of the input argument \p x.
+ *
+ * \return
+ * Returns the absolute value of the input argument.
+ * - fabs(±∞) returns +∞.
+ * - fabs(±0) returns +0.
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl fabs(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the absolute value of its argument
+ *
+ * Calculate the absolute value of the input argument \p x.
+ *
+ * \return
+ * Returns the absolute value of its argument.
+ * - fabsf(±∞) returns +∞.
+ * - fabsf(±0) returns +0.
+ * - fabsf(NaN) returns an unspecified NaN.
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fabsf(float x) __THROW;
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the minimum value of the input \p int arguments.
+ *
+ * Calculate the minimum value of the arguments \p a and \p b.
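+ *
+ * For example (illustrative): min(-3, 7) returns -3.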
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int min(const int a, const int b);
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the minimum value of the input \p unsigned \p int arguments.
+ *
+ * Calculate the minimum value of the arguments \p a and \p b.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int umin(const unsigned int a, const unsigned int b);
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the minimum value of the input \p long \p long \p int arguments.
+ *
+ * Calculate the minimum value of the arguments \p a and \p b.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llmin(const long long int a, const long long int b);
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the minimum value of the input \p unsigned \p long \p long \p int arguments.
+ *
+ * Calculate the minimum value of the arguments \p a and \p b.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int ullmin(const unsigned long long int a, const unsigned long long int b);
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Determine the minimum numeric value of the arguments.
+ *
+ * Determines the minimum numeric value of the arguments \p x and \p y. Treats NaN
+ * arguments as missing data. If one argument is a NaN and the other is a legitimate numeric
+ * value, the numeric value is chosen.
+ *
+ * \return
+ * Returns the minimum numeric value of the arguments \p x and \p y.
+ * - If both arguments are NaN, returns NaN.
+ * - If one argument is NaN, returns the numeric argument.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fminf(float x, float y) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fminf(float x, float y);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Determine the minimum numeric value of the arguments.
+ *
+ * Determines the minimum numeric value of the arguments \p x and \p y. Treats NaN
+ * arguments as missing data. If one argument is a NaN and the other is a legitimate numeric
+ * value, the numeric value is chosen.
+ *
+ * \return
+ * Returns the minimum numeric value of the arguments \p x and \p y.
+ * - If both arguments are NaN, returns NaN.
+ * - If one argument is NaN, returns the numeric argument.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fmin(double x, double y) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fmin(double x, double y);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the maximum value of the input \p int arguments.
+ *
+ * Calculate the maximum value of the arguments \p a and \p b.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int max(const int a, const int b);
+
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the maximum value of the input \p unsigned \p int arguments.
+ *
+ * Calculate the maximum value of the arguments \p a and \p b.
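+ *
+ * For example (illustrative): umax(1u, 0xFFFFFFFFu) returns 0xFFFFFFFFu,
+ * whereas the signed max() would compare 0xFFFFFFFF as -1 and return 1.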
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int umax(const unsigned int a, const unsigned int b);
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the maximum value of the input \p long \p long \p int arguments.
+ *
+ * Calculate the maximum value of the arguments \p a and \p b.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llmax(const long long int a, const long long int b);
+/**
+ * \ingroup CUDA_MATH_INT
+ * \brief Calculate the maximum value of the input \p unsigned \p long \p long \p int arguments.
+ *
+ * Calculate the maximum value of the arguments \p a and \p b.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int ullmax(const unsigned long long int a, const unsigned long long int b);
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Determine the maximum numeric value of the arguments.
+ *
+ * Determines the maximum numeric value of the arguments \p x and \p y. Treats NaN
+ * arguments as missing data. If one argument is a NaN and the other is a legitimate numeric
+ * value, the numeric value is chosen.
+ *
+ * \return
+ * Returns the maximum numeric value of the arguments \p x and \p y.
+ * - If both arguments are NaN, returns NaN.
+ * - If one argument is NaN, returns the numeric argument.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fmaxf(float x, float y) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fmaxf(float x, float y);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Determine the maximum numeric value of the arguments.
+ *
+ * Determines the maximum numeric value of the arguments \p x and \p y. Treats NaN
+ * arguments as missing data. If one argument is a NaN and the other is a legitimate numeric
+ * value, the numeric value is chosen.
+ *
+ * \return
+ * Returns the maximum numeric value of the arguments \p x and \p y.
+ * - If both arguments are NaN, returns NaN.
+ * - If one argument is NaN, returns the numeric argument.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fmax(double, double) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fmax(double, double);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the sine of the input argument.
+ *
+ * Calculate the sine of the input argument \p x (measured in radians).
+ *
+ * \return
+ * - sin(±0) returns ±0.
+ * - sin(±∞) returns NaN.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl sin(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the cosine of the input argument.
+ *
+ * Calculate the cosine of the input argument \p x (measured in radians).
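+ *
+ * For example (illustrative): cos(0.0) returns 1.0, and cos(M_PI) is -1.0
+ * up to rounding.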
+ * + * \return + * - cos( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cos( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl cos(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine and cosine of the first input argument. + * + * Calculate the sine and cosine of the first input argument \p x (measured + * in radians). The results for sine and cosine are written into the + * second argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sin() and ::cos(). + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincos(double x, double *sptr, double *cptr) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine and cosine of the first input argument. + * + * Calculate the sine and cosine of the first input argument \p x (measured + * in radians). The results for sine and cosine are written into the second + * argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sinf() and ::cosf(). + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincosf(float x, float *sptr, float *cptr) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the tangent of the input argument. + * + * Calculate the tangent of the input argument \p x (measured in radians). + * + * \return + * - tan( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tan( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl tan(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the square root of the input argument. + * + * Calculate the nonnegative square root of \p x, + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * - sqrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sqrt( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - sqrt(\p x) returns NaN if \p x is less than 0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl sqrt(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the reciprocal of the square root of the input argument. 
+ * + * Calculate the reciprocal of the nonnegative square root of \p x, + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * - rsqrt( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - rsqrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - rsqrt(\p x) returns NaN if \p x is less than 0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double rsqrt(double x); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the reciprocal of the square root of the input argument. + * + * Calculate the reciprocal of the nonnegative square root of \p x, + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * - rsqrtf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - rsqrtf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - rsqrtf(\p x) returns NaN if \p x is less than 0. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float rsqrtf(float x); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 2 logarithm of the input argument. + * + * Calculate the base 2 logarithm of the input argument \p x. + * + * \return + * - log2( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log2(1) returns +0. + * - log2(\p x) returns NaN for \p x < 0. + * - log2( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double log2(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl log2(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 2 exponential of the input argument. + * + * Calculate + * \latexonly $2^x$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * x + * + * + * \endxmlonly, + * the base 2 exponential of the input argument \p x. + * + * \return + * - exp2( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp2( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. 
+ * - exp2( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double exp2(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl exp2(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 2 exponential of the input argument. + * + * Calculate + * \latexonly $2^x$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * x + * + * + * \endxmlonly, + * the base 2 exponential of the input argument \p x. + * + * \return + * - exp2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp2f( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp2f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float exp2f(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl exp2f(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 10 exponential of the input argument. + * + * Calculate + * \latexonly $10^x$ \endlatexonly + * \xmlonly + * + * + * + * 10 + * x + * + * + * \endxmlonly, + * the base 10 exponential of the input argument \p x. + * + * \return + * - exp10( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp10( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp10( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double exp10(double x) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 10 exponential of the input argument. + * + * Calculate + * \latexonly $10^x$ \endlatexonly + * \xmlonly + * + * + * + * 10 + * x + * + * + * \endxmlonly, + * the base 10 exponential of the input argument \p x. + * + * \return + * - exp10f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - exp10f( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - exp10f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float exp10f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument, minus 1. + * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly + * -1, the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x, minus 1. + * + * \return + * - expm1( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - expm1( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns -1. + * - expm1( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double expm1(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl expm1(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument, minus 1. + * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly + * -1, the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x, minus 1. + * + * \return + * - expm1f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - expm1f( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns -1. + * - expm1f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float expm1f(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl expm1f(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 2 logarithm of the input argument. + * + * Calculate the base 2 logarithm of the input argument \p x. + * + * \return + * - log2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log2f(1) returns +0. + * - log2f(\p x) returns NaN for \p x < 0. 
+ * - log2f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float log2f(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl log2f(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base 10 logarithm of the input argument. + * + * Calculate the base 10 logarithm of the input argument \p x. + * + * \return + * - log10( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log10(1) returns +0. + * - log10(\p x) returns NaN for \p x < 0. + * - log10( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl log10(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * logarithm of the input argument. + * + * Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * logarithm of the input argument \p x. + * + * \return + * - log( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log(1) returns +0. + * - log(\p x) returns NaN for \p x < 0. + * - log( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl log(double x) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of + * \latexonly $\log_{e}(1+x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * ( + * 1 + * + + * x + * ) + * + * \endxmlonly. + * + * Calculate the value of + * \latexonly $\log_{e}(1+x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * ( + * 1 + * + + * x + * ) + * + * \endxmlonly + * of the input argument \p x. + * + * \return + * - log1p( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - log1p(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log1p(\p x) returns NaN for \p x < -1. + * - log1p( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the value of \latexonly $\log_{e}(1+x)$ \endlatexonly.
+ *
+ * Calculate the value of \latexonly $\log_{e}(1+x)$ \endlatexonly of the input argument \p x.
+ *
+ * \return
+ * - log1p(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - log1p(-1) returns \latexonly $-\infty$ \endlatexonly.
+ * - log1p(\p x) returns NaN for \p x < -1.
+ * - log1p(\latexonly $+\infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double log1p(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl log1p(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the value of \latexonly $\log_{e}(1+x)$ \endlatexonly.
+ *
+ * Calculate the value of \latexonly $\log_{e}(1+x)$ \endlatexonly of the input argument \p x.
+ *
+ * \return
+ * - log1pf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - log1pf(-1) returns \latexonly $-\infty$ \endlatexonly.
+ * - log1pf(\p x) returns NaN for \p x < -1.
+ * - log1pf(\latexonly $+\infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float log1pf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl log1pf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the largest integer less than or equal to \p x.
+ *
+ * Calculates the largest integer value which is less than or equal to \p x.
+ *
+ * \return
+ * Returns \latexonly $\lfloor x \rfloor$ \endlatexonly expressed as a floating-point number.
+ * - floor(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ * - floor(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl floor(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the base \latexonly $e$ \endlatexonly exponential of the input argument.
+ *
+ * Calculate \latexonly $e^x$ \endlatexonly, the base \latexonly $e$ \endlatexonly exponential of the input argument \p x.
+ *
+ * \return
+ * - exp(\latexonly $\pm 0$ \endlatexonly) returns 1.
+ * - exp(\latexonly $-\infty$ \endlatexonly) returns +0.
+ * - exp(\latexonly $+\infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl exp(double x) __THROW;
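+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). log1p is the companion of expm1:
+ * log(1.0 + x) would round 1 + x to 1 once |x| drops below DBL_EPSILON,
+ * while log1p stays accurate:
+ *
+ *   __global__ void log1p_demo(const double *x, double *y, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) y[i] = log1p(x[i]);  // ~x[i] for tiny nonzero x[i]
+ *   }
+ */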
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the hyperbolic cosine of the input argument.
+ *
+ * Calculate the hyperbolic cosine of the input argument \p x.
+ *
+ * \return
+ * - cosh(\latexonly $\pm 0$ \endlatexonly) returns 1.
+ * - cosh(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl cosh(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the hyperbolic sine of the input argument.
+ *
+ * Calculate the hyperbolic sine of the input argument \p x.
+ *
+ * \return
+ * - sinh(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - sinh(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl sinh(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the hyperbolic tangent of the input argument.
+ *
+ * Calculate the hyperbolic tangent of the input argument \p x.
+ *
+ * \return
+ * - tanh(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - tanh(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm 1$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl tanh(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the nonnegative inverse hyperbolic cosine of the input argument.
+ *
+ * Calculate the nonnegative inverse hyperbolic cosine of the input argument \p x.
+ *
+ * \return
+ * Result will be in the interval [0, \latexonly $+\infty$ \endlatexonly].
+ * - acosh(1) returns 0.
+ * - acosh(\p x) returns NaN for \p x in the interval [\latexonly $-\infty$ \endlatexonly, 1).
+ * - acosh(\latexonly $+\infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double acosh(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl acosh(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the nonnegative inverse hyperbolic cosine of the input argument.
+ *
+ * Calculate the nonnegative inverse hyperbolic cosine of the input argument \p x.
+ *
+ * \return
+ * Result will be in the interval [0, \latexonly $+\infty$ \endlatexonly].
+ * - acoshf(1) returns 0.
+ * - acoshf(\p x) returns NaN for \p x in the interval [\latexonly $-\infty$ \endlatexonly, 1).
+ * - acoshf(\latexonly $+\infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float acoshf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl acoshf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the inverse hyperbolic sine of the input argument.
+ *
+ * Calculate the inverse hyperbolic sine of the input argument \p x.
+ *
+ * \return
+ * - asinh(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - asinh(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double asinh(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl asinh(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the inverse hyperbolic sine of the input argument.
+ *
+ * Calculate the inverse hyperbolic sine of the input argument \p x.
+ *
+ * \return
+ * - asinhf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - asinhf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float asinhf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl asinhf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the inverse hyperbolic tangent of the input argument.
+ *
+ * Calculate the inverse hyperbolic tangent of the input argument \p x.
+ *
+ * \return
+ * - atanh(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - atanh(\latexonly $\pm 1$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ * - atanh(\p x) returns NaN for \p x outside interval [-1, 1].
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double atanh(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl atanh(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the inverse hyperbolic tangent of the input argument.
+ *
+ * Calculate the inverse hyperbolic tangent of the input argument \p x.
+ *
+ * \return
+ * - atanhf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - atanhf(\latexonly $\pm 1$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ * - atanhf(\p x) returns NaN for \p x outside interval [-1, 1].
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float atanhf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl atanhf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the value of \latexonly $x\cdot 2^{exp}$ \endlatexonly.
+ *
+ * Calculate the value of \latexonly $x\cdot 2^{exp}$ \endlatexonly of the input arguments \p x and \p exp.
+ *
+ * \return
+ * - ldexp(\p x, \p exp) is equivalent to scalbn(\p x, \p exp).
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl ldexp(double x, int exp) __THROW;
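+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). ldexp forms x * 2^exp by adjusting the
+ * exponent field, so the scaling is exact whenever the result stays in
+ * the normal range:
+ *
+ *   __global__ void ldexp_demo(const double *x, double *y, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) y[i] = ldexp(x[i], 10);  // multiplies by 1024 exactly
+ *   }
+ */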
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the value of \latexonly $x\cdot 2^{exp}$ \endlatexonly.
+ *
+ * Calculate the value of \latexonly $x\cdot 2^{exp}$ \endlatexonly of the input arguments \p x and \p exp.
+ *
+ * \return
+ * - ldexpf(\p x, \p exp) is equivalent to scalbnf(\p x, \p exp).
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float ldexpf(float x, int exp) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the floating-point representation of the exponent of the input argument.
+ *
+ * Calculate the floating-point representation of the exponent of the input argument \p x.
+ *
+ * \return
+ * - logb(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $-\infty$ \endlatexonly.
+ * - logb(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double logb(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl logb(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the floating-point representation of the exponent of the input argument.
+ *
+ * Calculate the floating-point representation of the exponent of the input argument \p x.
+ *
+ * \return
+ * - logbf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $-\infty$ \endlatexonly.
+ * - logbf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $+\infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float logbf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl logbf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Compute the unbiased integer exponent of the argument.
+ *
+ * Calculates the unbiased integer exponent of the input argument \p x.
+ *
+ * \return
+ * - If successful, returns the unbiased exponent of the argument.
+ * - ilogb(\latexonly $\pm 0$ \endlatexonly) returns INT_MIN.
+ * - ilogb(NaN) returns INT_MIN.
+ * - ilogb(\latexonly $\pm \infty$ \endlatexonly) returns INT_MAX.
+ * - Note: the behavior above does not take into account FP_ILOGB0 or FP_ILOGBNAN.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int ilogb(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP int __cdecl ilogb(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
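+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). ilogb returns the unbiased binary
+ * exponent as an int, e.g. ilogb(1.0) == 0, ilogb(1024.0) == 10,
+ * ilogb(0.1) == -4:
+ *
+ *   __global__ void ilogb_demo(const double *x, int *e, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) e[i] = ilogb(x[i]);  // INT_MIN for 0/NaN, INT_MAX for inf
+ *   }
+ */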
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Compute the unbiased integer exponent of the argument.
+ *
+ * Calculates the unbiased integer exponent of the input argument \p x.
+ *
+ * \return
+ * - If successful, returns the unbiased exponent of the argument.
+ * - ilogbf(\latexonly $\pm 0$ \endlatexonly) returns INT_MIN.
+ * - ilogbf(NaN) returns INT_MIN.
+ * - ilogbf(\latexonly $\pm \infty$ \endlatexonly) returns INT_MAX.
+ * - Note: the behavior above does not take into account FP_ILOGB0 or FP_ILOGBNAN.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int ilogbf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP int __cdecl ilogbf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Scale floating-point input by integer power of two.
+ *
+ * Scale \p x by \latexonly $2^n$ \endlatexonly by efficient manipulation of the floating-point exponent.
+ *
+ * \return
+ * Returns \p x * \latexonly $2^n$ \endlatexonly.
+ * - scalbn(\latexonly $\pm 0$ \endlatexonly, \p n) returns \latexonly $\pm 0$ \endlatexonly.
+ * - scalbn(\p x, 0) returns \p x.
+ * - scalbn(\latexonly $\pm \infty$ \endlatexonly, \p n) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double scalbn(double x, int n) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl scalbn(double x, int n);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Scale floating-point input by integer power of two.
+ *
+ * Scale \p x by \latexonly $2^n$ \endlatexonly by efficient manipulation of the floating-point exponent.
+ *
+ * \return
+ * Returns \p x * \latexonly $2^n$ \endlatexonly.
+ * - scalbnf(\latexonly $\pm 0$ \endlatexonly, \p n) returns \latexonly $\pm 0$ \endlatexonly.
+ * - scalbnf(\p x, 0) returns \p x.
+ * - scalbnf(\latexonly $\pm \infty$ \endlatexonly, \p n) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float scalbnf(float x, int n) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl scalbnf(float x, int n);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Scale floating-point input by integer power of two.
+ *
+ * Scale \p x by \latexonly $2^n$ \endlatexonly by efficient manipulation of the floating-point exponent.
+ *
+ * \return
+ * Returns \p x * \latexonly $2^n$ \endlatexonly.
+ * - scalbln(\latexonly $\pm 0$ \endlatexonly, \p n) returns \latexonly $\pm 0$ \endlatexonly.
+ * - scalbln(\p x, 0) returns \p x.
+ * - scalbln(\latexonly $\pm \infty$ \endlatexonly, \p n) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double scalbln(double x, long int n) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl scalbln(double x, long int n);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Scale floating-point input by integer power of two.
+ *
+ * Scale \p x by \latexonly $2^n$ \endlatexonly by efficient manipulation of the floating-point exponent.
+ *
+ * \return
+ * Returns \p x * \latexonly $2^n$ \endlatexonly.
+ * - scalblnf(\latexonly $\pm 0$ \endlatexonly, \p n) returns \latexonly $\pm 0$ \endlatexonly.
+ * - scalblnf(\p x, 0) returns \p x.
+ * - scalblnf(\latexonly $\pm \infty$ \endlatexonly, \p n) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float scalblnf(float x, long int n) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl scalblnf(float x, long int n);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Extract mantissa and exponent of a floating-point value
+ *
+ * Decompose the floating-point value \p x into a component \p m for the
+ * normalized fraction element and another term \p n for the exponent.
+ * The absolute value of \p m will be greater than or equal to 0.5 and
+ * less than 1.0, or it will be equal to 0;
+ * \latexonly $x = m\cdot 2^n$ \endlatexonly.
+ * The integer exponent \p n will be stored in the location to which \p nptr points.
+ *
+ * \return
+ * Returns the fractional component \p m.
+ * - frexp(\latexonly $\pm 0$ \endlatexonly, \p nptr) returns \latexonly $\pm 0$ \endlatexonly and stores zero in the location pointed to by \p nptr.
+ * - frexp(\latexonly $\pm \infty$ \endlatexonly, \p nptr) returns \latexonly $\pm \infty$ \endlatexonly and stores an unspecified value in the location to which \p nptr points.
+ * - frexp(NaN, \p y) returns a NaN and stores an unspecified value in the location to which \p nptr points.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl frexp(double x, int *nptr) __THROW;
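+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). frexp splits x into m * 2^e with
+ * 0.5 <= |m| < 1, and scalbn(m, e) reassembles the original value:
+ *
+ *   __global__ void frexp_demo(const double *x, double *m, int *e, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) m[i] = frexp(x[i], &e[i]);  // x[i] == scalbn(m[i], e[i])
+ *   }
+ */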
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Extract mantissa and exponent of a floating-point value
+ *
+ * Decomposes the floating-point value \p x into a component \p m for the
+ * normalized fraction element and another term \p n for the exponent.
+ * The absolute value of \p m will be greater than or equal to 0.5 and
+ * less than 1.0, or it will be equal to 0;
+ * \latexonly $x = m\cdot 2^n$ \endlatexonly.
+ * The integer exponent \p n will be stored in the location to which \p nptr points.
+ *
+ * \return
+ * Returns the fractional component \p m.
+ * - frexpf(\latexonly $\pm 0$ \endlatexonly, \p nptr) returns \latexonly $\pm 0$ \endlatexonly and stores zero in the location pointed to by \p nptr.
+ * - frexpf(\latexonly $\pm \infty$ \endlatexonly, \p nptr) returns \latexonly $\pm \infty$ \endlatexonly and stores an unspecified value in the location to which \p nptr points.
+ * - frexpf(NaN, \p y) returns a NaN and stores an unspecified value in the location to which \p nptr points.
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float frexpf(float x, int *nptr) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round to nearest integer value in floating-point.
+ *
+ * Round \p x to the nearest integer value in floating-point format,
+ * with halfway cases rounded away from zero.
+ *
+ * \return
+ * Returns rounded integer value.
+ * - round(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - round(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_slow_round See ::rint().
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double round(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl round(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round to nearest integer value in floating-point.
+ *
+ * Round \p x to the nearest integer value in floating-point format,
+ * with halfway cases rounded away from zero.
+ *
+ * \return
+ * Returns rounded integer value.
+ * - roundf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - roundf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_slow_round See ::rintf().
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float roundf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl roundf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round to nearest integer value.
+ *
+ * Round \p x to the nearest integer value, with halfway cases rounded
+ * away from zero. If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ *
+ * \note_slow_round See ::lrint().
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lround(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lround(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round to nearest integer value.
+ *
+ * Round \p x to the nearest integer value, with halfway cases rounded
+ * away from zero. If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ *
+ * \note_slow_round See ::lrintf().
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lroundf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lroundf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round to nearest integer value.
+ *
+ * Round \p x to the nearest integer value, with halfway cases rounded
+ * away from zero. If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ *
+ * \note_slow_round See ::llrint().
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llround(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llround(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round to nearest integer value.
+ *
+ * Round \p x to the nearest integer value, with halfway cases rounded
+ * away from zero. If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ *
+ * \note_slow_round See ::llrintf().
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llroundf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llroundf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round to nearest integer value in floating-point.
+ *
+ * Round \p x to the nearest integer value in floating-point format,
+ * with halfway cases rounded to the nearest even integer value.
+ *
+ * \return
+ * Returns rounded integer value.
+ * - rint(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - rint(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if defined(__CUDA_ARCH__) || defined(__DOXYGEN_ONLY__)
+/*
+ * We don't generate the declaration of rint for host compilation.
+ * This is actually a workaround to compile the boost header file when
+ * Clang 3.8 is used as the host compiler. The boost header file has
+ * the following example code:
+ *   namespace NS { extern "C" { double rint(double); } }
+ *
+ * After preprocessing, we get something like below:
+ *
+ *   extern "C" { double rint(double x) throw(); }
+ *   # 30 "/usr/include/math.h" 3
+ *   extern "C" { double rint(double x) throw(); }
+ *   namespace NS { extern "C" { double rint(double); } }
+ *
+ * Although GCC accepts this output, Clang 3.8 doesn't.
+ * Furthermore, we cannot change the boost header file by adding "throw()"
+ * to rint's declaration there. So, as a workaround, we just don't generate
+ * our re-declaration for the host compilation.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double rint(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl rint(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+#endif /* __CUDA_ARCH__ || __DOXYGEN_ONLY__ */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round input to nearest integer value in floating-point.
+ *
+ * Round \p x to the nearest integer value in floating-point format,
+ * with halfway cases rounded to the nearest even integer value.
+ *
+ * \return
+ * Returns rounded integer value.
+ * - rintf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - rintf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float rintf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl rintf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
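+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). The \note_slow_round remarks above exist
+ * because round()'s halfway-away-from-zero rule needs extra work on the
+ * device, while rint()'s halfway-to-even rule maps directly to hardware;
+ * prefer rint() when either tie rule is acceptable:
+ *
+ *   __global__ void rounding_demo(const double *x, double *r, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) r[i] = rint(x[i]);  // rint(2.5) == 2.0, round(2.5) == 3.0
+ *   }
+ */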
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round input to nearest integer value.
+ *
+ * Round \p x to the nearest integer value,
+ * with halfway cases rounded to the nearest even integer value.
+ * If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lrint(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lrint(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round input to nearest integer value.
+ *
+ * Round \p x to the nearest integer value,
+ * with halfway cases rounded to the nearest even integer value.
+ * If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long int lrintf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long int __cdecl lrintf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round input to nearest integer value.
+ *
+ * Round \p x to the nearest integer value,
+ * with halfway cases rounded to the nearest even integer value.
+ * If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llrint(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llrint(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round input to nearest integer value.
+ *
+ * Round \p x to the nearest integer value,
+ * with halfway cases rounded to the nearest even integer value.
+ * If the result is outside the range of the return type,
+ * the behavior is undefined.
+ *
+ * \return
+ * Returns rounded integer value.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int llrintf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP long long int __cdecl llrintf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Round the input argument to the nearest integer.
+ *
+ * Round argument \p x to an integer value in double precision floating-point format.
+ * Uses round to nearest rounding, with ties rounding to even.
+ *
+ * \return
+ * - nearbyint(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - nearbyint(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double nearbyint(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl nearbyint(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Round the input argument to the nearest integer.
+ *
+ * Round argument \p x to an integer value in single precision floating-point format.
+ * Uses round to nearest rounding, with ties rounding to even.
+ *
+ * \return
+ * - nearbyintf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - nearbyintf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float nearbyintf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl nearbyintf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate ceiling of the input argument.
+ *
+ * Compute the smallest integer value not less than \p x.
+ *
+ * \return
+ * Returns \latexonly $\lceil x \rceil$ \endlatexonly expressed as a floating-point number.
+ * - ceil(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - ceil(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl ceil(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Truncate input argument to the integral part.
+ *
+ * Round \p x to the nearest integer value that does not exceed \p x in
+ * magnitude.
+ *
+ * \return
+ * Returns truncated integer value.
+ * - trunc(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - trunc(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double trunc(double x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl trunc(double x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Truncate input argument to the integral part.
+ *
+ * Round \p x to the nearest integer value that does not exceed \p x in
+ * magnitude.
+ *
+ * \return
+ * Returns truncated integer value.
+ * - truncf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - truncf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float truncf(float x) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl truncf(float x);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
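+/*
+ * Comparison sketch (editor's illustration, not part of the original
+ * header; kernel name is hypothetical). The four directed roundings on
+ * the same negative halfway input:
+ *
+ *   __global__ void directed_rounding_demo(double x, double *out)
+ *   {
+ *       out[0] = floor(x);  // x = -2.5  ->  -3.0 (toward -inf)
+ *       out[1] = ceil(x);   // x = -2.5  ->  -2.0 (toward +inf)
+ *       out[2] = trunc(x);  // x = -2.5  ->  -2.0 (toward zero)
+ *       out[3] = round(x);  // x = -2.5  ->  -3.0 (away from zero)
+ *   }
+ */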
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Compute the positive difference between \p x and \p y.
+ *
+ * Compute the positive difference between \p x and \p y. The positive
+ * difference is \p x - \p y when \p x > \p y and +0 otherwise.
+ *
+ * \return
+ * Returns the positive difference between \p x and \p y.
+ * - fdim(\p x, \p y) returns \p x - \p y if \p x > \p y.
+ * - fdim(\p x, \p y) returns +0 if \p x \latexonly $\leq$ \endlatexonly \p y.
+ *
+ * \note_accuracy_double
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fdim(double x, double y) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fdim(double x, double y);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Compute the positive difference between \p x and \p y.
+ *
+ * Compute the positive difference between \p x and \p y. The positive
+ * difference is \p x - \p y when \p x > \p y and +0 otherwise.
+ *
+ * \return
+ * Returns the positive difference between \p x and \p y.
+ * - fdimf(\p x, \p y) returns \p x - \p y if \p x > \p y.
+ * - fdimf(\p x, \p y) returns +0 if \p x \latexonly $\leq$ \endlatexonly \p y.
+ *
+ * \note_accuracy_single
+ */
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fdimf(float x, float y) __THROW;
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fdimf(float x, float y);
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
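+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). fdim clamps a difference at zero, which
+ * is handy for hinge-style penalties:
+ *
+ *   __global__ void fdim_demo(const double *x, double *excess, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) excess[i] = fdim(x[i], 1.0);  // fdim(0.3, 1.0) == +0
+ *   }
+ */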
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the arc tangent of the ratio of first and second input arguments.
+ *
+ * Calculate the principal value of the arc tangent of the ratio of first
+ * and second input arguments \p y / \p x. The quadrant of the result is
+ * determined by the signs of inputs \p y and \p x.
+ *
+ * \return
+ * Result will be in radians, in the interval [\latexonly $-\pi$ \endlatexonly, \latexonly $+\pi$ \endlatexonly].
+ * - atan2(\latexonly $\pm 0$ \endlatexonly, -0) returns \latexonly $\pm \pi$ \endlatexonly.
+ * - atan2(\latexonly $\pm 0$ \endlatexonly, +0) returns \latexonly $\pm 0$ \endlatexonly.
+ * - atan2(\latexonly $\pm 0$ \endlatexonly, \p x) returns \latexonly $\pm \pi$ \endlatexonly for \p x < 0.
+ * - atan2(\latexonly $\pm 0$ \endlatexonly, \p x) returns \latexonly $\pm 0$ \endlatexonly for \p x > 0.
+ * - atan2(\p y, \latexonly $\pm 0$ \endlatexonly) returns \latexonly $-\pi$ \endlatexonly/2 for \p y < 0.
+ * - atan2(\p y, \latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pi$ \endlatexonly/2 for \p y > 0.
+ * - atan2(\latexonly $\pm y$ \endlatexonly, \latexonly $-\infty$ \endlatexonly) returns \latexonly $\pm \pi$ \endlatexonly for finite \p y > 0.
+ * - atan2(\latexonly $\pm y$ \endlatexonly, \latexonly $+\infty$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly for finite \p y > 0.
+ * - atan2(\latexonly $\pm \infty$ \endlatexonly, \p x) returns \latexonly $\pm \pi$ \endlatexonly/2 for finite \p x.
+ * - atan2(\latexonly $\pm \infty$ \endlatexonly, \latexonly $-\infty$ \endlatexonly) returns \latexonly $\pm 3\pi$ \endlatexonly/4.
+ * - atan2(\latexonly $\pm \infty$ \endlatexonly, \latexonly $+\infty$ \endlatexonly) returns \latexonly $\pm \pi$ \endlatexonly/4.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl atan2(double y, double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the arc tangent of the input argument.
+ *
+ * Calculate the principal value of the arc tangent of the input argument \p x.
+ *
+ * \return
+ * Result will be in radians, in the interval [\latexonly $-\pi$ \endlatexonly/2, \latexonly $+\pi$ \endlatexonly/2].
+ * - atan(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - atan(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \pi$ \endlatexonly/2.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl atan(double x) __THROW;
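+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). atan2 gives a full-circle angle, unlike
+ * atan(y/x), which folds quadrants II and III onto IV and I and divides
+ * by zero on the y-axis:
+ *
+ *   __global__ void angle_demo(const double *x, const double *y,
+ *                              double *theta, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) theta[i] = atan2(y[i], x[i]);  // in [-pi, +pi]
+ *   }
+ */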
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the arc cosine of the input argument.
+ *
+ * Calculate the principal value of the arc cosine of the input argument \p x.
+ *
+ * \return
+ * Result will be in radians, in the interval [0, \latexonly $\pi$ \endlatexonly] for \p x inside [-1, +1].
+ * - acos(1) returns +0.
+ * - acos(\p x) returns NaN for \p x outside [-1, +1].
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl acos(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the arc sine of the input argument.
+ *
+ * Calculate the principal value of the arc sine of the input argument \p x.
+ *
+ * \return
+ * Result will be in radians, in the interval [\latexonly $-\pi$ \endlatexonly/2, \latexonly $+\pi$ \endlatexonly/2] for \p x inside [-1, +1].
+ * - asin(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ * - asin(\p x) returns NaN for \p x outside [-1, +1].
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl asin(double x) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the square root of the sum of squares of two arguments.
+ *
+ * Calculate the length of the hypotenuse of a right triangle whose two sides have lengths
+ * \p x and \p y without undue overflow or underflow.
+ *
+ * \return Returns the length of the hypotenuse \latexonly $\sqrt{x^2+y^2}$ \endlatexonly.
+ * - hypot(\p x, \p y), hypot(\p y, \p x), and hypot(\p x, \p -y) are equivalent.
+ * - hypot(\p x, \latexonly $\pm 0$ \endlatexonly) is equivalent to fabs(\p x).
+ * - hypot(\latexonly $\pm \infty$ \endlatexonly, \p y) returns \latexonly $+\infty$ \endlatexonly, even if \p y is a NaN.
+ *
+ * \note_accuracy_double
+ */
+#if defined(_WIN32)
+#if defined(_MSC_VER) && _MSC_VER < 1900
+static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __CRTDECL hypot(double x, double y);
+#else
+extern _ACRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl hypot(double x, double y);
+#endif
+#else /* _WIN32 */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double hypot(double x, double y) __THROW;
+#endif /* _WIN32 */
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
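+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). hypot avoids the intermediate overflow
+ * that sqrt(x*x + y*y) hits once x*x exceeds DBL_MAX (|x| around 1e154):
+ *
+ *   __global__ void hypot_demo(const double *x, const double *y,
+ *                              double *d, int n)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < n) d[i] = hypot(x[i], y[i]);  // finite even for 1e200 inputs
+ *   }
+ */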
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate one over the square root of the sum of squares of two arguments.
+ *
+ * Calculate one over the length of the hypotenuse of a right triangle whose two sides have
+ * lengths \p x and \p y without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the hypotenuse \latexonly $\frac{1}{\sqrt{x^2+y^2}}$ \endlatexonly.
+ * - rhypot(\p x, \p y), rhypot(\p y, \p x), and rhypot(\p x, \p -y) are equivalent.
+ * - rhypot(\latexonly $\pm \infty$ \endlatexonly, \p y) returns +0, even if \p y is a NaN.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double rhypot(double x, double y) __THROW;
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the square root of the sum of squares of two arguments.
+ *
+ * Calculates the length of the hypotenuse of a right triangle whose two sides have lengths
+ * \p x and \p y without undue overflow or underflow.
+ *
+ * \return Returns the length of the hypotenuse \latexonly $\sqrt{x^2+y^2}$ \endlatexonly.
+ * - hypotf(\p x, \p y), hypotf(\p y, \p x), and hypotf(\p x, \p -y) are equivalent.
+ * - hypotf(\p x, \latexonly $\pm 0$ \endlatexonly) is equivalent to fabsf(\p x).
+ * - hypotf(\latexonly $\pm \infty$ \endlatexonly, \p y) returns \latexonly $+\infty$ \endlatexonly, even if \p y is a NaN.
+ *
+ * \note_accuracy_single
+ */
+#if defined(_WIN32)
+static __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __CRTDECL hypotf(float x, float y);
+#else /* _WIN32 */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float hypotf(float x, float y) __THROW;
+#endif /* _WIN32 */
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
+
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate one over the square root of the sum of squares of two arguments.
+ *
+ * Calculates one over the length of the hypotenuse of a right triangle whose two sides have
+ * lengths \p x and \p y without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the hypotenuse \latexonly $\frac{1}{\sqrt{x^2+y^2}}$ \endlatexonly.
+ * - rhypotf(\p x, \p y), rhypotf(\p y, \p x), and rhypotf(\p x, \p -y) are equivalent.
+ * - rhypotf(\latexonly $\pm \infty$ \endlatexonly, \p y) returns +0, even if \p y is a NaN.
+ *
+ * \note_accuracy_single
+ */
+extern __device__ __device_builtin__ float rhypotf(float x, float y) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the square root of the sum of squares of three coordinates of the argument.
+ *
+ * Calculate the length of a three-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns the length of the 3D vector \latexonly $\sqrt{a^2+b^2+c^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate \latexonly $+\infty$ \endlatexonly is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl norm3d(double a, double b, double c) __THROW;
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate one over the square root of the sum of squares of three coordinates.
+ *
+ * Calculate one over the length of a three-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the 3D vector \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2}}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate \latexonly $+0$ \endlatexonly is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double rnorm3d(double a, double b, double c) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the square root of the sum of squares of four coordinates of the argument.
+ *
+ * Calculate the length of a four-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns the length of the 4D vector \latexonly $\sqrt{a^2+b^2+c^2+d^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate \latexonly $+\infty$ \endlatexonly is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl norm4d(double a, double b, double c, double d) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate one over the square root of the sum of squares of four coordinates.
+ *
+ * Calculate one over the length of a four-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the 4D vector \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2+d^2}}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate \latexonly $+0$ \endlatexonly is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double rnorm4d(double a, double b, double c, double d) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the square root of the sum of squares of any number of coordinates.
+ *
+ * Calculate the length of the vector \p p, whose dimension is passed as the argument \p dim, without undue overflow or underflow.
+ *
+ * \return Returns the length of the dim-D vector \latexonly $\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate \latexonly $+\infty$ \endlatexonly is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+ __device__ __device_builtin__ double norm(int dim, double const * p) __THROW;
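+/*
+ * Usage sketch (editor's illustration, not part of the original header;
+ * kernel name is hypothetical). norm takes the dimension and a pointer
+ * to the coordinates; for fixed small sizes, norm3d/norm4d avoid the
+ * loop over \p dim:
+ *
+ *   __global__ void norm_demo(const double *p, double *len)
+ *   {
+ *       *len = norm(8, p);  // length of one 8-dimensional vector at p
+ *   }
+ */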
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the square root of the sum of squares of any number of coordinates.
+ *
+ * Calculate the length of the vector \p p, whose dimension is passed as the argument \p dim, without undue overflow or underflow.
+ *
+ * \return Returns the length of the dim-D vector
+ * \latexonly $\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+\infty$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+ __device__ __device_builtin__ double norm(int dim, double const * p) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the reciprocal of the square root of the sum of squares of any number of coordinates.
+ *
+ * Calculates one over the length of the vector \p p, whose dimension is passed as the argument \p dim, in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the vector
+ * \latexonly $\frac{1}{\sqrt{\sum_{i=0}^{dim-1} p_i^2}}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+0$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double rnorm(int dim, double const * p) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the reciprocal of the square root of the sum of squares of any number of coordinates.
+ *
+ * Calculates one over the length of the vector \p p, whose dimension is passed as the argument \p dim, in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the vector
+ * \latexonly $\frac{1}{\sqrt{\sum_{i=0}^{dim-1} p_i^2}}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+0$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_single
+ */
+extern __device__ __device_builtin__ float rnormf(int dim, float const * p) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the square root of the sum of squares of any number of coordinates.
+ *
+ * Calculates the length of the vector \p p, whose dimension is passed as the argument \p dim, without undue overflow or underflow.
+ *
+ * \return Returns the length of the dim-D vector
+ * \latexonly $\sqrt{\sum_{i=0}^{dim-1} p_i^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+\infty$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_single
+ */
+ __device__ __device_builtin__ float normf(int dim, float const * p) __THROW;
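+/*
+ * Usage sketch (illustrative, not part of the original header): per-row
+ * Euclidean lengths of a row-major matrix via normf(). The kernel name and
+ * data layout are hypothetical.
+ *
+ *   __global__ void rowLengths(const float *rows, float *out, int dim, int nrows)
+ *   {
+ *       int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *       if (i < nrows)
+ *           out[i] = normf(dim, rows + (size_t)i * dim); // overflow-safe length
+ *   }
+ */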
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the square root of the sum of squares of three coordinates of the argument.
+ *
+ * Calculates the length of a three-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns the length of the 3D vector
+ * \latexonly $\sqrt{a^2+b^2+c^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+\infty$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_single
+ */
+extern __device__ __device_builtin__ float norm3df(float a, float b, float c) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate one over the square root of the sum of squares of three coordinates.
+ *
+ * Calculates one over the length of a three-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the 3D vector
+ * \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2}}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+0$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_single
+ */
+extern __device__ __device_builtin__ float rnorm3df(float a, float b, float c) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the square root of the sum of squares of four coordinates of the argument.
+ *
+ * Calculates the length of a four-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns the length of the 4D vector
+ * \latexonly $\sqrt{a^2+b^2+c^2+d^2}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+\infty$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_single
+ */
+extern __device__ __device_builtin__ float norm4df(float a, float b, float c, float d) __THROW;
+
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate one over the square root of the sum of squares of four coordinates.
+ *
+ * Calculates one over the length of a four-dimensional vector in Euclidean space without undue overflow or underflow.
+ *
+ * \return Returns one over the length of the 4D vector
+ * \latexonly $\frac{1}{\sqrt{a^2+b^2+c^2+d^2}}$ \endlatexonly.
+ * - In the presence of an exactly infinite coordinate
+ * \latexonly $+0$ \endlatexonly
+ * is returned, even if there are NaNs.
+ *
+ * \note_accuracy_single
+ */
+extern __device__ __device_builtin__ float rnorm4df(float a, float b, float c, float d) __THROW;
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the cube root of the input argument.
+ *
+ * Calculate the cube root of \p x,
+ * \latexonly $x^{1/3}$ \endlatexonly.
+ *
+ * \return
+ * Returns
+ * \latexonly $x^{1/3}$ \endlatexonly.
+ * - cbrt(
+ * \latexonly $\pm 0$ \endlatexonly
+ * ) returns
+ * \latexonly $\pm 0$ \endlatexonly.
+ * - cbrt( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double cbrt(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl cbrt(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the cube root of the input argument. + * + * Calculate the cube root of \p x, + * \latexonly $x^{1/3}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * 1 + * + * / + * + * 3 + * + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $x^{1/3}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * 1 + * + * / + * + * 3 + * + * + * + * \endxmlonly. + * - cbrtf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - cbrtf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float cbrtf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl cbrtf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate reciprocal cube root function. + * + * Calculate reciprocal cube root function of \p x. + * + * \return + * - rcbrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - rcbrt( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double rcbrt(double x); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate reciprocal cube root function. + * + * Calculate reciprocal cube root function of \p x. + * + * \return + * - rcbrt( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - rcbrt( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float rcbrtf(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. 
+ * + * Calculate the sine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - sinpi( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinpi( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double sinpi(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the sine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - sinpif( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinpif( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sinpif(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the cosine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the cosine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - cospi( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cospi( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double cospi(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the cosine of the input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the cosine of \p x + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * + * \endxmlonly + * (measured in radians), + * where \p x is the input argument. + * + * \return + * - cospif( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cospif( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float cospif(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the sine and cosine of the first input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the sine and cosine of the first input argument, \p x (measured in radians), + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. 
The results for sine and cosine are written into the + * second argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sinpi() and ::cospi(). + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincospi(double x, double *sptr, double *cptr); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine and cosine of the first input argument + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. + * + * Calculate the sine and cosine of the first input argument, \p x (measured in radians), + * \latexonly $\times \pi$ \endlatexonly + * \xmlonly + * + * + * × + * π + * + * \endxmlonly. The results for sine and cosine are written into the + * second argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \see ::sinpif() and ::cospif(). + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ void sincospif(float x, float *sptr, float *cptr); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of first argument to the power of second argument. + * + * Calculate the value of \p x to the power of \p y. + * + * \return + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * for \p y an odd integer less than 0. + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * +* + + * + * + * + * \endxmlonly + * for \p y less than 0 and not an odd integer. + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - pow( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns +0 for \p y > 0 and not an odd integer. + * - pow(-1, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns 1. + * - pow(+1, \p y) returns 1 for any \p y, even a NaN. + * - pow(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1 for any \p x, even a NaN. + * - pow(\p x, \p y) returns a NaN for finite \p x < 0 and finite non-integer \p y. + * - pow(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - pow(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. 
+ * - pow(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - pow(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns -0 for \p y an odd integer less than 0. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0 and not an odd integer. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - pow( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0 and not an odd integer. + * - pow( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0. + * - pow( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl pow(double x, double y) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Break down the input argument into fractional and integral parts. + * + * Break down the argument \p x into fractional and integral parts. The + * integral part is stored in the argument \p iptr. + * Fractional and integral parts are given the same sign as the argument \p x. + * + * \return + * - modf( + * \latexonly $\pm x$ \endlatexonly + * \xmlonly + * + * + * ± + * x + * + * + * \endxmlonly + * , \p iptr) returns a result with the same sign as \p x. + * - modf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p iptr) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * and stores + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * in the object pointed to by \p iptr. + * - modf(NaN, \p iptr) stores a NaN in the object pointed to by \p iptr and returns a NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl modf(double x, double *iptr) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the double-precision floating-point remainder of \p x / \p y. + * + * Calculate the double-precision floating-point remainder of \p x / \p y. + * The floating-point remainder of the division operation \p x / \p y calculated + * by this function is exactly the value x - n*y, where \p n is \p x / \p y with its fractional part truncated. 
+ * The computed value will have the same sign as \p x, and its magnitude will be less than the magnitude of \p y. + * + * \return + * - Returns the floating-point remainder of \p x / \p y. + * - fmod( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * if \p y is not zero. + * - fmod(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x if \p x is finite. + * - fmod(\p x, \p y) returns NaN if \p x is + * \latexonly $\pm\infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * or \p y is zero. + * - If either argument is NaN, NaN is returned. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double __cdecl fmod(double x, double y) __THROW; +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute double-precision floating-point remainder. + * + * Compute double-precision floating-point remainder \p r of dividing + * \p x by \p y for nonzero \p y. Thus + * \latexonly $ r = x - n y$ \endlatexonly + * \xmlonly + * + * + * r + * = + * x + * + * n + * y + * + * \endxmlonly. + * The value \p n is the integer value nearest + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * In the case when + * \latexonly $ | n -\frac{x}{y} | = \frac{1}{2} $ \endlatexonly + * \xmlonly + * + * + * + * | + * + * n + * + * + * x + * y + * + * + * | + * + * = + * + * 1 + * 2 + * + * + * + * \endxmlonly + * , the + * even \p n value is chosen. + * + * \return + * - remainder(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns NaN. + * - remainder( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y) returns NaN. + * - remainder(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x for finite \p x. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double remainder(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl remainder(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute single-precision floating-point remainder. + * + * Compute single-precision floating-point remainder \p r of dividing + * \p x by \p y for nonzero \p y. Thus + * \latexonly $ r = x - n y$ \endlatexonly + * \xmlonly + * + * + * r + * = + * x + * + * n + * y + * + * \endxmlonly. + * The value \p n is the integer value nearest + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * In the case when + * \latexonly $ | n -\frac{x}{y} | = \frac{1}{2} $ \endlatexonly + * \xmlonly + * + * + * + * | + * + * n + * + * + * x + * y + * + * + * | + * + * = + * + * 1 + * 2 + * + * + * + * \endxmlonly + * , the + * even \p n value is chosen. + * + * \return + * - remainderf(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns NaN. 
+ * - remainderf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y) returns NaN. + * - remainderf(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns \p x for finite \p x. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float remainderf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl remainderf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute double-precision floating-point remainder and part of quotient. + * + * Compute a double-precision floating-point remainder in the same way as the + * ::remainder() function. Argument \p quo returns part of quotient upon + * division of \p x by \p y. Value \p quo has the same sign as + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * + * \endxmlonly + * and may not be the exact quotient but agrees with the exact quotient + * in the low order 3 bits. + * + * \return + * Returns the remainder. + * - remquo(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquo( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquo(\p x, \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points if either of \p x or \p y is NaN. + * - remquo(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p quo) returns \p x and stores zero + * in the location to which \p quo points for finite \p x. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double remquo(double x, double y, int *quo) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl remquo(double x, double y, int *quo); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute single-precision floating-point remainder and part of quotient. + * + * Compute a single-precision floating-point remainder in the same way as the + * ::remainderf() function. Argument \p quo returns part of quotient upon + * division of \p x by \p y. Value \p quo has the same sign as + * \latexonly $ \frac{x}{y} $ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * + * \endxmlonly + * and may not be the exact quotient but agrees with the exact quotient + * in the low order 3 bits. + * + * \return + * Returns the remainder. + * - remquof(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. 
+ * - remquof( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points. + * - remquof(\p x, \p y, \p quo) returns NaN + * and stores an unspecified value in the + * location to which \p quo points if either of \p x or \p y is NaN. + * - remquof(\p x, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p quo) returns \p x and stores zero + * in the location to which \p quo points for finite \p x. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float remquof(float x, float y, int *quo) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl remquof(float x, float y, int *quo); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the first kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 0 for + * the input argument \p x, + * \latexonly $J_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 0. + * - j0( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns +0. + * - j0(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl j0(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the first kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 0 for + * the input argument \p x, + * \latexonly $J_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 0. + * - j0f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns +0. + * - j0f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float j0f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the first kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 1 for + * the input argument \p x, + * \latexonly $J_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 1. + * - j1( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - j1( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. 
+ * - j1(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl j1(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the first kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order 1 for + * the input argument \p x, + * \latexonly $J_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order 1. + * - j1f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - j1f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - j1f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float j1f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the first kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order \p n for + * the input argument \p x, + * \latexonly $J_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order \p n. + * - jn(\p n, NaN) returns NaN. + * - jn(\p n, \p x) returns NaN for \p n < 0. + * - jn(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl jn(int n, double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the first kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the first kind of order \p n for + * the input argument \p x, + * \latexonly $J_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * J + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the first kind of order \p n. + * - jnf(\p n, NaN) returns NaN. + * - jnf(\p n, \p x) returns NaN for \p n < 0. + * - jnf(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float jnf(int n, float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the second kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 0 for + * the input argument \p x, + * \latexonly $Y_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. 
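+ *
+ * A usage sketch (illustrative, not part of the original header): sampling
+ * \latexonly $Y_0$ \endlatexonly over a grid of radii, e.g. when tabulating a
+ * cylindrical mode profile. The kernel name is hypothetical.
+ * \code
+ * __global__ void tabulateY0(const double *r, double *out, int n)
+ * {
+ *     int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *     if (i < n)
+ *         out[i] = y0(r[i]); // NaN for r < 0, -infinity at +0
+ * }
+ * \endcode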
+ * + * \return + * Returns the value of the Bessel function of the second kind of order 0. + * - y0( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y0(\p x) returns NaN for \p x < 0. + * - y0( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y0(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl y0(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the second kind of order 0 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 0 for + * the input argument \p x, + * \latexonly $Y_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 0. + * - y0f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y0f(\p x) returns NaN for \p x < 0. + * - y0f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y0f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float y0f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the second kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 1 for + * the input argument \p x, + * \latexonly $Y_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 1. + * - y1( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y1(\p x) returns NaN for \p x < 0. + * - y1( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y1(NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl y1(double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the second kind of order 1 for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order 1 for + * the input argument \p x, + * \latexonly $Y_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order 1. + * - y1f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - y1f(\p x) returns NaN for \p x < 0. 
+ * - y1f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - y1f(NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float y1f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the Bessel function of the second kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order \p n for + * the input argument \p x, + * \latexonly $Y_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order \p n. + * - yn(\p n, \p x) returns NaN for \p n < 0. + * - yn(\p n, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + *) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - yn(\p n, \p x) returns NaN for \p x < 0. + * - yn(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - yn(\p n, NaN) returns NaN. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl yn(int n, double x) __THROW; +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the Bessel function of the second kind of order n for the input argument. + * + * Calculate the value of the Bessel function of the second kind of order \p n for + * the input argument \p x, + * \latexonly $Y_n(x)$ \endlatexonly + * \xmlonly + * + * + * + * Y + * n + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the Bessel function of the second kind of order \p n. + * - ynf(\p n, \p x) returns NaN for \p n < 0. + * - ynf(\p n, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - ynf(\p n, \p x) returns NaN for \p x < 0. + * - ynf(\p n, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * - ynf(\p n, NaN) returns NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float ynf(int n, float x) __THROW; + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 0 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 0 for + * the input argument \p x, + * \latexonly $I_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 0. + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl cyl_bessel_i0(double x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 0 for the input argument. 
+ * + * Calculate the value of the regular modified cylindrical Bessel function of order 0 for + * the input argument \p x, + * \latexonly $I_0(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 0 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 0. + * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float cyl_bessel_i0f(float x) __THROW; + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 1 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 1 for + * the input argument \p x, + * \latexonly $I_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 1. + * + * \note_accuracy_double + */ +extern __device__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl cyl_bessel_i1(double x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of the regular modified cylindrical Bessel function of order 1 for the input argument. + * + * Calculate the value of the regular modified cylindrical Bessel function of order 1 for + * the input argument \p x, + * \latexonly $I_1(x)$ \endlatexonly + * \xmlonly + * + * + * + * I + * 1 + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \return + * Returns the value of the regular modified cylindrical Bessel function of order 1. + * + * \note_accuracy_single + */ +extern __device__ __device_builtin__ float cyl_bessel_i1f(float x) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the error function of the input argument. + * + * Calculate the value of the error function for the input argument \p x, + * \latexonly $\frac{2}{\sqrt \pi} \int_0^x e^{-t^2} dt$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * + * π + * + * + * + * + * 0 + * x + * + * + * e + * + * + * + * t + * 2 + * + * + * + * d + * t + * + * \endxmlonly. + * + * \return + * - erf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - erf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erf(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl erf(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the error function of the input argument. + * + * Calculate the value of the error function for the input argument \p x, + * \latexonly $\frac{2}{\sqrt \pi} \int_0^x e^{-t^2} dt$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * + * π + * + * + * + * + * 0 + * x + * + * + * e + * + * + * + * t + * 2 + * + * + * + * d + * t + * + * \endxmlonly. 
+ * + * \return + * - erff( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - erff( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erff(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl erff(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse error function of the input argument. + * + * Calculate the inverse error function + * \latexonly $\erf^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erf + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [-1, 1]. + * + * \return + * - erfinv( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - erfinv(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfinv(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfinv(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfinv(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse error function of the input argument. + * + * Calculate the inverse error function + * \latexonly $\erf^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erf + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [-1, 1]. + * + * \return + * - erfinvf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - erfinvf(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfinvf(-1) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfinvf(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfinvf(float x); + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the complementary error function of the input argument. + * + * Calculate the complementary error function of the input argument \p x, + * 1 - erf(\p x). + * + * \return + * - erfc( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns 2. + * - erfc( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. 
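+ *
+ * A usage sketch (illustrative, not part of the original header): the upper
+ * tail probability of a standard normal variable, P(X > x), computed as
+ * 0.5 * erfc(x / sqrt(2)). Using erfc() rather than 1 - erf() preserves
+ * precision for large \p x, where erf(\p x) rounds to 1.
+ * \code
+ * __device__ double gauss_tail(double x)
+ * {
+ *     return 0.5 * erfc(x * 0.7071067811865476); // x / sqrt(2)
+ * }
+ * \endcode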
+ * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfc(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl erfc(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the complementary error function of the input argument. + * + * Calculate the complementary error function of the input argument \p x, + * 1 - erf(\p x). + * + * \return + * - erfcf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns 2. + * - erfcf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfcf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl erfcf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the natural logarithm of the absolute value of the gamma function of the input argument. + * + * Calculate the natural logarithm of the absolute value of the gamma function of the input argument \p x, namely the value of + * \latexonly $\log_{e}\left|\int_{0}^{\infty} e^{-t}t^{x-1}dt\right|$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * + * + * + * \endxmlonly + * + * \return + * - lgamma(1) returns +0. + * - lgamma(2) returns +0. + * - lgamma(\p x) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * if \p x + * \latexonly $\leq$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly 0 and \p x is an integer. + * - lgamma( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - lgamma( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double lgamma(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl lgamma(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse complementary error function of the input argument. + * + * Calculate the inverse complementary error function + * \latexonly $\erfc^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erfc + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [0, 2]. 
+ * + * \return + * - erfcinv( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfcinv(2) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfcinv(\p x) returns NaN for \p x outside [0, 2]. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfcinv(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse complementary error function of the input argument. + * + * Calculate the inverse complementary error function + * \latexonly $\erfc^{-1}$ \endlatexonly + * \xmlonly + * + * + * + * erfc + * + * - + * 1 + * + * + * + * + * \endxmlonly + * (\p x), of the input argument \p x in the interval [0, 2]. + * + * \return + * - erfcinvf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - erfcinvf(2) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - erfcinvf(\p x) returns NaN for \p x outside [0, 2]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfcinvf(float x); +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the inverse of the standard normal cumulative distribution function. + * + * Calculate the inverse of the standard normal cumulative distribution function for input argument \p x, + * \latexonly $\Phi^{-1}(x)$ \endlatexonly + * \xmlonly + * + * + * + * Φ + * + * + * 1 + * + * + * ( + * x + * ) + * + * \endxmlonly. The function is defined for input values in the interval + * \latexonly $(0, 1)$ \endlatexonly + * \xmlonly + * + * + * ( + * 0 + * , + * 1 + * ) + * + * \endxmlonly. + * + * \return + * - normcdfinv( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - normcdfinv(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - normcdfinv(\p x) returns NaN + * if \p x is not in the interval [0,1]. + * + * \note_accuracy_double + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double normcdfinv(double x); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the inverse of the standard normal cumulative distribution function. + * + * Calculate the inverse of the standard normal cumulative distribution function for input argument \p x, + * \latexonly $\Phi^{-1}(x)$ \endlatexonly + * \xmlonly + * + * + * + * Φ + * + * + * 1 + * + * + * ( + * x + * ) + * + * \endxmlonly. The function is defined for input values in the interval + * \latexonly $(0, 1)$ \endlatexonly + * \xmlonly + * + * + * ( + * 0 + * , + * 1 + * ) + * + * \endxmlonly. + * + * \return + * - normcdfinvf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - normcdfinvf(1) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - normcdfinvf(\p x) returns NaN + * if \p x is not in the interval [0,1]. 
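+ *
+ * A usage sketch (illustrative, not part of the original header): inverse-CDF
+ * sampling, mapping uniform variates in (0, 1) to standard normal variates.
+ * The kernel name and buffers are hypothetical.
+ * \code
+ * __global__ void uniformToNormal(const float *u, float *z, int n)
+ * {
+ *     int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *     if (i < n)
+ *         z[i] = normcdfinvf(u[i]); // Phi^-1(u); u must lie in (0, 1)
+ * }
+ * \endcode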
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float normcdfinvf(float x);
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the standard normal cumulative distribution function.
+ *
+ * Calculate the cumulative distribution function of the standard normal distribution for input argument \p x,
+ * \latexonly $\Phi(x)$ \endlatexonly.
+ *
+ * \return
+ * - normcdf(
+ * \latexonly $+\infty$ \endlatexonly
+ * ) returns 1.
+ * - normcdf(
+ * \latexonly $-\infty$ \endlatexonly
+ * ) returns +0.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double normcdf(double x);
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the standard normal cumulative distribution function.
+ *
+ * Calculate the cumulative distribution function of the standard normal distribution for input argument \p x,
+ * \latexonly $\Phi(x)$ \endlatexonly.
+ *
+ * \return
+ * - normcdff(
+ * \latexonly $+\infty$ \endlatexonly
+ * ) returns 1.
+ * - normcdff(
+ * \latexonly $-\infty$ \endlatexonly
+ * ) returns +0.
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float normcdff(float x);
+/**
+ * \ingroup CUDA_MATH_DOUBLE
+ * \brief Calculate the scaled complementary error function of the input argument.
+ *
+ * Calculate the scaled complementary error function of the input argument \p x,
+ * \latexonly $e^{x^2}\cdot \textrm{erfc}(x)$ \endlatexonly.
+ *
+ * \return
+ * - erfcx(
+ * \latexonly $-\infty$ \endlatexonly
+ * ) returns
+ * \latexonly $+\infty$ \endlatexonly.
+ * - erfcx(
+ * \latexonly $+\infty$ \endlatexonly
+ * ) returns +0.
+ *
+ * \note_accuracy_double
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double erfcx(double x);
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the scaled complementary error function of the input argument.
+ *
+ * Calculate the scaled complementary error function of the input argument \p x,
+ * \latexonly $e^{x^2}\cdot \textrm{erfc}(x)$ \endlatexonly.
+ *
+ * \return
+ * - erfcxf(
+ * \latexonly $-\infty$ \endlatexonly
+ * ) returns
+ * \latexonly $+\infty$ \endlatexonly.
+ * - erfcxf(
+ * \latexonly $+\infty$ \endlatexonly
+ * ) returns +0.
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float erfcxf(float x);
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
+ * + * Calculate the natural logarithm of the absolute value of the gamma function of the input argument \p x, namely the value of + * \latexonly $\log_{e}\left|\int_{0}^{\infty} e^{-t}t^{x-1}dt\right|$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * e + * + * + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * + * + * + * \endxmlonly + * + * \return + * - lgammaf(1) returns +0. + * - lgammaf(2) returns +0. + * - lgammaf(\p x) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * if \p x + * \latexonly $\leq$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly + * 0 and \p x is an integer. + * - lgammaf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - lgammaf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float lgammaf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl lgammaf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the gamma function of the input argument. + * + * Calculate the gamma function of the input argument \p x, namely the value of + * \latexonly $\int_{0}^{\infty} e^{-t}t^{x-1}dt$ \endlatexonly + * \xmlonly + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * \endxmlonly. + * + * \return + * - tgamma( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - tgamma(2) returns +1. + * - tgamma(\p x) returns NaN if \p x < 0 and \p x is an integer. + * - tgamma( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN. + * - tgamma( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double tgamma(double x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl tgamma(double x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the gamma function of the input argument. + * + * Calculate the gamma function of the input argument \p x, namely the value of + * \latexonly $\int_{0}^{\infty} e^{-t}t^{x-1}dt$ \endlatexonly + * \xmlonly + * + * + * + * + * + * 0 + * + * + * + * + * + * + * e + * + * + * t + * + * + * + * t + * + * x + * + * 1 + * + * + * d + * t + * + * \endxmlonly. 
+ * + * \return + * - tgammaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + * - tgammaf(2) returns +1. + * - tgammaf(\p x) returns NaN if \p x < 0 and \p x is an integer. + * - tgammaf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN. + * - tgammaf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float tgammaf(float x) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl tgammaf(float x); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** \ingroup CUDA_MATH_DOUBLE + * \brief Create value with given magnitude, copying sign of second value. + * + * Create a floating-point value with the magnitude \p x and the sign of \p y. + * + * \return + * Returns a value with the magnitude of \p x and the sign of \p y. + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double copysign(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl copysign(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** \ingroup CUDA_MATH_SINGLE + * \brief Create value with given magnitude, copying sign of second value. + * + * Create a floating-point value with the magnitude \p x and the sign of \p y. + * + * \return + * Returns a value with the magnitude of \p x and the sign of \p y. + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float copysignf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl copysignf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Return next representable double-precision floating-point value after argument \p x in the direction of \p y. + * + * Calculate the next representable double-precision floating-point value + * following \p x in the direction of \p y. For example, if \p y is greater than \p x, ::nextafter() + * returns the smallest representable number greater than \p x + * + * \return + * - nextafter(\p x, \p y) = \p y if \p x equals \p y. + * - nextafter(\p x, \p y) = \p NaN if either \p x or \p y are \p NaN. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double nextafter(double x, double y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl nextafter(double x, double y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Return next representable single-precision floating-point value after argument \p x in the direction of \p y. 
+ * + * Calculate the next representable single-precision floating-point value + * following \p x in the direction of \p y. For example, if \p y is greater than \p x, ::nextafterf() + * returns the smallest representable number greater than \p x + * + * \return + * - nextafterf(\p x, \p y) = \p y if \p x equals \p y. + * - nextafterf(\p x, \p y) = \p NaN if either \p x or \p y are \p NaN. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float nextafterf(float x, float y) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl nextafterf(float x, float y); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Returns "Not a Number" value. + * + * Return a representation of a quiet NaN. Argument \p tagp selects one of the possible representations. + * + * \return + * - nan(\p tagp) returns NaN. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double nan(const char *tagp) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl nan(const char *tagp); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Returns "Not a Number" value + * + * Return a representation of a quiet NaN. Argument \p tagp selects one of the possible representations. + * + * \return + * - nanf(\p tagp) returns NaN. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float nanf(const char *tagp) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl nanf(const char *tagp); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* namespace std */ +#endif +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinff(float) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnanf(float) __THROW; + + +#if defined(__APPLE__) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isfinited(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isfinitef(float) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbitd(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnand(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinfd(double) __THROW; +#else /* __APPLE__ */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __finite(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __finitef(float) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbit(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnan(double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinf(double) __THROW; +#endif /* __APPLE__ */ + +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbitf(float) __THROW; + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single 
operation. + * + * Compute the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation. After computing the value + * to infinite precision, the value is rounded once. + * + * \return + * Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fma( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fma( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fma(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - fma(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_double + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fma(double x, double y, double z) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP double __cdecl fma(double x, double y, double z); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * + * Compute the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation. After computing the value + * to infinite precision, the value is rounded once. + * + * \return + * Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fmaf(float x, float y, float z) __THROW; +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ __CUDA_MATH_CRTIMP float __cdecl fmaf(float x, float y, float z); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + + +/* these are here to avoid warnings on the call graph. + long double is not supported on the device */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __signbitl(long double) __THROW; +#if defined(__APPLE__) +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isfinite(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinf(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnan(long double) __THROW; +#else /* __APPLE__ */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __finitel(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isinfl(long double) __THROW; +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __isnanl(long double) __THROW; +#endif /* __APPLE__ */ + +#if defined(_WIN32) && defined(_M_AMD64) +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl acosf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl asinf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl atanf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl atan2f(float, float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl cosf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl sinf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl tanf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl coshf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl sinhf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl tanhf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl expf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl logf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl log10f(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl modff(float, float*) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl powf(float, float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl sqrtf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl ceilf(float) __THROW; +extern __CUDA_MATH_CRTIMP 
__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl floorf(float) __THROW; +extern __CUDA_MATH_CRTIMP __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __cdecl fmodf(float, float) __THROW; +#else /* _WIN32 && _M_AMD64 */ + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc cosine of the input argument. + * + * Calculate the principal value of the arc cosine of the input argument \p x. + * + * \return + * Result will be in radians, in the interval [0, + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * ] for \p x inside [-1, +1]. + * - acosf(1) returns +0. + * - acosf(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float acosf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc sine of the input argument. + * + * Calculate the principal value of the arc sine of the input argument \p x. + * + * \return + * Result will be in radians, in the interval [- + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * , + + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * ] for \p x inside [-1, +1]. + * - asinf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - asinf(\p x) returns NaN for \p x outside [-1, +1]. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float asinf(float x) __THROW; + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc tangent of the input argument. + * + * Calculate the principal value of the arc tangent of the input argument \p x. + * + * \return + * Result will be in radians, in the interval [- + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * , + + * \latexonly $\pi/2$ \endlatexonly + * \xmlonly + * + * + * π + * + * / + * + * 2 + * + * + * \endxmlonly + * ]. + * - atanf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - atanf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * /2. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float atanf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the arc tangent of the ratio of first and second input arguments. + * + * Calculate the principal value of the arc tangent of the ratio of first + * and second input arguments \p y / \p x. The quadrant of the result is + * determined by the signs of inputs \p y and \p x. + * + * \return + * Result will be in radians, in the interval [- + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * , + + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * ]. 
+ * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , -0) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly. + * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , +0) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly. + * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p x) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * for \p x < 0. + * - atan2f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p x) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for \p x > 0. + * - atan2f(\p y, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\pi$ \endlatexonly + * \xmlonly + * + * + * - + * π + * + * + * \endxmlonly + * /2 for \p y < 0. + * - atan2f(\p y, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pi$ \endlatexonly + * \xmlonly + * + * + * π + * + * + * \endxmlonly + * /2 for \p y > 0. + * - atan2f( + * \latexonly $\pm y$ \endlatexonly + * \xmlonly + * + * + * ± + * y + * + * + * \endxmlonly + * , + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * for finite \p y > 0. + * - atan2f( + * \latexonly $\pm y$ \endlatexonly + * \xmlonly + * + * + * ± + * y + * + * + * \endxmlonly + * , + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for finite \p y > 0. + * - atan2f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p x) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * /2 for finite \p x. + * - atan2f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 3\pi$ \endlatexonly + * \xmlonly + * + * + * ± + * 3 + * π + * + * + * \endxmlonly + * /4. + * - atan2f( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \pi$ \endlatexonly + * \xmlonly + * + * + * ± + * π + * + * + * \endxmlonly + * /4. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float atan2f(float y, float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the cosine of the input argument. + * + * Calculate the cosine of the input argument \p x (measured in radians). + * + * \return + * - cosf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - cosf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. 
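+ *
+ * A short usage sketch (helper name illustrative): the argument is radians, so
+ * degree inputs must be scaled first.
+ * \code
+ * __device__ float cos_degrees(float deg)
+ * {
+ *     return cosf(deg * 3.14159265358979323846f / 180.0f);
+ * }
+ * \endcode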
+ * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float cosf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the sine of the input argument. + * + * Calculate the sine of the input argument \p x (measured in radians). + * + * \return + * - sinf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sinf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the tangent of the input argument. + * + * Calculate the tangent of the input argument \p x (measured in radians). + * + * \return + * - tanf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tanf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns NaN. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float tanf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the hyperbolic cosine of the input argument. + * + * Calculate the hyperbolic cosine of the input argument \p x. + * + * \return + * - coshf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - coshf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float coshf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the hyperbolic sine of the input argument. + * + * Calculate the hyperbolic sine of the input argument \p x. + * + * \return + * - sinhf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sinhf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sinhf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the hyperbolic tangent of the input argument. + * + * Calculate the hyperbolic tangent of the input argument \p x. + * + * \return + * - tanhf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - tanhf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 1$ \endlatexonly + * \xmlonly + * + * + * ± + * 1 + * + * + * \endxmlonly. 
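+ *
+ * A short kernel sketch (illustrative): tanhf as an elementwise activation;
+ * the saturation cases above guarantee large inputs map cleanly to +/-1.
+ * \code
+ * __global__ void tanh_activate(float *v, int n)
+ * {
+ *     int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *     if (i < n) v[i] = tanhf(v[i]);
+ * }
+ * \endcode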
+ * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float tanhf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the natural logarithm of the input argument. + * + * Calculate the natural logarithm of the input argument \p x. + * + * \return + * - logf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - logf(1) returns +0. + * - logf(\p x) returns NaN for \p x < 0. + * - logf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float logf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument. + * + * Calculate + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly, + * the base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x. + * + * \return + * - expf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1. + * - expf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * - + * + * + * \endxmlonly + * ) returns +0. + * - expf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float expf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the base 10 logarithm of the input argument. + * + * Calculate the base 10 logarithm of the input argument \p x. + * + * \return + * - log10f( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * - log10f(1) returns +0. + * - log10f(\p x) returns NaN for \p x < 0. + * - log10f( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float log10f(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Break down the input argument into fractional and integral parts. + * + * Break down the argument \p x into fractional and integral parts. The integral part is stored in the argument \p iptr. + * Fractional and integral parts are given the same sign as the argument \p x. + * + * \return + * - modff( + * \latexonly $\pm x$ \endlatexonly + * \xmlonly + * + * + * ± + * x + * + * + * \endxmlonly + * , \p iptr) returns a result with the same sign as \p x. 
+ * - modff( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p iptr) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * and stores + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * in the object pointed to by \p iptr. + * - modff(NaN, \p iptr) stores a NaN in the object pointed to by \p iptr and returns a NaN. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float modff(float x, float *iptr) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the value of first argument to the power of second argument. + * + * Calculate the value of \p x to the power of \p y. + * + * \return + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * for \p y an odd integer less than 0. + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y less than 0 and not an odd integer. + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - powf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p y) returns +0 for \p y > 0 and not an odd integer. + * - powf(-1, + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns 1. + * - powf(+1, \p y) returns 1 for any \p y, even a NaN. + * - powf(\p x, + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns 1 for any \p x, even a NaN. + * - powf(\p x, \p y) returns a NaN for finite \p x < 0 and finite non-integer \p y. + * - powf(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - powf(\p x, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. + * - powf(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns +0 for + * \latexonly $| x | < 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * < + * 1 + * + * \endxmlonly. + * - powf(\p x, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for + * \latexonly $| x | > 1$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * | + * + * > + * 1 + * + * \endxmlonly. 
+ * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns -0 for \p y an odd integer less than 0. + * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0 and not an odd integer. + * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * for \p y an odd integer greater than 0. + * - powf( + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0 and not an odd integer. + * - powf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns +0 for \p y < 0. + * - powf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * , \p y) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * for \p y > 0. + * + * \note_accuracy_single + * \note_fastmath + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float powf(float x, float y) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the square root of the input argument. + * + * Calculate the nonnegative square root of \p x, + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \return + * Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * - sqrtf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - sqrtf( + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. + * - sqrtf(\p x) returns NaN if \p x is less than 0. + * + * \note_accuracy_single + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float sqrtf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate ceiling of the input argument. + * + * Compute the smallest integer value not less than \p x. + * + * \return + * Returns + * \latexonly $\lceil x \rceil$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * expressed as a floating-point number. + * - ceilf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * \endxmlonly. + * - ceilf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * ) returns + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * \endxmlonly. + */ +extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float ceilf(float x) __THROW; +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the largest integer less than or equal to \p x. + * + * Calculate the largest integer value which is less than or equal to \p x. + * + * \return + * Returns + * \latexonly $\lfloor x \rfloor$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * expressed as a floating-point number. 
+ * - floorf(\latexonly $\pm \infty$ \endlatexonly) returns \latexonly $\pm \infty$ \endlatexonly.
+ * - floorf(\latexonly $\pm 0$ \endlatexonly) returns \latexonly $\pm 0$ \endlatexonly.
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float floorf(float x) __THROW;
+/**
+ * \ingroup CUDA_MATH_SINGLE
+ * \brief Calculate the floating-point remainder of \p x / \p y.
+ *
+ * Calculate the floating-point remainder of \p x / \p y.
+ * The floating-point remainder of the division operation \p x / \p y calculated
+ * by this function is exactly the value x - n*y, where \p n is \p x / \p y with its fractional part truncated.
+ * The computed value will have the same sign as \p x, and its magnitude will be less than the magnitude of \p y.
+ * \return
+ * - Returns the floating-point remainder of \p x / \p y.
+ * - fmodf(\latexonly $\pm 0$ \endlatexonly, \p y) returns \latexonly $\pm 0$ \endlatexonly if \p y is not zero.
+ * - fmodf(\p x, \latexonly $\pm \infty$ \endlatexonly) returns \p x if \p x is finite.
+ * - fmodf(\p x, \p y) returns NaN if \p x is \latexonly $\pm\infty$ \endlatexonly or \p y is zero.
+ * - If either argument is NaN, NaN is returned.
+ *
+ * \note_accuracy_single
+ */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fmodf(float x, float y) __THROW;
+#if defined(__QNX__)
+/* redeclare some builtins that QNX uses */
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FLog(float, int);
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FCosh(float, float);
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FSinh(float, float);
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ float _FSinx(float, unsigned int, int);
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int _FDsign(float);
+extern __DEVICE_FUNCTIONS_DECL__ __device_builtin__ int _Dsign(double);
+#endif
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
+#endif /* _WIN32 && _M_AMD64 */
+
+}
+
+#if !defined(__CUDACC_RTC__)
+#include <math.h>
+#include <stdlib.h>
+
+#ifndef __CUDA_INTERNAL_SKIP_CPP_HEADERS__
+#include <cmath>
+#include <cstdlib>
+#endif /* __CUDA_INTERNAL_SKIP_CPP_HEADERS__ */
+#endif /* __CUDACC_RTC__ */
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__CUDACC_RTC__)
+
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(float x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(long double x);
+
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(float x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(long double x);
+
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(float x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(long double x);
+
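+/* Usage sketch (illustrative, not part of the original interface): under NVRTC
+   these type-generic overloads resolve on the argument type, so isnan(1.0f)
+   binds to the float overload and isnan(1.0) to the double one. */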
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(float x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(long double x);
+
+#elif defined(__GNUC__)
+
+#undef signbit
+#undef isfinite
+#undef isnan
+#undef isinf
+
+#if defined(__APPLE__)
+
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(float x);
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(double x);
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(long double x);
+
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(float x);
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(double x);
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(long double x);
+
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x) throw();
+#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(float x);
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(long double x);
+#else /* !(!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000) */
+template <class T>
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool __libcpp_isnan(T) _NOEXCEPT;
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(float x) _NOEXCEPT;
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(long double x) _NOEXCEPT;
+#endif /* !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000 */
+
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x) throw();
+#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(float x);
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(long double x);
+#else /* !(!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000) */
+template <class T>
+__cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ bool __libcpp_isinf(T) _NOEXCEPT;
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(float x) _NOEXCEPT;
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(long double x) _NOEXCEPT;
+#endif /* !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 7000 */
+
+#else /* __APPLE__ */
+
+#if ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)
+#if !defined(_NVHPC_CUDA)
+namespace std {
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool signbit(float x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool signbit(double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool signbit(long double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isfinite(float x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isfinite(double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isfinite(long double x);
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isnan(float x);
+/* GCC 6.1 uses ::isnan(double x) for isnan(double x) if the condition is true */
+#if _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x) throw();
+#else /* !(_GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC) */
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isnan(double x);
+#endif /* _GLIBCXX_HAVE_OBSOLETE_ISNAN && 
!_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isnan(long double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isinf(float x); +/* GCC 6.1 uses ::isinf(double x) for isinf(double x) if the condition is true. */ +#if _GLIBCXX_HAVE_OBSOLETE_ISINF && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x) throw(); +#else /* !(_GLIBCXX_HAVE_OBSOLETE_ISINF && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC) */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isinf(double x); +#endif /* _GLIBCXX_HAVE_OBSOLETE_ISINF && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ constexpr bool isinf(long double x); +} +#endif + +#else /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ + +#if defined(__QNX__) +#if (__QNX__) && !defined(_LIBCPP_VERSION) +/* QNX defines functions in std, need to declare them here */ +namespace std { +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(float x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(double x); +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(long double x); +} +#else +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool signbit(const float x); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool signbit(const double x); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool signbit(const long double x); +#endif +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(const float a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(const double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(const long double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(const float a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(const double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(const long double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(const float a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(const double a); +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(const long double a); +#else /* ! 
__QNX__ */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const float x);
+#if defined(__ICC)
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const double x) throw();
+#else /* !__ICC */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const double x);
+#endif /* __ICC */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int signbit(const long double x);
+
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const float x);
+#if defined(__ICC)
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const double x) throw();
+#else /* !__ICC */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const double x);
+#endif /* __ICC */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isfinite(const long double x);
+
+#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000
+template <class T>
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool __libcpp_isnan(T) _NOEXCEPT;
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(float x) _NOEXCEPT;
+#else /* !((defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(float x);
+#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */
+#if defined(__ANDROID__) || defined(__HORIZON__)
+#if !defined(_LIBCPP_VERSION)
+__forceinline__
+#endif /* !defined(_LIBCPP_VERSION) */
+#if _LIBCPP_VERSION >= 7000
+#ifdef _LIBCPP_PREFERRED_OVERLOAD
+_LIBCPP_INLINE_VISIBILITY _LIBCPP_PREFERRED_OVERLOAD __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(double x) _NOEXCEPT;
+#endif /* _LIBCPP_PREFERRED_OVERLOAD */
+#else /* _LIBCPP_VERSION < 7000 */
+__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x);
+#endif /* _LIBCPP_VERSION >= 7000 */
+#else /* !(__ANDROID__ || __HORIZON__) */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(double x) throw();
+#endif /* __ANDROID__ */
+#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isnan(long double x) _NOEXCEPT;
+#else /* !( (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isnan(long double x);
+#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */
+
+#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000
+static __inline__ __cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ unsigned __FLOAT_BITS(float __f);
+static __inline__ __cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ unsigned long long __DOUBLE_BITS(double __f);
+template <class T>
+__cudart_builtin__ __DEVICE_FUNCTIONS_DECL__ bool __libcpp_isinf(T) _NOEXCEPT;
+inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(float x) _NOEXCEPT;
+#else /* !( (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */
+__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(float x);
+#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */
+
+#if defined(__ANDROID__) || defined(__HORIZON__)
+#if !defined(_LIBCPP_VERSION)
+__forceinline__
+#endif /* !defined(_LIBCPP_VERSION) */
+#if _LIBCPP_VERSION >= 7000
+#ifdef _LIBCPP_PREFERRED_OVERLOAD
+_LIBCPP_INLINE_VISIBILITY 
_LIBCPP_PREFERRED_OVERLOAD __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(double x) _NOEXCEPT; +#endif /* _LIBCPP_PREFERRED_OVERLOAD */ +#else /* _LIBCPP_VERSION < 7000 */ +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x); +#endif /* _LIBCPP_VERSION >= 7000 */ +#else /* ! (__ANDROID__ || __HORIZON__) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(double x) throw(); +#endif /* __ANDROID__ || __HORIZON__ */ +#if (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 +inline _LIBCPP_INLINE_VISIBILITY __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool isinf(long double x) _NOEXCEPT; +#else /* !( (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000) */ +__forceinline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ int isinf(long double x); +#endif /* (defined(__ANDROID__) || defined(__HORIZON__)) && _LIBCPP_VERSION >= 8000 */ +#endif /* __QNX__ */ + +#endif /* ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) */ +#endif /* __APPLE__ */ + +#if !defined(_LIBCPP_VERSION) +#if defined(__clang__) +#if __has_include() +#define __NV_GLIBCXX_VERSION 40800 +#endif /* __has_include() */ +#endif /* __clang__ */ + +#if !defined(__NV_GLIBCXX_VERSION) +#define __NV_GLIBCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#endif /* !__NV_GLIBCXX_VERSION */ +#endif /* !defined(_LIBCPP_VERSION) */ + +#if !defined(__HORIZON__) || !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 +#if defined(__arm__) && !defined(_STLPORT_VERSION) && !_GLIBCXX_USE_C99 +#if !defined(__ANDROID__) || (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800) + +#if defined(__QNX__) +/* QNX defines functions in std, need to declare them here */ +namespace std { +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs (long long int a); +} +#elif defined(__HORIZON__) +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +#pragma GCC system_header +#endif +_LIBCPP_BEGIN_NAMESPACE_STD +__DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs (long long int a) throw(); +_LIBCPP_END_NAMESPACE_STD +#else +static __inline__ __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int a); +#endif /* __QNX__ || __HORIZON__*/ + +#endif /* !__ANDROID__ || (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800) */ +#endif /* __arm__ && !_STLPORT_VERSION && !_GLIBCXX_USE_C99 */ +#endif /* !defined(__HORIZON__) || !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 */ + +#if defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800 && !defined(__ibmxl__) + +#if !defined(_STLPORT_VERSION) +namespace __gnu_cxx +{ +#endif /* !_STLPORT_VERSION */ + +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int a); + +#if !defined(_STLPORT_VERSION) +} +#endif /* !_STLPORT_VERSION */ + +#endif /* defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION < 40800 && !__ibmxl__ */ + +namespace std +{ + template extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T __pow_helper(T, int); + template extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T __cmath_power(T, unsigned int); +} + +using std::abs; +using std::fabs; +using std::ceil; +using std::floor; +using std::sqrt; +#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 +using std::pow; +#endif /* !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 3800 */ +using std::log; +using std::log10; +using std::fmod; +using std::modf; +using std::exp; +using std::frexp; +using std::ldexp; +using std::asin; 
+using std::sin; +using std::sinh; +using std::acos; +using std::cos; +using std::cosh; +using std::atan; +using std::atan2; +using std::tan; +using std::tanh; + +#elif defined(_WIN32) + +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __CUDA_MATH_CRTIMP double __cdecl _hypot(double x, double y); +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __CUDA_MATH_CRTIMP float __cdecl _hypotf(float x, float y); + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int signbit(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#if _MSC_VER >= 1900 +#define __SIGNBIT_THROW throw() +#else +#define __SIGNBIT_THROW +#endif +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ bool signbit(long double) __SIGNBIT_THROW; +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __device_builtin__ __CUDA_MATH_CRTIMP int _ldsign(long double); +#undef __SIGNBIT_THROW +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is negative. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE signbit(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +#if _MSC_VER >= 1900 +#define __SIGNBIT_THROW throw() +#else +#define __SIGNBIT_THROW +#endif +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is negative. + */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __RETURN_TYPE signbit(double) __SIGNBIT_THROW; +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __device_builtin__ __CUDA_MATH_CRTIMP int _dsign(double); +#undef __RETURN_TYPE +#undef __SIGNBIT_THROW +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Return the sign bit of the input. + * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero value + * if and only if \p a is negative. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE signbit(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +#if _MSC_VER >= 1900 +#define __SIGNBIT_THROW throw() +#else +#define __SIGNBIT_THROW +#endif +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Return the sign bit of the input. 
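+ *
+ * A clarifying sketch (helper name illustrative): unlike a comparison against
+ * zero, signbit distinguishes negative zero.
+ * \code
+ * // signbit(-0.0f) is nonzero, while (-0.0f < 0.0f) is false.
+ * __device__ bool is_negative(float a)
+ * {
+ *     return signbit(a) != 0;
+ * }
+ * \endcode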
+ * + * Determine whether the floating-point value \p a is negative. + * + * \return + * Reports the sign bit of all values including infinities, zeros, and NaNs. + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is negative. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero value + * if and only if \p a is negative. + */ +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __RETURN_TYPE signbit(float) __SIGNBIT_THROW; +extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ __device_builtin__ __CUDA_MATH_CRTIMP int _fdsign(float); +#undef __RETURN_TYPE +#undef __SIGNBIT_THROW +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int isinf(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isinf(long double a); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * \return + * - With Visual Studio 2013 host compiler: Returns true if and only + * if \p a is an infinite value. + * - With other host compilers: Returns a nonzero value if and only + * if \p a is an infinite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * \return + * - With Visual Studio 2013 host compiler: Returns true if and only + * if \p a is an infinite value. + * - With other host compilers: Returns a nonzero value if and only + * if \p a is an infinite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(double a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is an infinite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero + * value if and only if \p a is an infinite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_SINGLE + * + * \brief Determine whether argument is infinite. + * + * Determine whether the floating-point value \p a is an infinite value + * (positive or negative). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is an infinite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a nonzero + * value if and only if \p a is an infinite value. 
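+ *
+ * A minimal usage sketch (the kernel and flag protocol are illustrative
+ * assumptions, not part of the documented API):
+ * \code
+ * __global__ void flag_inf(const float *v, int n, int *flag)
+ * {
+ *     int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *     if ((i < n) && isinf(v[i])) atomicExch(flag, 1); // nonzero/true for +/-inf
+ * }
+ * \endcode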
+ */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isinf(float a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int isnan(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isnan(long double a); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(double a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_SINGLE + * + * + * \brief Determine whether argument is a NaN. + * + * Determine whether the floating-point value \p a is a NaN. + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. + * Returns true if and only if \p a is a NaN value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns a + * nonzero value if and only if \p a is a NaN value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isnan(float a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __DEVICE_FUNCTIONS_DECL__ int isfinite(long double a); +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ bool isfinite(long double a); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). 
+ * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(double a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_DOUBLE + * + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(double a); +#undef __RETURN_TYPE +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +#define __RETURN_TYPE int +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. + */ +static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(float a); +#undef __RETURN_TYPE +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +#define __RETURN_TYPE bool +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Determine whether argument is finite. + * + * Determine whether the floating-point value \p a is a finite value + * (zero, subnormal, or normal and not infinity or NaN). + * + * \return + * - With Visual Studio 2013 host compiler: __RETURN_TYPE is 'bool'. Returns + * true if and only if \p a is a finite value. + * - With other host compilers: __RETURN_TYPE is 'int'. Returns + * a nonzero value if and only if \p a is a finite value. 
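+ *
+ * A minimal sanitizing sketch (the kernel name and the choice of 0.0f as
+ * replacement value are illustrative assumptions):
+ * \code
+ * __global__ void sanitize(float *v, int n)
+ * {
+ *     int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *     if ((i < n) && !isfinite(v[i])) v[i] = 0.0f; // replace NaN/inf with 0
+ * }
+ * \endcode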
+ */
+static __inline__ __DEVICE_FUNCTIONS_DECL__ __RETURN_TYPE isfinite(float a);
+#undef __RETURN_TYPE
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+template<class T> extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T _Pow_int(T, int);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int);
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+template<class T> extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ T _Pow_int(T, int) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int) throw();
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+
+#endif /* __CUDACC_RTC__ */
+
+#if __cplusplus >= 201103L
+#define __NV_NOEXCEPT noexcept
+#else /* !__cplusplus >= 201103L */
+#define __NV_NOEXCEPT throw()
+#endif /* __cplusplus >= 201103L */
+
+#if defined(_LIBCPP_VERSION) && defined(_LIBCPP_BEGIN_NAMESPACE_STD) && !defined(_STLPORT_VERSION)
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wc++11-extensions"
+#endif /* __clang__ */
+#if _LIBCPP_VERSION < 3800
+_LIBCPP_BEGIN_NAMESPACE_STD
+#endif /* _LIBCPP_VERSION < 3800 */
+#elif defined(__GNUC__) && !defined(_STLPORT_VERSION)
+namespace std {
+#endif /* defined(_LIBCPP_VERSION) && defined(_LIBCPP_BEGIN_NAMESPACE_STD) && !defined(_STLPORT_VERSION) ||
+          __GNUC__ && !_STLPORT_VERSION */
+
+#if defined(__CUDACC_RTC__) || defined(__GNUC__)
+
+#if defined(__CUDACC_RTC__) || \
+    (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION >= 40800) || \
+    defined(__ibmxl__)
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int abs(long long int);
+#endif /* __CUDACC__RTC__ ||
+          (defined(__NV_GLIBCXX_VERSION) && __NV_GLIBCXX_VERSION >= 40800) ||
+          __ibmxl__ */
+
+#endif /* __CUDACC_RTC__ || __GNUC__ */
+
+#if defined(__CUDACC_RTC__) || \
+    (!defined(_MSC_VER) || _MSC_VER < 1800) && \
+    (!defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 1101))
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long int __cdecl abs(long int);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl abs(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl abs(double);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fabs(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ceil(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl floor(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sqrt(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, float);
+
+#if !defined(__QNX__)
+
+#if defined(__GNUC__) && __cplusplus >= 201103L && !defined(_LIBCPP_VERSION)
+template<typename _Tp, typename _Up>
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__
+typename __gnu_cxx::__promote_2<_Tp, _Up>::__type pow(_Tp, _Up);
+#else /* !(defined(__GNUC__) && __cplusplus >= 201103L && !defined(_LIBCPP_VERSION)) */
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, int);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl pow(double, int);
+#endif /* defined(__GNUC__) && __cplusplus >= 201103L && !defined(_LIBCPP_VERSION) */
+
+#endif /* !defined(__QNX__) */
+
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log10(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fmod(float, float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl modf(float, float*);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl exp(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl frexp(float, int*);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ldexp(float, int);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl asin(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sin(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sinh(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl acos(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cos(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cosh(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan2(float, float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tan(float);
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tanh(float);
+#else /* __CUDACC_RTC__ ||
+         (!defined(_MSC_VER) || _MSC_VER < 1800) &&
+         (!defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 1101)) */
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long int __cdecl abs(long int) throw();
+#if defined(_LIBCPP_VERSION)
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ long long int __cdecl abs(long long int) throw();
+#endif /* defined(_LIBCPP_VERSION) */
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl abs(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl abs(double) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fabs(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ceil(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl floor(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sqrt(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, float) throw();
+#if defined(_LIBCPP_VERSION)
+#if (defined (__ANDROID__) || defined(__HORIZON__)) && (_LIBCPP_VERSION >= 9000)
+template<class _A1, class _A2>
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__
+#if _LIBCPP_VERSION >= 14000
+typename std::__enable_if_t
+#else /* _LIBCPP_VERSION < 14000 */
+typename std::_EnableIf
+#endif /* _LIBCPP_VERSION >= 14000 */
+<
+  std::is_arithmetic<_A1>::value &&
+  std::is_arithmetic<_A2>::value,
+  std::__promote<_A1, _A2>
+>::type pow(_A1 __lcpp_x, _A2 __lcpp_y) __NV_NOEXCEPT;
+#elif (defined(__APPLE__) && __clang_major__ >= 7) || _LIBCPP_VERSION >= 3800 || defined(__QNX__)
+template<class _Tp, class _Up>
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__
+#if _LIBCPP_VERSION >= 13000
+typename std::enable_if <
+#else /* _LIBCPP_VERSION < 13000 */
+typename std::__lazy_enable_if <
+#endif /* _LIBCPP_VERSION >= 13000 */
+  std::is_arithmetic<_Tp>::value && std::is_arithmetic<_Up>::value,
+  std::__promote<_Tp, _Up>
+>::type pow(_Tp __x, _Up __y) __NV_NOEXCEPT;
+#else /* !((__APPLE__ && __clang_major__ >= 7) || _LIBCPP_VERSION >= 3800) */
+template<class _Tp, class _Up>
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__
+typename enable_if <
+  std::is_arithmetic<_Tp>::value && std::is_arithmetic<_Up>::value,
+  typename std::__promote<_Tp, _Up>::type
+>::type pow(_Tp __x, _Up __y) __NV_NOEXCEPT;
+#endif /* (__APPLE__ && __clang_major__ >= 7) || _LIBCPP_VERSION >= 3800 */
+#else /* !defined(_LIBCPP_VERSION) */
+#if !(defined(__GNUC__) && __cplusplus >= 201103L)
+#if (defined(_MSC_VER) && (_MSC_VER >= 1928)) && !(defined __CUDA_INTERNAL_SKIP_CPP_HEADERS__)
+template<class _Ty1, class _Ty2, ::std::enable_if_t< ::std::is_arithmetic_v<_Ty1> && ::std::is_arithmetic_v<_Ty2>, int> = 0>
+[[nodiscard]] __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ ::std::_Common_float_type_t<_Ty1, _Ty2> __cdecl pow(_Ty1 _Left, _Ty2 _Right) noexcept;
+#else
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl pow(float, int) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ double __cdecl pow(double, int) throw();
+#endif /* (defined(_MSC_VER) && (_MSC_VER >= 1928)) && !(defined __CUDA_INTERNAL_SKIP_CPP_HEADERS__) */
+#endif /* !(defined(__GNUC__) && __cplusplus >= 201103L) */
+#endif /* defined(_LIBCPP_VERSION) */
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl log10(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl fmod(float, float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl modf(float, float*) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl exp(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl frexp(float, int*) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl ldexp(float, int) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl asin(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sin(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl sinh(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl acos(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cos(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl cosh(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl atan2(float, float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tan(float) throw();
+extern __DEVICE_FUNCTIONS_DECL__ __cudart_builtin__ float __cdecl tanh(float) throw();
+#endif /* __CUDACC_RTC__ ||
+          (!defined(_MSC_VER) || _MSC_VER < 1800) &&
+          (!defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 1101)) */
+
+#if defined(_LIBCPP_VERSION) && defined(_LIBCPP_END_NAMESPACE_STD) && !defined(_STLPORT_VERSION)
+#if _LIBCPP_VERSION < 3800
+_LIBCPP_END_NAMESPACE_STD
+#endif /* _LIBCPP_VERSION < 3800 */
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif /* __clang__ */
+#elif defined(__GNUC__) && !defined(_STLPORT_VERSION)
+}
+#endif /* defined(_LIBCPP_VERSION) && defined(_LIBCPP_BEGIN_NAMESPACE_STD) && !defined(_STLPORT_VERSION) ||
+          __GNUC__ && !_STLPORT_VERSION */
+
+#undef __DEVICE_FUNCTIONS_DECL__
+#undef __NV_NOEXCEPT
+
+#if defined(__CUDACC_RTC__)
+#define __MATH_FUNCTIONS_DECL__ __host__ __device__
+#define __MATH_FUNCTIONS_DEVICE_DECL__ __device__
+#else /* __CUDACC_RTC__ */
+#define __MATH_FUNCTIONS_DECL__ static inline __host__ __device__ __cudart_builtin__
+#define __MATH_FUNCTIONS_DEVICE_DECL__ static inline __device__ __cudart_builtin__
+#endif /* __CUDACC_RTC__ */
+
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+#if defined(__QNX__) || (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800)
+#if defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000)
+#if defined(_LIBCPP_VERSION)
+#define __NV_NOEXCEPT _NOEXCEPT
+_LIBCPP_BEGIN_NAMESPACE_STD
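+/* On QNX with an older libc++ the single-precision overloads below are
+ * declared inside the versioned std namespace and re-exported afterwards
+ * through using-declarations; without libc++ they are wrapped in a plain
+ * namespace std block instead. */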
+#else +#define __NV_NOEXCEPT +namespace std { +__host__ __device__ __cudart_builtin__ int ilogbf(float a); +#endif +#else /* !(defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000)) */ +#define __NV_NOEXCEPT _NOEXCEPT +#endif /* defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) */ +__host__ __device__ __cudart_builtin__ float logb(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ int ilogb(float a) __NV_NOEXCEPT; + +__host__ __device__ __cudart_builtin__ float scalbn(float a, int b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float scalbln(float a, long int b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float exp2(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float expm1(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float log2(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float log1p(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float acosh(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float asinh(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float atanh(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float hypot(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float cbrt(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float erf(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float erfc(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float lgamma(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float tgamma(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float copysign(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float nextafter(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float remainder(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float remquo(float a, float b, int *quo) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float round(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long int lround(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long long int llround(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float trunc(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float rint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long int lrint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ long long int llrint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float nearbyint(float a) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fdim(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fma(float a, float b, float c) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fmax(float a, float b) __NV_NOEXCEPT; +__host__ __device__ __cudart_builtin__ float fmin(float a, float b) __NV_NOEXCEPT; +#if defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) +#if defined(_LIBCPP_VERSION) +_LIBCPP_END_NAMESPACE_STD +using _VSTD::logb; +using _VSTD::ilogb; +using _VSTD::scalbn; +using _VSTD::scalbln; +using _VSTD::exp2; +using _VSTD::expm1; +using _VSTD::log2; +using _VSTD::log1p; +using _VSTD::acosh; +using _VSTD::asinh; +using _VSTD::atanh; +using _VSTD::hypot; +using _VSTD::cbrt; +using _VSTD::erf; +using _VSTD::erfc; +using _VSTD::lgamma; +using _VSTD::tgamma; +using _VSTD::copysign; +using _VSTD::nextafter; +using _VSTD::remainder; 
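+/* _VSTD is libc++'s internal alias for its versioned std namespace; these
+ * using-declarations re-export the overloads declared above into the global
+ * namespace so unqualified calls keep working. */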
+using _VSTD::remquo; +using _VSTD::round; +using _VSTD::lround; +using _VSTD::llround; +using _VSTD::trunc; +using _VSTD::rint; +using _VSTD::lrint; +using _VSTD::llrint; +using _VSTD::nearbyint; +using _VSTD::fdim; +using _VSTD::fma; +using _VSTD::fmax; +using _VSTD::fmin; +#else +} +#endif +#endif /* defined(__QNX__) && (!defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 8000) */ +#undef __NV_NOEXCEPT +#else /* !(defined(__QNX__ ) || (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800)) */ +#if ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) +namespace std { +__host__ __device__ __cudart_builtin__ constexpr float logb(float a); +__host__ __device__ __cudart_builtin__ constexpr int ilogb(float a); +__host__ __device__ __cudart_builtin__ constexpr float scalbn(float a, int b); +__host__ __device__ __cudart_builtin__ constexpr float scalbln(float a, long int b); +__host__ __device__ __cudart_builtin__ constexpr float exp2(float a); +__host__ __device__ __cudart_builtin__ constexpr float expm1(float a); +__host__ __device__ __cudart_builtin__ constexpr float log2(float a); +__host__ __device__ __cudart_builtin__ constexpr float log1p(float a); +__host__ __device__ __cudart_builtin__ constexpr float acosh(float a); +__host__ __device__ __cudart_builtin__ constexpr float asinh(float a); +__host__ __device__ __cudart_builtin__ constexpr float atanh(float a); +__host__ __device__ __cudart_builtin__ constexpr float hypot(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float cbrt(float a); +__host__ __device__ __cudart_builtin__ constexpr float erf(float a); +__host__ __device__ __cudart_builtin__ constexpr float erfc(float a); +__host__ __device__ __cudart_builtin__ constexpr float lgamma(float a); +__host__ __device__ __cudart_builtin__ constexpr float tgamma(float a); +__host__ __device__ __cudart_builtin__ constexpr float copysign(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float nextafter(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float remainder(float a, float b); +__host__ __device__ __cudart_builtin__ float remquo(float a, float b, int *quo); +__host__ __device__ __cudart_builtin__ constexpr float round(float a); +__host__ __device__ __cudart_builtin__ constexpr long int lround(float a); +__host__ __device__ __cudart_builtin__ constexpr long long int llround(float a); +__host__ __device__ __cudart_builtin__ constexpr float trunc(float a); +__host__ __device__ __cudart_builtin__ constexpr float rint(float a); +__host__ __device__ __cudart_builtin__ constexpr long int lrint(float a); +__host__ __device__ __cudart_builtin__ constexpr long long int llrint(float a); +__host__ __device__ __cudart_builtin__ constexpr float nearbyint(float a); +__host__ __device__ __cudart_builtin__ constexpr float fdim(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float fma(float a, float b, float c); +__host__ __device__ __cudart_builtin__ constexpr float fmax(float a, float b); +__host__ __device__ __cudart_builtin__ constexpr float fmin(float a, float b); +} +#else /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ +__MATH_FUNCTIONS_DECL__ float logb(float a); + +__MATH_FUNCTIONS_DECL__ int ilogb(float a); + +__MATH_FUNCTIONS_DECL__ float scalbn(float a, int b); + +__MATH_FUNCTIONS_DECL__ float scalbln(float a, long int b); + +__MATH_FUNCTIONS_DECL__ float exp2(float a); + +__MATH_FUNCTIONS_DECL__ float expm1(float a); + +__MATH_FUNCTIONS_DECL__ float 
log2(float a);
+
+__MATH_FUNCTIONS_DECL__ float log1p(float a);
+
+__MATH_FUNCTIONS_DECL__ float acosh(float a);
+
+__MATH_FUNCTIONS_DECL__ float asinh(float a);
+
+__MATH_FUNCTIONS_DECL__ float atanh(float a);
+
+__MATH_FUNCTIONS_DECL__ float hypot(float a, float b);
+
+__MATH_FUNCTIONS_DECL__ float cbrt(float a);
+
+__MATH_FUNCTIONS_DECL__ float erf(float a);
+
+__MATH_FUNCTIONS_DECL__ float erfc(float a);
+
+__MATH_FUNCTIONS_DECL__ float lgamma(float a);
+
+__MATH_FUNCTIONS_DECL__ float tgamma(float a);
+
+__MATH_FUNCTIONS_DECL__ float copysign(float a, float b);
+
+__MATH_FUNCTIONS_DECL__ float nextafter(float a, float b);
+
+__MATH_FUNCTIONS_DECL__ float remainder(float a, float b);
+
+__MATH_FUNCTIONS_DECL__ float remquo(float a, float b, int *quo);
+
+__MATH_FUNCTIONS_DECL__ float round(float a);
+
+__MATH_FUNCTIONS_DECL__ long int lround(float a);
+
+__MATH_FUNCTIONS_DECL__ long long int llround(float a);
+
+__MATH_FUNCTIONS_DECL__ float trunc(float a);
+
+__MATH_FUNCTIONS_DECL__ float rint(float a);
+
+__MATH_FUNCTIONS_DECL__ long int lrint(float a);
+
+__MATH_FUNCTIONS_DECL__ long long int llrint(float a);
+
+__MATH_FUNCTIONS_DECL__ float nearbyint(float a);
+
+__MATH_FUNCTIONS_DECL__ float fdim(float a, float b);
+
+__MATH_FUNCTIONS_DECL__ float fma(float a, float b, float c);
+
+__MATH_FUNCTIONS_DECL__ float fmax(float a, float b);
+
+__MATH_FUNCTIONS_DECL__ float fmin(float a, float b);
+#endif /* ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) */
+#endif /* defined(__QNX__) || (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800) */
+#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+extern __host__ __device__ __cudart_builtin__ float __cdecl logb(float) throw();
+extern __host__ __device__ __cudart_builtin__ int __cdecl ilogb(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl scalbn(float, int) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl scalbln(float, long int) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl exp2(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl expm1(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl log2(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl log1p(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl acosh(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl asinh(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl atanh(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl hypot(float, float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl cbrt(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl erf(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl erfc(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl lgamma(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl tgamma(float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl copysign(float, float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl nextafter(float, float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl remainder(float, float) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl remquo(float, float, int *) throw();
+extern __host__ __device__ __cudart_builtin__ float __cdecl round(float) throw();
+extern
__host__ __device__ __cudart_builtin__ long int __cdecl lround(float) throw(); +extern __host__ __device__ __cudart_builtin__ long long int __cdecl llround(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl trunc(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl rint(float) throw(); +extern __host__ __device__ __cudart_builtin__ long int __cdecl lrint(float) throw(); +extern __host__ __device__ __cudart_builtin__ long long int __cdecl llrint(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl nearbyint(float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fdim(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fma(float, float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fmax(float, float) throw(); +extern __host__ __device__ __cudart_builtin__ float __cdecl fmin(float, float) throw(); +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +__MATH_FUNCTIONS_DECL__ float exp10(const float a); + +__MATH_FUNCTIONS_DECL__ float rsqrt(const float a); + +__MATH_FUNCTIONS_DECL__ float rcbrt(const float a); + +__MATH_FUNCTIONS_DECL__ float sinpi(const float a); + +__MATH_FUNCTIONS_DECL__ float cospi(const float a); + +__MATH_FUNCTIONS_DECL__ void sincospi(const float a, float *const sptr, float *const cptr); + +__MATH_FUNCTIONS_DECL__ void sincos(const float a, float *const sptr, float *const cptr); + +__MATH_FUNCTIONS_DECL__ float j0(const float a); + +__MATH_FUNCTIONS_DECL__ float j1(const float a); + +__MATH_FUNCTIONS_DECL__ float jn(const int n, const float a); + +__MATH_FUNCTIONS_DECL__ float y0(const float a); + +__MATH_FUNCTIONS_DECL__ float y1(const float a); + +__MATH_FUNCTIONS_DECL__ float yn(const int n, const float a); + +__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i0(const float a); + +__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i1(const float a); + +__MATH_FUNCTIONS_DECL__ float erfinv(const float a); + +__MATH_FUNCTIONS_DECL__ float erfcinv(const float a); + +__MATH_FUNCTIONS_DECL__ float normcdfinv(const float a); + +__MATH_FUNCTIONS_DECL__ float normcdf(const float a); + +__MATH_FUNCTIONS_DECL__ float erfcx(const float a); + +__MATH_FUNCTIONS_DECL__ double copysign(const double a, const float b); + +__MATH_FUNCTIONS_DECL__ double copysign(const float a, const double b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p int and \p unsigned \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int min(const int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p int and \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. 
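+ *
+ * The mixed signed/unsigned overloads above convert the signed argument
+ * before comparing, which can surprise with negative values. A small sketch
+ * (the device function is an illustrative assumption) of that behavior:
+ * \code
+ * __device__ unsigned int pick(void)
+ * {
+ *     return min(-1, 2u); // -1 converts to 0xFFFFFFFFu, so the result is 2u
+ * }
+ * \endcode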
+ */ +__MATH_FUNCTIONS_DECL__ long int min(const long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p int and \p unsigned \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int min(const long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p int and \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ long long int min(const long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p long \p long \p int and \p unsigned \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the minimum value of the input \p unsigned \p long \p long \p int and \p long \p long \p int arguments. + * + * Calculate the minimum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the minimum value of the input \p float arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fminf() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ float min(const float a, const float b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the minimum value of the input \p float arguments. + * + * Calculate the minimum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fmin() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double min(const double a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the minimum value of the input \p float and \p double arguments. + * + * Convert \p float argument \p a to \p double, followed by ::fmin(). 
+ * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double min(const float a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the minimum value of the input \p double and \p float arguments. + * + * Convert \p float argument \p b to \p double, followed by ::fmin(). + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double min(const double a, const float b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p int and \p unsigned \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int max(const int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p int and \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ long int max(const long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p int and \p unsigned \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int max(const long int a, const unsigned long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p int and \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ long long int max(const long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p long \p long \p int and \p unsigned \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. 
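+ *
+ * As with the \p min family, the conversion happens before the comparison.
+ * A sketch (the device function is an illustrative assumption) of this
+ * 64-bit mixed overload:
+ * \code
+ * __device__ unsigned long long int pick64(void)
+ * {
+ *     // -1ll converts to 0xFFFFFFFFFFFFFFFFull, which then wins the comparison
+ *     return max(-1ll, 2ull);
+ * }
+ * \endcode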
+ */ +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const long long int a, const unsigned long long int b); + +/** + * \ingroup CUDA_MATH_INT + * \brief Calculate the maximum value of the input \p unsigned \p long \p long \p int and \p long \p long \p int arguments. + * + * Calculate the maximum value of the arguments \p a and \p b, perform integer promotion first. + */ +__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const long long int b); + +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Calculate the maximum value of the input \p float arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fmaxf() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ float max(const float a, const float b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the maximum value of the input \p float arguments. + * + * Calculate the maximum value of the arguments \p a and \p b. + * Behavior is equivalent to ::fmax() function. + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double max(const double a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the maximum value of the input \p float and \p double arguments. + * + * Convert \p float argument \p a to \p double, followed by ::fmax(). + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double max(const float a, const double b); + +/** + * \ingroup CUDA_MATH_DOUBLE + * \brief Calculate the maximum value of the input \p double and \p float arguments. + * + * Convert \p float argument \p b to \p double, followed by ::fmax(). + * + * Note, this is different from \p std:: specification + */ +__MATH_FUNCTIONS_DECL__ double max(const double a, const float b); + +#undef __MATH_FUNCTIONS_DECL__ +#undef __MATH_FUNCTIONS_DEVICE_DECL__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ +#undef EXCLUDE_FROM_RTC + +extern "C"{ +inline __device__ void *__nv_aligned_device_malloc(size_t size, size_t align) +{ + __device__ void *__nv_aligned_device_malloc_impl(size_t, size_t); + return __nv_aligned_device_malloc_impl(size, align); +} +} + +#endif /* __cplusplus && __CUDACC__ */ + +#define EXCLUDE_FROM_RTC + +#if !defined(__CUDACC__) + +/******************************************************************************* +* * +* ONLY FOR HOST CODE! 
NOT FOR DEVICE EXECUTION * +* * +*******************************************************************************/ + +#include + +#if defined(_WIN32) +#pragma warning (push) +#pragma warning (disable : 4211) + +#endif /* _WIN32 */ + +__func__(double rsqrt(double a)); + +__func__(double rcbrt(double a)); + +__func__(double sinpi(double a)); + +__func__(double cospi(double a)); + +__func__(void sincospi(double a, double *sptr, double *cptr)); + +__func__(double erfinv(double a)); + +__func__(double erfcinv(double a)); + +__func__(double normcdfinv(double a)); + +__func__(double normcdf(double a)); + +__func__(double erfcx(double a)); + +__func__(float rsqrtf(float a)); + +__func__(float rcbrtf(float a)); + +__func__(float sinpif(float a)); + +__func__(float cospif(float a)); + +__func__(void sincospif(float a, float *sptr, float *cptr)); + +__func__(float erfinvf(float a)); + +__func__(float erfcinvf(float a)); + +__func__(float normcdfinvf(float a)); + +__func__(float normcdff(float a)); + +__func__(float erfcxf(float a)); + +__func__(int min(int a, int b)); + +__func__(unsigned int umin(unsigned int a, unsigned int b)); + +__func__(long long int llmin(long long int a, long long int b)); + +__func__(unsigned long long int ullmin(unsigned long long int a, unsigned long long int b)); + +__func__(int max(int a, int b)); + +__func__(unsigned int umax(unsigned int a, unsigned int b)); + +__func__(long long int llmax(long long int a, long long int b)); + +__func__(unsigned long long int ullmax(unsigned long long int a, unsigned long long int b)); + +#if defined(_WIN32) || defined(__APPLE__) || defined (__ANDROID__) + +__func__(int __isnan(double a)); + +#endif /* _WIN32 || __APPLE__ || __ANDROID__ */ + +#if defined(_WIN32) || defined(__APPLE__) || defined (__QNX__) + +__func__(void sincos(double a, double *sptr, double *cptr)); + +#endif /* _WIN32 || __APPLE__ || __QNX__ */ + +#if defined(_WIN32) || defined(__APPLE__) + +__func__(double exp10(double a)); + +__func__(float exp10f(float a)); + +__func__(void sincosf(float a, float *sptr, float *cptr)); + +__func__(int __isinf(double a)); + +#endif /* _WIN32 || __APPLE__ */ + +#if (defined(_WIN32) && (!defined(_MSC_VER) || _MSC_VER < 1800)) || defined (__ANDROID__) + +__func__(double log2(double a)); + +#endif /* (_WIN32 && (!defined(_MSC_VER) || _MSC_VER < 1800)) || __ANDROID__ */ + +#if defined(_WIN32) + +__func__(int __signbit(double a)); + +__func__(int __finite(double a)); + +__func__(int __signbitl(long double a)); + +__func__(int __signbitf(float a)); + +__func__(int __finitel(long double a)); + +__func__(int __finitef(float a)); + +__func__(int __isinfl(long double a)); + +__func__(int __isinff(float a)); + +__func__(int __isnanl(long double a)); + +__func__(int __isnanf(float a)); + +#endif /* _WIN32 */ + +#if defined(_WIN32) && (!defined(_MSC_VER) || _MSC_VER < 1800) + +__func__(double copysign(double a, double b)); + +__func__(double fmax(double a, double b)); + +__func__(double fmin(double a, double b)); + +__func__(double trunc(double a)); + +__func__(double round(double a)); + +__func__(long int lround(double a)); + +__func__(long long int llround(double a)); + +__func__(double rint(double a)); + +__func__(double nearbyint(double a)); + +__func__(long int lrint(double a)); + +__func__(long long int llrint(double a)); + +__func__(double fdim(double a, double b)); + +__func__(double scalbn(double a, int b)); + +__func__(double scalbln(double a, long int b)); + +__func__(double exp2(double a)); + +__func__(double log1p(double a)); + 
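+/* The __func__(...) declarations in this host-only section provide CRT
+ * fallbacks for math functions that older host toolchains (e.g. pre-VS2013
+ * MSVC) do not supply. A minimal host-side sketch, assuming the header is
+ * reached through a public runtime header and this block is active:
+ * \code
+ * double r = rsqrt(2.0);    // reciprocal square root on the host
+ * float  s = sinpif(0.5f);  // sin(pi * 0.5f) == 1.0f
+ * \endcode
+ */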
+__func__(double expm1(double a)); + +__func__(double cbrt(double a)); + +__func__(double acosh(double a)); + +__func__(double asinh(double a)); + +__func__(double atanh(double a)); + +__func__(int ilogb(double a)); + +__func__(double logb(double a)); + +__func__(double remquo(double a, double b, int *quo)); + +__func__(double remainder(double a, double b)); + +__func__(double fma (double a, double b, double c)); + +__func__(double nextafter(double a, double b)); + +__func__(double erf(double a)); + +__func__(double erfc(double a)); + +__func__(double lgamma(double a)); + +__func__(unsigned long long int __internal_host_nan_kernel(const char *s)); + +__func__(double nan(const char *tagp)); + +__func__(double __host_tgamma_kernel(double a)); + +__func__(double __host_stirling_poly(double a)); + +__func__(double __host_tgamma_stirling(double a)); + +__func__(double tgamma(double a)); + +__func__(float fmaxf(float a, float b)); + +__func__(float fminf(float a, float b)); + +__func__(float roundf(float a)); + +__func__(long int lroundf(float a)); + +__func__(long long int llroundf(float a)); + +__func__(float truncf(float a)); + +__func__(float rintf(float a)); + +__func__(float nearbyintf(float a)); + +__func__(long int lrintf(float a)); + +__func__(long long int llrintf(float a)); + +__func__(float logbf(float a)); + +__func__(float scalblnf(float a, long int b)); + +__func__(float log2f(float a)); + +__func__(float exp2f(float a)); + +__func__(float acoshf(float a)); + +__func__(float asinhf(float a)); + +__func__(float atanhf(float a)); + +__func__(float cbrtf(float a)); + +__func__(float expm1f(float a)); + +__func__(float fdimf(float a, float b)); + +__func__(float log1pf(float a)); + +__func__(float scalbnf(float a, int b)); + +__func__(float fmaf(float a, float b, float c)); + +__func__(int ilogbf(float a)); + +__func__(float erff(float a)); + +__func__(float erfcf(float a)); + +__func__(float lgammaf(float a)); + +__func__(float tgammaf(float a)); + +__func__(float remquof(float a, float b, int *quo)); + +__func__(float remainderf(float a, float b)); + +__func__(float copysignf(float a, float b)); + +__func__(float nextafterf(float a, float b)); + +__func__(float nanf(const char *tagp)); + +#endif /* _WIN32 && (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if defined(_WIN32) +#pragma warning (pop) +#endif /* _WIN32 */ + +#endif /* !__CUDACC__ */ + +#undef EXCLUDE_FROM_RTC + +#if !defined(__CUDACC_RTC__) + +#include "math_functions.hpp" + +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__MATH_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H__ +#endif diff --git a/miniCUDA124/include/crt/math_functions.hpp b/miniCUDA124/include/crt/math_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f99c33d4c4db25fd1091b8a7afd30b13a7aef966 --- /dev/null +++ b/miniCUDA124/include/crt/math_functions.hpp @@ -0,0 +1,3398 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/math_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/math_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
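+/* Including this file directly trips the diagnostic above; the supported
+ * entry points are the public runtime headers, e.g.:
+ * \code
+ * #include <cuda_runtime.h>  // pulls in crt/math_functions.hpp indirectly
+ * \endcode
+ */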
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_HPP__
+#endif
+
+#if !defined(__MATH_FUNCTIONS_HPP__)
+#define __MATH_FUNCTIONS_HPP__
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#include "builtin_types.h"
+#include "host_defines.h"
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#if defined(__CUDACC_RTC__)
+
+__host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); }
+__host__ __device__ __cudart_builtin__ int signbit(const double x) { return __signbit(x); }
+__host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(static_cast<double>(x));}
+
+__host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __finitef(x); }
+__host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __finite(x); }
+__host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __finitel(static_cast<double>(x)); }
+
+__host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); }
+__host__ __device__ __cudart_builtin__ int isnan(const double x) { return __isnan(x); }
+__host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnanl(static_cast<double>(x)); }
+
+__host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); }
+__host__ __device__ __cudart_builtin__ int isinf(const double x) { return __isinf(x); }
+__host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinfl(static_cast<double>(x)); }
+
+__host__ __device__ __cudart_builtin__ long long int abs(const long long int a) { return llabs(a); }
+
+__host__ __device__ __cudart_builtin__ long int abs(const long int in) { return llabs(in); }
+__host__ __device__ __cudart_builtin__ float abs(const float in) { return fabsf(in); }
+__host__ __device__ __cudart_builtin__ double abs(const double in) { return fabs(in); }
+__host__ __device__ __cudart_builtin__ float fabs(const float in) { return fabsf(in); }
+__host__ __device__ __cudart_builtin__ float ceil(const float in) { return ceilf(in); }
+__host__ __device__ __cudart_builtin__ float floor(const float in) { return floorf(in); }
+__host__ __device__ __cudart_builtin__ float sqrt(const float in) { return sqrtf(in); }
+__host__ __device__ __cudart_builtin__ float pow(const float a, const float b) { return powf(a, b); }
+extern "C" __device__ float powif(float, int);
+__host__ __device__ __cudart_builtin__ float pow(const float a, const int b) { return powif(a, b); }
+extern "C" __device__ double powi(double, int);
+__host__ __device__ __cudart_builtin__ double pow(const double a, const int b) { return powi(a, b); }
+__host__ __device__ __cudart_builtin__ float log(const float in) { return logf(in); }
+__host__ __device__ __cudart_builtin__ float log10(const float in) { return log10f(in); }
+__host__ __device__ __cudart_builtin__ float fmod(const float a, const float b) { return fmodf(a, b); }
+__host__ __device__ __cudart_builtin__ float modf(const
float a, float*b) { return modff(a, b); } +__host__ __device__ __cudart_builtin__ float exp(const float in) { return expf(in); } +__host__ __device__ __cudart_builtin__ float frexp(const float a, int*b) { return frexpf(a, b); } +__host__ __device__ __cudart_builtin__ float ldexp(const float a, int b) { return ldexpf(a, b); } +__host__ __device__ __cudart_builtin__ float asin(const float in) { return asinf(in); } +__host__ __device__ __cudart_builtin__ float sin(const float in) { return sinf(in); } +__host__ __device__ __cudart_builtin__ float sinh(const float in) { return sinhf(in); } +__host__ __device__ __cudart_builtin__ float acos(const float in) { return acosf(in); } +__host__ __device__ __cudart_builtin__ float cos(const float in) { return cosf(in); } +__host__ __device__ __cudart_builtin__ float cosh(const float in) { return coshf(in); } +__host__ __device__ __cudart_builtin__ float atan(const float in) { return atanf(in); } +__host__ __device__ __cudart_builtin__ float atan2(const float a, const float b) { return atan2f(a, b); } +__host__ __device__ __cudart_builtin__ float tan(const float in) { return tanf(in); } +__host__ __device__ __cudart_builtin__ float tanh(const float in) { return tanhf(in); } + +#elif defined(__GNUC__) + +#undef signbit +#undef isfinite +#undef isnan +#undef isinf + +#if defined(_LIBCPP_VERSION) +extern "C" __device__ float powif(float, int); +extern "C" __device__ double powi(double, int); +#endif /* _LIBCPP_VERSION */ + +#if defined(__APPLE__) +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return __signbitd(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(x);} + +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __isfinitef(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __isfinited(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __isfinite(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) throw() { return __isnand(x); } +#if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnan(x); } +#endif /* defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 */ + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) throw() { return __isinfd(x); } +#if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinf(x); } +#endif /* defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 7000 */ +#else /* __APPLE__ */ + +#if ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) +#if defined(__CUDA_ARCH__) +#define __NV_BUILTIN_FUNC_DECL__ __forceinline__ __host__ __device__ __cudart_builtin__ +#if _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC +__NV_BUILTIN_FUNC_DECL__ int isnan(const double a) throw() { return __isnan(a); } +__NV_BUILTIN_FUNC_DECL__ int isinf(const double x) throw() { return __isinf(x); } 
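+/* These int-returning isnan/isinf overloads are only emitted for device
+ * compilation (see the __CUDA_ARCH__ guard above) and only while libstdc++
+ * still exposes the obsolete dynamic isnan/isinf signatures, so the device
+ * declarations line up with what the host's math.h declared. */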
+#endif /* _GLIBCXX_HAVE_OBSOLETE_ISNAN && !_GLIBCXX_NO_OBSOLETE_ISINF_ISNAN_DYNAMIC */ +#undef __NV_BUILTIN_FUNC_DECL__ +#endif /* __CUDA_ARCH */ +#else /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ + +#if defined(__QNX__) +#if defined(__QNX__) && defined(_LIBCPP_VERSION) +static __inline__ __host__ __device__ __cudart_builtin__ bool signbit(const float x) +{ +#if defined(__CUDA_ARCH__) + return (__signbitf(x) != 0); +#else /* !__CUDA_ARCH__ */ + return signbit(x); +#endif /* __CUDA_ARCH__ */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool signbit(const double x) +{ +#if defined(__CUDA_ARCH__) + return (__signbit(x) != 0); +#else /* !__CUDA_ARCH__ */ + return signbit(x); +#endif /* __CUDA_ARCH__ */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool signbit(const long double x) +{ +#if defined(__CUDA_ARCH__) + return (__signbitl(x) != 0); +#else /* !__CUDA_ARCH__ */ + return signbit(x); +#endif /* __CUDA_ARCH__ */ +} +#endif /* (__QNX__ && _LIBCPP_VERSION) */ + +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__finitel(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__finite(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__finitef(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} + +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnan(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} + +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinfl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isinff(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} + +#elif ( (defined(__ANDROID__) || defined(__HORIZON__)) && defined(_LIBCPP_VERSION)) +#if defined(__CUDA_ARCH__) +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { 
return __signbit(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(x);} + +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __finitef(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __finite(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __finitel(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) { return __isnan(x); } +#if _LIBCPP_VERSION < 8000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnanl(x); } +#endif /* _LIBCPP_VERSION < 8000 */ + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) { return __isinf(x); } +#if _LIBCPP_VERSION < 8000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinfl(x); } +#endif /* _LIBCPP_VERSION < 8000 */ +#else /* !defined(__CUDA_ARCH__) */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return signbit(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return signbit(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return signbit(x);} + +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return isfinite(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return isfinite(x); } +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return isfinite(x); } + +#if _LIBCPP_VERSION < 8000 +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return isnan(x); } +/* int isnan(double) provided by math.h */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return isnan(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return isinf(x); } +/* int isinf(double) provided by math.h */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return isinf(x); } +#endif /* _LIBCPP_VERSION < 8000 */ + +#endif /* defined(__CUDA_ARCH__) */ + +#else /* !__QNX__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const float x) { return __signbitf(x); } +#if defined(__ICC) +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) throw() { return __signbit(x); } +#else /* !__ICC */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const double x) { return __signbit(x); } +#endif /* __ICC */ +__forceinline__ __host__ __device__ __cudart_builtin__ int signbit(const long double x) { return __signbitl(x);} + +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { +#if defined(__CUDA_ARCH__) + return __finitef(x); +#else /* !__CUDA_ARCH__ */ + return __isfinitef(x); +#endif /* __CUDA_ARCH__ */ +} +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const float x) { return __finitef(x); } +#endif /* __ANDROID__ */ + +#if defined(__ANDROID__) +__forceinline__ __host__ 
__device__ __cudart_builtin__ int isfinite(const double x) +{ +#ifdef __CUDA_ARCH__ + return __finite(x); +#else /* !__CUDA_ARCH__ */ + return __isfinite(x); +#endif /* __CUDA_ARCH__ */ +} +#elif defined(__ICC) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) throw() { return __finite(x); } +#else +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const double x) { return __finite(x); } +#endif /* __ANDROID__ */ + +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) +{ +#ifdef __CUDA_ARCH__ + return __finitel(x); +#else /* !__CUDA_ARCH__ */ + return __isfinitel(x); +#endif /* __CUDA_ARCH__ */ +} +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double x) { return __finitel(x); } +#endif /* __ANDROID__ */ + +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const float x) { return __isnanf(x); } +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) { return __isnan(x); } +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const double x) throw() { return __isnan(x); } +#endif /* __ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isnan(const long double x) { return __isnanl(x); } + +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const float x) { return __isinff(x); } +#if defined(__ANDROID__) +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) { return __isinf(x); } +#else /* !__ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const double x) throw() { return __isinf(x); } +#endif /* __ANDROID__ */ +__forceinline__ __host__ __device__ __cudart_builtin__ int isinf(const long double x) { return __isinfl(x); } +#endif /* __QNX__ || __HORIZON__ */ + +#endif /* ((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L) */ +#endif /* __APPLE__ */ + +#if defined(__arm__) && !defined(_STLPORT_VERSION) && !_GLIBCXX_USE_C99 +#if !defined(__ANDROID__) || (!defined(_LIBCPP_VERSION) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8))) + +#if !defined(__QNX__) && !defined(__HORIZON__) +static __inline__ __host__ __device__ __cudart_builtin__ long long int abs(const long long int a) +{ + return llabs(a); +} +#endif /* !__QNX__ && !__HORIZON__*/ + +#endif /* !defined(__ANDROID__) || (!defined(_LIBCPP_VERSION) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8))) */ +#endif /* __arm__ && !_STLPORT_VERSION && !_GLIBCXX_USE_C99 */ + +#elif defined(_WIN32) + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int signbit(const long double a) +{ + return __signbitl(a); +} + +static __inline__ __host__ __device__ __cudart_builtin__ int signbit(const double a) +{ + return __signbit(a); +} + +static __inline__ __host__ __device__ __cudart_builtin__ int signbit(const float a) +{ + return __signbitf(a); +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isinf(const long double a) +{ + return __isinfl(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinfl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + 
return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isinf(const double a) +{ + return __isinf(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isinf(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isinf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isinf(const float a) +{ + return __isinff(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ bool isinf(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isinff(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isinf(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isnan(const long double a) +{ + return __isnanl(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanl(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isnan(const double a) +{ + return __isnan(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__isnan(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isnan(const float a) +{ + return __isnanf(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isnan(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__isnanf(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isnan(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isfinite(const long double a) +{ + return __finitel(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const long double a) +{ +#if defined(__CUDA_ARCH__) + return (__finitel(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isfinite(const double a) +{ + return __finite(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const double a) +{ +#if defined(__CUDA_ARCH__) + return (__finite(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + 
return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +static __inline__ __host__ __device__ __cudart_builtin__ int isfinite(const float a) +{ + return __finitef(a); +} +#else /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ +static __inline__ __host__ __device__ __cudart_builtin__ bool isfinite(const float a) +{ +#if defined(__CUDA_ARCH__) + return (__finitef(a) != 0); +#else /* defined(__CUDA_ARCH__) */ + return isfinite(a); +#endif /* defined(__CUDA_ARCH__) */ +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#endif /* __CUDACC_RTC__ */ + +#if defined(__CUDACC_RTC__) +#define __MATH_FUNCTIONS_DECL__ __host__ __device__ +#define __MATH_FUNCTIONS_DEVICE_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __MATH_FUNCTIONS_DECL__ static inline __host__ __device__ +#define __MATH_FUNCTIONS_DEVICE_DECL__ static inline __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) || _MSC_VER < 1800) +#if defined(__QNX__) && defined(_LIBCPP_VERSION) +_LIBCPP_BEGIN_NAMESPACE_STD +#endif /* __QNX__ && _LIBCPP_VERSION */ +#if !defined(__QNX__) && !(defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800) +#if !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) +__MATH_FUNCTIONS_DECL__ float logb(const float a) +{ + return logbf(a); +} + +__MATH_FUNCTIONS_DECL__ int ilogb(const float a) +{ + return ilogbf(a); +} + +__MATH_FUNCTIONS_DECL__ float scalbn(const float a, const int b) +{ + return scalbnf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float scalbln(const float a, const long int b) +{ + return scalblnf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float exp2(const float a) +{ + return exp2f(a); +} + +__MATH_FUNCTIONS_DECL__ float expm1(const float a) +{ + return expm1f(a); +} + +__MATH_FUNCTIONS_DECL__ float log2(const float a) +{ + return log2f(a); +} + +__MATH_FUNCTIONS_DECL__ float log1p(const float a) +{ + return log1pf(a); +} + +__MATH_FUNCTIONS_DECL__ float acosh(const float a) +{ + return acoshf(a); +} + +__MATH_FUNCTIONS_DECL__ float asinh(const float a) +{ + return asinhf(a); +} + +__MATH_FUNCTIONS_DECL__ float atanh(const float a) +{ + return atanhf(a); +} + +__MATH_FUNCTIONS_DECL__ float hypot(const float a, const float b) +{ + return hypotf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float cbrt(const float a) +{ + return cbrtf(a); +} + +__MATH_FUNCTIONS_DECL__ float erf(const float a) +{ + return erff(a); +} + +__MATH_FUNCTIONS_DECL__ float erfc(const float a) +{ + return erfcf(a); +} + +__MATH_FUNCTIONS_DECL__ float lgamma(const float a) +{ + return lgammaf(a); +} + +__MATH_FUNCTIONS_DECL__ float tgamma(const float a) +{ + return tgammaf(a); +} + +__MATH_FUNCTIONS_DECL__ float copysign(const float a, const float b) +{ + return copysignf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float nextafter(const float a, const float b) +{ + return nextafterf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float remainder(const float a, const float b) +{ + return remainderf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float remquo(const float a, const float b, int *quo) +{ + return remquof(a, b, quo); +} + +__MATH_FUNCTIONS_DECL__ float round(const float a) +{ + return roundf(a); +} + +__MATH_FUNCTIONS_DECL__ long int lround(const float a) +{ + return lroundf(a); +} + +__MATH_FUNCTIONS_DECL__ long long int llround(const float a) +{ + return llroundf(a); +} + +__MATH_FUNCTIONS_DECL__ float trunc(const float a) +{ + return truncf(a); +} + +__MATH_FUNCTIONS_DECL__ float 
rint(const float a) +{ + return rintf(a); +} + +__MATH_FUNCTIONS_DECL__ long int lrint(const float a) +{ + return lrintf(a); +} + +__MATH_FUNCTIONS_DECL__ long long int llrint(const float a) +{ + return llrintf(a); +} + +__MATH_FUNCTIONS_DECL__ float nearbyint(const float a) +{ + return nearbyintf(a); +} + +__MATH_FUNCTIONS_DECL__ float fdim(const float a, const float b) +{ + return fdimf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float fma(const float a, const float b, const float c) +{ + return fmaf(a, b, c); +} + +__MATH_FUNCTIONS_DECL__ float fmax(const float a, const float b) +{ + return fmaxf(a, b); +} + +__MATH_FUNCTIONS_DECL__ float fmin(const float a, const float b) +{ + return fminf(a, b); +} +#endif /* !(((defined _GLIBCXX_MATH_H) && _GLIBCXX_MATH_H) && (__cplusplus >= 201103L)) */ +#endif /* !(!defined(__QNX__) && !(defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 3800)) */ +#if defined(__QNX__) && defined(_LIBCPP_VERSION) +_LIBCPP_END_NAMESPACE_STD +#endif +#endif /* __CUDACC_RTC__ || (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +__MATH_FUNCTIONS_DECL__ float exp10(const float a) +{ + return exp10f(a); +} + +__MATH_FUNCTIONS_DECL__ float rsqrt(const float a) +{ + return rsqrtf(a); +} + +__MATH_FUNCTIONS_DECL__ float rcbrt(const float a) +{ + return rcbrtf(a); +} + +__MATH_FUNCTIONS_DECL__ float sinpi(const float a) +{ + return sinpif(a); +} + +__MATH_FUNCTIONS_DECL__ float cospi(const float a) +{ + return cospif(a); +} + +__MATH_FUNCTIONS_DECL__ void sincospi(const float a, float *const sptr, float *const cptr) +{ + sincospif(a, sptr, cptr); +} + +__MATH_FUNCTIONS_DECL__ void sincos(const float a, float *const sptr, float *const cptr) +{ + sincosf(a, sptr, cptr); +} + +__MATH_FUNCTIONS_DECL__ float j0(const float a) +{ + return j0f(a); +} + +__MATH_FUNCTIONS_DECL__ float j1(const float a) +{ + return j1f(a); +} + +__MATH_FUNCTIONS_DECL__ float jn(const int n, const float a) +{ + return jnf(n, a); +} + +__MATH_FUNCTIONS_DECL__ float y0(const float a) +{ + return y0f(a); +} + +__MATH_FUNCTIONS_DECL__ float y1(const float a) +{ + return y1f(a); +} + +__MATH_FUNCTIONS_DECL__ float yn(const int n, const float a) +{ + return ynf(n, a); +} + +__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i0(const float a) +{ + return cyl_bessel_i0f(a); +} + +__MATH_FUNCTIONS_DEVICE_DECL__ float cyl_bessel_i1(const float a) +{ + return cyl_bessel_i1f(a); +} + +__MATH_FUNCTIONS_DECL__ float erfinv(const float a) +{ + return erfinvf(a); +} + +__MATH_FUNCTIONS_DECL__ float erfcinv(const float a) +{ + return erfcinvf(a); +} + +__MATH_FUNCTIONS_DECL__ float normcdfinv(const float a) +{ + return normcdfinvf(a); +} + +__MATH_FUNCTIONS_DECL__ float normcdf(const float a) +{ + return normcdff(a); +} + +__MATH_FUNCTIONS_DECL__ float erfcx(const float a) +{ + return erfcxf(a); +} + +__MATH_FUNCTIONS_DECL__ double copysign(const double a, const float b) +{ + return copysign(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ double copysign(const float a, const double b) +{ + return copysign(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const unsigned int b) +{ + return umin(a, b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int min(const int a, const unsigned int b) +{ + return umin(static_cast(a), b); +} + +__MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const int b) +{ + return umin(a, static_cast(b)); +} + +__MATH_FUNCTIONS_DECL__ long int min(const long int a, const long int b) +{ + long int retval; + /* Suppress VS warning: warning C4127: conditional expression 
is constant */
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  /* long can be of 32-bit type on some systems. */
+  if (sizeof(long int) == sizeof(int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<long int>(min(static_cast<int>(a), static_cast<int>(b)));
+  } else {
+    retval = static_cast<long int>(llmin(static_cast<long long int>(a), static_cast<long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const unsigned long int b)
+{
+  unsigned long int retval;
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(unsigned long int) == sizeof(unsigned int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<unsigned long int>(umin(static_cast<unsigned int>(a), static_cast<unsigned int>(b)));
+  } else {
+    retval = static_cast<unsigned long int>(ullmin(static_cast<unsigned long long int>(a), static_cast<unsigned long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long int min(const long int a, const unsigned long int b)
+{
+  unsigned long int retval;
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(unsigned long int) == sizeof(unsigned int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<unsigned long int>(umin(static_cast<unsigned int>(a), static_cast<unsigned int>(b)));
+  } else {
+    retval = static_cast<unsigned long int>(ullmin(static_cast<unsigned long long int>(a), static_cast<unsigned long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const long int b)
+{
+  unsigned long int retval;
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(unsigned long int) == sizeof(unsigned int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<unsigned long int>(umin(static_cast<unsigned int>(a), static_cast<unsigned int>(b)));
+  } else {
+    retval = static_cast<unsigned long int>(ullmin(static_cast<unsigned long long int>(a), static_cast<unsigned long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ long long int min(const long long int a, const long long int b)
+{
+  return llmin(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const unsigned long long int b)
+{
+  return ullmin(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long long int min(const long long int a, const unsigned long long int b)
+{
+  return ullmin(static_cast<unsigned long long int>(a), b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const long long int b)
+{
+  return ullmin(a, static_cast<unsigned long long int>(b));
+}
+
+__MATH_FUNCTIONS_DECL__ float min(const float a, const float b)
+{
+  return fminf(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ double min(const double a, const double b)
+{
+  return fmin(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ double min(const float a, const double b)
+{
+  return fmin(static_cast<double>(a), b);
+}
+
+__MATH_FUNCTIONS_DECL__ double min(const double a, const float b)
+{
+  return fmin(a, static_cast<double>(b));
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const unsigned int b)
+{
+  return umax(a, b);
+}
+
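+/* Example (sketch): the mixed-type overloads make the result type of min()
+ * and max() calls explicit instead of leaving it to the usual arithmetic
+ * conversions:
+ *
+ *   unsigned int       u = min(-1, 1u);      // min(int, unsigned int) -> 1u
+ *   double             d = min(2.0f, 1.5);   // min(float, double)     -> 1.5
+ *   unsigned long long m = min(-1LL, 2ULL);  // routed through ullmin  -> 2
+ */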
+__MATH_FUNCTIONS_DECL__ unsigned int max(const int a, const unsigned int b)
+{
+  return umax(static_cast<unsigned int>(a), b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned int max(const unsigned int a, const int b)
+{
+  return umax(a, static_cast<unsigned int>(b));
+}
+
+__MATH_FUNCTIONS_DECL__ long int max(const long int a, const long int b)
+{
+  long int retval;
+  /* long can be of 32-bit type on some systems. */
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(long int) == sizeof(int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<long int>(max(static_cast<int>(a), static_cast<int>(b)));
+  } else {
+    retval = static_cast<long int>(llmax(static_cast<long long int>(a), static_cast<long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const unsigned long int b)
+{
+  unsigned long int retval;
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(unsigned long int) == sizeof(unsigned int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<unsigned long int>(umax(static_cast<unsigned int>(a), static_cast<unsigned int>(b)));
+  } else {
+    retval = static_cast<unsigned long int>(ullmax(static_cast<unsigned long long int>(a), static_cast<unsigned long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long int max(const long int a, const unsigned long int b)
+{
+  unsigned long int retval;
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(unsigned long int) == sizeof(unsigned int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<unsigned long int>(umax(static_cast<unsigned int>(a), static_cast<unsigned int>(b)));
+  } else {
+    retval = static_cast<unsigned long int>(ullmax(static_cast<unsigned long long int>(a), static_cast<unsigned long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long int max(const unsigned long int a, const long int b)
+{
+  unsigned long int retval;
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (push)
+#pragma warning (disable: 4127)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+  if (sizeof(unsigned long int) == sizeof(unsigned int)) {
+#if defined(_MSC_VER) && !defined(__CUDA_ARCH__)
+#pragma warning (pop)
+#endif /* _MSC_VER && !defined(__CUDA_ARCH__) */
+    retval = static_cast<unsigned long int>(umax(static_cast<unsigned int>(a), static_cast<unsigned int>(b)));
+  } else {
+    retval = static_cast<unsigned long int>(ullmax(static_cast<unsigned long long int>(a), static_cast<unsigned long long int>(b)));
+  }
+  return retval;
+}
+
+__MATH_FUNCTIONS_DECL__ long long int max(const long long int a, const long long int b)
+{
+  return llmax(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const unsigned long long int b)
+{
+  return ullmax(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long long int max(const long long int a, const unsigned long long int b)
+{
+  return ullmax(static_cast<unsigned long long int>(a), b);
+}
+
+__MATH_FUNCTIONS_DECL__ unsigned long long int max(const unsigned long long int a, const long long int b)
+{
+  return ullmax(a, static_cast<unsigned long long int>(b));
+}
+
+__MATH_FUNCTIONS_DECL__ float max(const float a, const float b)
+{
+  return fmaxf(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ double max(const double a, const double b)
+{
+  return fmax(a, b);
+}
+
+__MATH_FUNCTIONS_DECL__ double max(const float a, const double b)
+{
+  return fmax(static_cast<double>(a), b);
+}
+
+__MATH_FUNCTIONS_DECL__ double max(const double a, const float b)
+{
+  return fmax(a, static_cast<double>(b));
+}
+
+
+#if !defined(__CUDA_ARCH__)
+#if defined(_WIN32)
+#define __HELPER_FUNC_LINKAGE static inline __host__ __device__
+#pragma warning (push)
+#pragma warning (disable : 4211)
+#else /* !defined(_WIN32) */
+#define __HELPER_FUNC_LINKAGE inline __host__ __device__
+#endif /* defined(_WIN32) */
+
+__HELPER_FUNC_LINKAGE int min(const int a, const int b)
+{
+  return (a < b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE unsigned int umin(const unsigned int a, const unsigned int b)
+{
+  return (a < b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE long long int llmin(const long long int a, const long long int b)
+{
+  return (a < b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE unsigned long long int ullmin(const unsigned long long int a,
+                                                    const unsigned long long int b)
+{
+  return (a < b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE int max(const int a, const int b)
+{
+  return (a > b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE unsigned int umax(const unsigned int a, const unsigned int b)
+{
+  return (a > b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE long long int llmax(const long long int a, const long long int b)
+{
+  return (a > b) ? a : b;
+}
+
+__HELPER_FUNC_LINKAGE unsigned long long int ullmax(const unsigned long long int a,
+                                                    const unsigned long long int b)
+{
+  return (a > b) ? a : b;
+}
+
+#if defined(_WIN32)
+#pragma warning (pop)
+#endif /* defined(_WIN32) */
+
+#undef __HELPER_FUNC_LINKAGE
+
+#endif /* !defined(__CUDA_ARCH__) */
+
+#undef __MATH_FUNCTIONS_DECL__
+#undef __MATH_FUNCTIONS_DEVICE_DECL__
+
+/*******************************************************************************
+* *
+* *
+* *
+*******************************************************************************/
+
+#endif /* __cplusplus && __CUDACC__ */
+#if !defined(__CUDACC__)
+
+#include "host_defines.h"
+#include "math_constants.h"
+
+#define __cuda_INT_MAX \
+        ((int)((unsigned int)-1 >> 1))
+
+/*******************************************************************************
+* *
+* ONLY FOR HOST CODE! NOT FOR DEVICE EXECUTION *
+* *
+*******************************************************************************/
+
+#include <crt/func_macro.h>
+
+#if defined(_WIN32)
+#pragma warning (push)
+#pragma warning (disable : 4211)
+
+#endif /* _WIN32 */
+
+#if defined(_WIN32) || defined(__APPLE__) || defined (__ANDROID__) || defined(__QNX__)
+
+__func__(int __isnan(const double a))
+{
+  unsigned long long int l;
+  memcpy(&l, &a, sizeof(double));
+  return (l << 1ULL) > 0xffe0000000000000ULL;
+}
+
+#endif /* _WIN32 || __APPLE__ || __ANDROID__ || __QNX__ */
+
+#if defined(_WIN32) || defined(__APPLE__) || defined(__QNX__)
+
+/*******************************************************************************
+* *
+* HOST IMPLEMENTATION FOR DOUBLE ROUTINES FOR WINDOWS & APPLE PLATFORMS *
+* *
+*******************************************************************************/
+
+__func__(double exp10(const double a))
+{
+  return pow(10.0, a);
+}
+
+__func__(float exp10f(const float a))
+{
+  return static_cast<float>(exp10(static_cast<double>(a)));
+}
+
+__func__(void sincos(const double a, double *sptr, double *cptr))
+{
+  *sptr = sin(a);
+  *cptr = cos(a);
+}
+
+__func__(void sincosf(const float a, float *sptr, float *cptr))
+{
+  double s, c;
+
+  sincos(static_cast<double>(a), &s, &c);
+  *sptr = static_cast<float>(s);
+  *cptr = static_cast<float>(c);
+}
+
+__func__(int __isinf(const double a))
+{
+  unsigned long long int l;
+  memcpy(&l, &a, sizeof(double));
+  return (l << 1ULL) == 0xffe0000000000000ULL;
+}
+
+#endif /* _WIN32 || __APPLE__ || __QNX__ */
+
+#if defined(_WIN32) || defined (__ANDROID__)
+
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+__func__(double log2(const double a))
+{
+  return log(a) * 1.44269504088896340;
+}
+#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */
+
+#endif /* _WIN32 || __ANDROID__ */
+
+#if defined(_WIN32)
+
+/*******************************************************************************
+* *
+* HOST IMPLEMENTATION FOR DOUBLE ROUTINES FOR WINDOWS PLATFORM *
+* *
+*******************************************************************************/
+
+__func__(int __signbit(double a))
+{
+  signed long long int l;
+  memcpy(&l, &a, sizeof(double));
+  return l < 0LL;
+}
+
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+__func__(double copysign(double a, double b))
+{
+  unsigned long long int la, lb;
+  memcpy(&la, &a, sizeof(double));
+  memcpy(&lb, &b, sizeof(double));
+  la = (la & 0x7fffffffffffffffULL) | (lb & 0x8000000000000000ULL);
+  memcpy(&a, &la, sizeof(double));
+  return a;
+}
+#endif /* MSC_VER < 1800 */
+
+__func__(int __finite(double a))
+{
+  unsigned long long int l;
+  memcpy(&l, &a, sizeof(double));
+  return (l << 1ULL) < 0xffe0000000000000ULL;
+}
+
+#if (!defined(_MSC_VER) || _MSC_VER < 1800)
+__func__(double fmax(double a, double b))
+{
+  if (__isnan(a) && __isnan(b)) return a + b;
+  if (__isnan(a)) return b;
+  if (__isnan(b)) return a;
+  if ((a == 0.0) && (b == 0.0) && __signbit(b)) return a;
+  return a > b ? a : b;
+}
+
+__func__(double fmin(double a, double b))
+{
+  if (__isnan(a) && __isnan(b)) return a + b;
+  if (__isnan(a)) return b;
+  if (__isnan(b)) return a;
+  if ((a == 0.0) && (b == 0.0) && __signbit(a)) return a;
+  return a < b ? a : b;
+}
+
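+/* Example (sketch): these pre-VS2013 fallbacks reproduce the IEEE-754
+ * quiet-NaN and signed-zero behaviour of fmax/fmin:
+ *
+ *   fmax(nan(""), 1.0)     ->  1.0    (a single quiet NaN is ignored)
+ *   fmax(-0.0, 0.0)        -> +0.0    (zeros are ordered by sign)
+ *   fmin(nan(""), nan("")) ->  NaN    (computed as a + b)
+ */
+__func__(double trunc(double a))
+{
+  return a < 0.0 ?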
ceil(a) : floor(a); +} + +__func__(double round(double a)) +{ + double fa = fabs(a); + + if (fa > CUDART_TWO_TO_52) { + return a; + } else { + double u = floor(fa + 0.5); + if (fa < 0.5) u = 0; + u = copysign (u, a); + return u; + } +} + +__func__(long int lround(double a)) +{ + return static_cast(round(a)); +} + +__func__(long long int llround(double a)) +{ + return static_cast(round(a)); +} + +__func__(double rint(double a)) +{ + double fa = fabs(a); + double u = CUDART_TWO_TO_52 + fa; + if (fa >= CUDART_TWO_TO_52) { + u = a; + } else { + u = u - CUDART_TWO_TO_52; + u = copysign (u, a); + } + return u; +} + +__func__(double nearbyint(double a)) +{ + return rint(a); +} + +__func__(long int lrint(double a)) +{ + return static_cast(rint(a)); +} + +__func__(long long int llrint(double a)) +{ + return static_cast(rint(a)); +} + +__func__(double fdim(double a, double b)) +{ + if (a > b) { + return (a - b); + } else if (a <= b) { + return 0.0; + } else if (__isnan(a)) { + return a; + } else { + return b; + } +} + +__func__(double scalbn(double a, int b)) +{ + return ldexp(a, b); +} + +__func__(double scalbln(double a, long int b)) +{ + int t; + + if (b > 2147483647L) { + t = 2147483647; + } else if (b < (-2147483647 - 1)) { + t = (-2147483647 - 1); + } else { + t = static_cast(b); + } + return scalbn(a, t); +} + +__func__(double exp2(double a)) +{ + return pow(2.0, a); +} + +/* + * The following is based on: David Goldberg, "What every computer scientist + * should know about floating-point arithmetic", ACM Computing Surveys, Volume + * 23, Issue 1, March 1991. + */ +__func__(double log1p(double a)) +{ + volatile double u, m; + + u = 1.0 + a; + if (u == 1.0) { + /* a very close to zero */ + u = a; + } else { + m = u - 1.0; + u = log(u); + if (a < 1.0) { + /* a somewhat close to zero */ + u = a * u; + u = u / m; + } + } + return u; +} + +/* + * This code based on: http://www.cs.berkeley.edu/~wkahan/Math128/Sumnfp.pdf + */ +__func__(double expm1(double a)) +{ + volatile double u, m; + + u = exp(a); + m = u - 1.0; + if (m == 0.0) { + /* a very close zero */ + m = a; + } + else if (fabs(a) < 1.0) { + /* a somewhat close zero */ + u = log(u); + m = m * a; + m = m / u; + } + return m; +} + +__func__(double cbrt(double a)) +{ + double s, t; + + if (a == 0.0 || __isinf(a)) { + return a; + } + s = fabs(a); + t = exp2(CUDART_THIRD * log2(s)); /* initial approximation */ + t = t - (t - (s / (t * t))) * CUDART_THIRD; /* refine approximation */ + t = copysign(t, a); + return t; +} + +__func__(double acosh(double a)) +{ + double s, t; + + t = a - 1.0; + if (t == a) { + return log(2.0) + log(a); + } else { + s = a + 1.0; + t = t + sqrt(s * t); + return log1p(t); + } +} + +__func__(double asinh(double a)) +{ + double fa, oofa, t; + + fa = fabs(a); + if (fa > 1e18) { + t = log(2.0) + log(fa); + } else { + oofa = 1.0 / fa; + t = fa + fa / (oofa + sqrt(1.0 + oofa * oofa)); + t = log1p(t); + } + t = copysign(t, a); + return t; +} + +__func__(double atanh(double a)) +{ + double fa, t; + + if (__isnan(a)) { + return a + a; + } + fa = fabs(a); + t = (2.0 * fa) / (1.0 - fa); + t = 0.5 * log1p(t); + if (__isnan(t) || !__signbit(a)) { + return t; + } + return -t; +} + +__func__(int ilogb(double a)) +{ + unsigned long long int i; + int expo = -1022; + + if (__isnan(a)) return -__cuda_INT_MAX-1; + if (__isinf(a)) return __cuda_INT_MAX; + memcpy(&i, &a, sizeof(double)); + i = i & 0x7fffffffffffffffULL; + if (i == 0) return -__cuda_INT_MAX-1; + if (i >= 0x0010000000000000ULL) { + return (int)(((i >> 52ULL) & 0x7ffU) - 
1023); + } + while (i < 0x0010000000000000ULL) { + expo--; + i <<= 1; + } + return expo; +} + +__func__(double logb(double a)) +{ + unsigned long long int i; + int expo = -1022; + + if (__isnan(a)) return a + a; + if (__isinf(a)) return fabs(a); + memcpy(&i, &a, sizeof(double)); + i = i & 0x7fffffffffffffffULL; + if (i == 0) return -1.0/fabs(a); + if (i >= 0x0010000000000000ULL) { + return (double)((int)((i >> 52ULL) & 0x7ffU) - 1023); + } + while (i < 0x0010000000000000ULL) { + expo--; + i <<= 1; + } + return static_cast(expo); +} + +__func__(double remquo(double a, double b, int *quo)) +{ + unsigned long long int aa, bb; + int rem1 = 1; /* do FPREM1, a.k.a IEEE remainder */ + int expo_a; + int expo_b; + unsigned long long mant_a; + unsigned long long mant_b; + unsigned long long mant_c; + unsigned long long temp; + int sign_a; + int sign_b; + int sign_c; + int expo_c; + int expodiff; + int quot = 0; /* initialize quotient */ + int l; + int iter; + + memcpy(&aa, &a, sizeof(double)); + mant_a = (aa << 11ULL) | 0x8000000000000000ULL; + expo_a = (int)((aa >> 52ULL) & 0x7ffU) - 1023; + sign_a = (int)(aa >> 63ULL); + + memcpy(&bb, &b, sizeof(double)); + mant_b = (bb << 11ULL) | 0x8000000000000000ULL; + expo_b = (int)((bb >> 52ULL) & 0x7ffU) - 1023; + sign_b = (int)(bb >> 63ULL); + + sign_c = sign_a; /* remainder has sign of dividend */ + expo_c = expo_a; /* default */ + + /* handled NaNs and infinities */ + if (__isnan(a) || __isnan(b)) { + *quo = quot; + return a + b; + } + if (__isinf(a) || (b == 0.0)) { + *quo = quot; + aa = 0xfff8000000000000ULL; + memcpy(&a, &aa, sizeof(double)); + return a; + } + if ((a == 0.0) || (__isinf(b))) { + *quo = quot; + return a; + } + /* normalize denormals */ + if (expo_a < -1022) { + mant_a = mant_a + mant_a; + while (mant_a < 0x8000000000000000ULL) { + mant_a = mant_a + mant_a; + expo_a--; + } + } + if (expo_b < -1022) { + mant_b = mant_b + mant_b; + while (mant_b < 0x8000000000000000ULL) { + mant_b = mant_b + mant_b; + expo_b--; + } + } + expodiff = expo_a - expo_b; + /* clamp iterations if exponent difference negative */ + if (expodiff < 0) { + iter = -1; + } else { + iter = expodiff; + } + /* Shift dividend and divisor right by one bit to prevent overflow + during the division algorithm. + */ + mant_a = mant_a >> 1ULL; + mant_b = mant_b >> 1ULL; + expo_c = expo_a - iter; /* default exponent of result */ + + /* Use binary longhand division (restoring) */ + for (l = 0; l < (iter + 1); l++) { + mant_a = mant_a - mant_b; + if (mant_a & 0x8000000000000000ULL) { + mant_a = mant_a + mant_b; + quot = quot + quot; + } else { + quot = quot + quot + 1; + } + mant_a = mant_a + mant_a; + } + + /* Save current remainder */ + mant_c = mant_a; + /* If remainder's mantissa is all zeroes, final result is zero. */ + if (mant_c == 0) { + quot = quot & 7; + *quo = (sign_a ^ sign_b) ? -quot : quot; + aa = static_cast(sign_c) << 63ULL; + memcpy(&a, &aa, sizeof(double)); + return a; + } + /* Normalize result */ + while (!(mant_c & 0x8000000000000000ULL)) { + mant_c = mant_c + mant_c; + expo_c--; + } + /* For IEEE remainder (quotient rounded to nearest-even we might need to + do a final subtraction of the divisor from the remainder. + */ + if (rem1 && ((expodiff+1) >= 0)) { + temp = mant_a - mant_b; + /* round quotient to nearest even */ + if (((temp != 0ULL) && (!(temp & 0x8000000000000000ULL))) || + ((temp == 0ULL) && (quot & 1))) { + mant_a = mant_a >> 1ULL; + quot++; + /* Since the divisor is greater than the remainder, the result will + have opposite sign of the dividend. 
To avoid a negative mantissa + when subtracting the divisor from remainder, reverse subtraction + */ + sign_c = 1 ^ sign_c; + expo_c = expo_a - iter + 1; + mant_c = mant_b - mant_a; + /* normalize result */ + while (!(mant_c & 0x8000000000000000ULL)) { + mant_c = mant_c + mant_c; + expo_c--; + } + } + } + /* package up result */ + if (expo_c >= -1022) { /* normal */ + mant_c = ((mant_c >> 11ULL) + + (((static_cast(sign_c)) << 63ULL) + + (((unsigned long long)(expo_c + 1022)) << 52ULL))); + } else { /* denormal */ + mant_c = (((static_cast(sign_c)) << 63ULL) + + (mant_c >> (unsigned long long)(11 - expo_c - 1022))); + } + quot = quot & 7; /* mask quotient down to least significant three bits */ + *quo = (sign_a ^ sign_b) ? -quot : quot; + memcpy(&a, &mant_c, sizeof(double)); + return a; +} + +__func__(double remainder(double a, double b)) +{ + int quo; + return remquo (a, b, &quo); +} + +__func__(double fma (double a, double b, double c)) +{ + struct { + unsigned int lo; + unsigned int hi; + } xx, yy, zz, ww; + double d; + unsigned int s, t, u, prod0, prod1, prod2, prod3, expo_x, expo_y, expo_z; + + memcpy(&xx, &a, sizeof(double)); + memcpy(&yy, &b, sizeof(double)); + memcpy(&zz, &c, sizeof(double)); + + expo_z = 0x7FFU; + t = xx.hi >> 20; + expo_x = expo_z & t; + expo_x = expo_x - 1; /* expo(x) - 1 */ + t = yy.hi >> 20; + expo_y = expo_z & t; + expo_y = expo_y - 1; /* expo(y) - 1 */ + t = zz.hi >> 20; + expo_z = expo_z & t; + expo_z = expo_z - 1; /* expo(z) - 1 */ + + if (!((expo_x <= 0x7FDU) && + (expo_y <= 0x7FDU) && + (expo_z <= 0x7FDU))) { + + /* fma (nan, y, z) --> nan + fma (x, nan, z) --> nan + fma (x, y, nan) --> nan + */ + if (((yy.hi << 1) | (yy.lo != 0)) > 0xffe00000U) { + yy.hi |= 0x00080000U; + memcpy(&d, &yy, sizeof(double)); + return d; + } + if (((zz.hi << 1) | (zz.lo != 0)) > 0xffe00000U) { + zz.hi |= 0x00080000U; + memcpy(&d, &zz, sizeof(double)); + return d; + } + if (((xx.hi << 1) | (xx.lo != 0)) > 0xffe00000U) { + xx.hi |= 0x00080000U; + memcpy(&d, &xx, sizeof(double)); + return d; + } + + /* fma (0, inf, z) --> INDEFINITE + fma (inf, 0, z) --> INDEFINITE + fma (-inf,+y,+inf) --> INDEFINITE + fma (+x,-inf,+inf) --> INDEFINITE + fma (+inf,-y,+inf) --> INDEFINITE + fma (-x,+inf,+inf) --> INDEFINITE + fma (-inf,-y,-inf) --> INDEFINITE + fma (-x,-inf,-inf) --> INDEFINITE + fma (+inf,+y,-inf) --> INDEFINITE + fma (+x,+inf,-inf) --> INDEFINITE + */ + if (((((xx.hi << 1) | xx.lo) == 0) && + (((yy.hi << 1) | (yy.lo != 0)) == 0xffe00000U)) || + ((((yy.hi << 1) | yy.lo) == 0) && + (((xx.hi << 1) | (xx.lo != 0)) == 0xffe00000U))) { + xx.hi = 0xfff80000U; + xx.lo = 0x00000000U; + memcpy(&d, &xx, sizeof(double)); + return d; + } + if (((zz.hi << 1) | (zz.lo != 0)) == 0xffe00000U) { + if ((((yy.hi << 1) | (yy.lo != 0)) == 0xffe00000U) || + (((xx.hi << 1) | (xx.lo != 0)) == 0xffe00000U)) { + if ((int)(xx.hi ^ yy.hi ^ zz.hi) < 0) { + xx.hi = 0xfff80000U; + xx.lo = 0x00000000U; + memcpy(&d, &xx, sizeof(double)); + return d; + } + } + } + /* fma (inf, y, z) --> inf + fma (x, inf, z) --> inf + fma (x, y, inf) --> inf + */ + if (((xx.hi << 1) | (xx.lo != 0)) == 0xffe00000U) { + xx.hi = xx.hi ^ (yy.hi & 0x80000000U); + memcpy(&d, &xx, sizeof(double)); + return d; + } + if (((yy.hi << 1) | (yy.lo != 0)) == 0xffe00000U) { + yy.hi = yy.hi ^ (xx.hi & 0x80000000U); + memcpy(&d, &yy, sizeof(double)); + return d; + } + if (((zz.hi << 1) | (zz.lo != 0)) == 0xffe00000U) { + memcpy(&d, &zz, sizeof(double)); + return d; + } + /* fma (+0, -y, -0) --> -0 + fma (-0, +y, -0) --> -0 + fma (+x, -0, -0) --> 
-0 + fma (-x, +0, -0) --> -0 + */ + if ((zz.hi == 0x80000000U) && (zz.lo == 0)) { + if ((((xx.hi << 1) | xx.lo) == 0) || + (((yy.hi << 1) | yy.lo) == 0)) { + if ((int)(xx.hi ^ yy.hi) < 0) { + memcpy(&d, &zz, sizeof(double)); + return d; + } + } + } + /* fma (0, y, 0) --> +0 (-0 if round down and signs of addend differ) + fma (x, 0, 0) --> +0 (-0 if round down and signs of addend differ) + */ + if ((((zz.hi << 1) | zz.lo) == 0) && + ((((xx.hi << 1) | xx.lo) == 0) || + (((yy.hi << 1) | yy.lo) == 0))) { + zz.hi &= 0x7fffffffU; + memcpy(&d, &zz, sizeof(double)); + return d; + } + + /* fma (0, y, z) --> z + fma (x, 0, z) --> z + */ + if ((((xx.hi << 1) | xx.lo) == 0) || + (((yy.hi << 1) | yy.lo) == 0)) { + memcpy(&d, &zz, sizeof(double)); + return d; + } + + if (expo_x == 0xffffffffU) { + expo_x++; + t = xx.hi & 0x80000000U; + s = xx.lo >> 21; + xx.lo = xx.lo << 11; + xx.hi = xx.hi << 11; + xx.hi = xx.hi | s; + if (!xx.hi) { + xx.hi = xx.lo; + xx.lo = 0; + expo_x -= 32; + } + while (static_cast(xx.hi) > 0) { + s = xx.lo >> 31; + xx.lo = xx.lo + xx.lo; + xx.hi = xx.hi + xx.hi; + xx.hi = xx.hi | s; + expo_x--; + } + xx.lo = (xx.lo >> 11); + xx.lo |= (xx.hi << 21); + xx.hi = (xx.hi >> 11) | t; + } + if (expo_y == 0xffffffffU) { + expo_y++; + t = yy.hi & 0x80000000U; + s = yy.lo >> 21; + yy.lo = yy.lo << 11; + yy.hi = yy.hi << 11; + yy.hi = yy.hi | s; + if (!yy.hi) { + yy.hi = yy.lo; + yy.lo = 0; + expo_y -= 32; + } + while (static_cast(yy.hi) > 0) { + s = yy.lo >> 31; + yy.lo = yy.lo + yy.lo; + yy.hi = yy.hi + yy.hi; + yy.hi = yy.hi | s; + expo_y--; + } + yy.lo = (yy.lo >> 11); + yy.lo |= (yy.hi << 21); + yy.hi = (yy.hi >> 11) | t; + } + if (expo_z == 0xffffffffU) { + expo_z++; + t = zz.hi & 0x80000000U; + s = zz.lo >> 21; + zz.lo = zz.lo << 11; + zz.hi = zz.hi << 11; + zz.hi = zz.hi | s; + if (!zz.hi) { + zz.hi = zz.lo; + zz.lo = 0; + expo_z -= 32; + } + while (static_cast(zz.hi) > 0) { + s = zz.lo >> 31; + zz.lo = zz.lo + zz.lo; + zz.hi = zz.hi + zz.hi; + zz.hi = zz.hi | s; + expo_z--; + } + zz.lo = (zz.lo >> 11); + zz.lo |= (zz.hi << 21); + zz.hi = (zz.hi >> 11) | t; + } + } + + expo_x = expo_x + expo_y; + expo_y = xx.hi ^ yy.hi; + t = xx.lo >> 21; + xx.lo = xx.lo << 11; + xx.hi = xx.hi << 11; + xx.hi = xx.hi | t; + yy.hi = yy.hi & 0x000fffffU; + xx.hi = xx.hi | 0x80000000U; /* set mantissa hidden bit */ + yy.hi = yy.hi | 0x00100000U; /* set mantissa hidden bit */ + + prod0 = xx.lo * yy.lo; + prod1 =(unsigned)((static_cast(xx.lo)*static_cast(yy.lo))>>32ULL); + prod2 = xx.hi * yy.lo; + prod3 = xx.lo * yy.hi; + prod1 += prod2; + t = (unsigned)(prod1 < prod2); + prod1 += prod3; + t += prod1 < prod3; + prod2 =(unsigned)((static_cast(xx.hi)*static_cast(yy.lo))>>32ULL); + prod3 =(unsigned)((static_cast(xx.lo)*static_cast(yy.hi))>>32ULL); + prod2 += prod3; + s = (unsigned)(prod2 < prod3); + prod3 = xx.hi * yy.hi; + prod2 += prod3; + s += prod2 < prod3; + prod2 += t; + s += prod2 < t; + prod3 =(unsigned)((static_cast(xx.hi)*static_cast(yy.hi))>>32ULL); + prod3 = prod3 + s; + + yy.lo = prod0; /* mantissa */ + yy.hi = prod1; /* mantissa */ + xx.lo = prod2; /* mantissa */ + xx.hi = prod3; /* mantissa */ + expo_x = expo_x - (1023 - 2); /* expo-1 */ + expo_y = expo_y & 0x80000000U; /* sign */ + + if (xx.hi < 0x00100000U) { + s = xx.lo >> 31; + s = (xx.hi << 1) + s; + xx.hi = s; + s = yy.hi >> 31; + s = (xx.lo << 1) + s; + xx.lo = s; + s = yy.lo >> 31; + s = (yy.hi << 1) + s; + yy.hi = s; + s = yy.lo << 1; + yy.lo = s; + expo_x--; + } + + t = 0; + if (((zz.hi << 1) | zz.lo) != 0) { /* z is not zero */ + + s 
= zz.hi & 0x80000000U; + + zz.hi &= 0x000fffffU; + zz.hi |= 0x00100000U; + ww.hi = 0; + ww.lo = 0; + + /* compare and swap. put augend into xx:yy */ + if (static_cast(expo_z) > static_cast(expo_x)) { + t = expo_z; + expo_z = expo_x; + expo_x = t; + t = zz.hi; + zz.hi = xx.hi; + xx.hi = t; + t = zz.lo; + zz.lo = xx.lo; + xx.lo = t; + t = ww.hi; + ww.hi = yy.hi; + yy.hi = t; + t = ww.lo; + ww.lo = yy.lo; + yy.lo = t; + t = expo_y; + expo_y = s; + s = t; + } + + /* augend_sign = expo_y, augend_mant = xx:yy, augend_expo = expo_x */ + /* addend_sign = s, addend_mant = zz:ww, addend_expo = expo_z */ + expo_z = expo_x - expo_z; + u = expo_y ^ s; + if (expo_z <= 107) { + /* denormalize addend */ + t = 0; + while (expo_z >= 32) { + t = ww.lo | (t != 0); + ww.lo = ww.hi; + ww.hi = zz.lo; + zz.lo = zz.hi; + zz.hi = 0; + expo_z -= 32; + } + if (expo_z) { + t = (t >> expo_z) | (ww.lo << (32 - expo_z)) | + ((t << (32 - expo_z)) != 0); + ww.lo = (ww.lo >> expo_z) | (ww.hi << (32 - expo_z)); + ww.hi = (ww.hi >> expo_z) | (zz.lo << (32 - expo_z)); + zz.lo = (zz.lo >> expo_z) | (zz.hi << (32 - expo_z)); + zz.hi = (zz.hi >> expo_z); + } + } else { + t = 1; + ww.lo = 0; + ww.hi = 0; + zz.lo = 0; + zz.hi = 0; + } + if (static_cast(u) < 0) { + /* signs differ, effective subtraction */ + t = (unsigned)(-static_cast(t)); + s = (unsigned)(t != 0); + u = yy.lo - s; + s = (unsigned)(u > yy.lo); + yy.lo = u - ww.lo; + s += yy.lo > u; + u = yy.hi - s; + s = (unsigned)(u > yy.hi); + yy.hi = u - ww.hi; + s += yy.hi > u; + u = xx.lo - s; + s = (unsigned)(u > xx.lo); + xx.lo = u - zz.lo; + s += xx.lo > u; + xx.hi = (xx.hi - zz.hi) - s; + if (!(xx.hi | xx.lo | yy.hi | yy.lo | t)) { + /* complete cancelation, return 0 */ + memcpy(&d, &xx, sizeof(double)); + return d; + } + if (static_cast(xx.hi) < 0) { + /* Oops, augend had smaller mantissa. Negate mantissa and flip + sign of result + */ + t = ~t; + yy.lo = ~yy.lo; + yy.hi = ~yy.hi; + xx.lo = ~xx.lo; + xx.hi = ~xx.hi; + if (++t == 0) { + if (++yy.lo == 0) { + if (++yy.hi == 0) { + if (++xx.lo == 0) { + ++xx.hi; + } + } + } + } + expo_y ^= 0x80000000U; + } + + /* normalize mantissa, if necessary */ + while (!(xx.hi & 0x00100000U)) { + xx.hi = (xx.hi << 1) | (xx.lo >> 31); + xx.lo = (xx.lo << 1) | (yy.hi >> 31); + yy.hi = (yy.hi << 1) | (yy.lo >> 31); + yy.lo = (yy.lo << 1); + expo_x--; + } + } else { + /* signs are the same, effective addition */ + yy.lo = yy.lo + ww.lo; + s = (unsigned)(yy.lo < ww.lo); + yy.hi = yy.hi + s; + u = (unsigned)(yy.hi < s); + yy.hi = yy.hi + ww.hi; + u += yy.hi < ww.hi; + xx.lo = xx.lo + u; + s = (unsigned)(xx.lo < u); + xx.lo = xx.lo + zz.lo; + s += xx.lo < zz.lo; + xx.hi = xx.hi + zz.hi + s; + if (xx.hi & 0x00200000U) { + t = t | (yy.lo << 31); + yy.lo = (yy.lo >> 1) | (yy.hi << 31); + yy.hi = (yy.hi >> 1) | (xx.lo << 31); + xx.lo = (xx.lo >> 1) | (xx.hi << 31); + xx.hi = ((xx.hi & 0x80000000U) | (xx.hi >> 1)) & ~0x40000000U; + expo_x++; + } + } + } + t = yy.lo | (t != 0); + t = yy.hi | (t != 0); + + xx.hi |= expo_y; /* or in sign bit */ + if (expo_x <= 0x7FDU) { + /* normal */ + xx.hi = xx.hi & ~0x00100000U; /* lop off integer bit */ + s = xx.lo & 1; /* mantissa lsb */ + u = xx.lo; + xx.lo += (t == 0x80000000U) ? 
s : (t >> 31); + xx.hi += (u > xx.lo); + xx.hi += ((expo_x + 1) << 20); + memcpy(&d, &xx, sizeof(double)); + return d; + } else if (static_cast(expo_x) >= 2046) { + /* overflow */ + xx.hi = (xx.hi & 0x80000000U) | 0x7ff00000U; + xx.lo = 0; + memcpy(&d, &xx, sizeof(double)); + return d; + } + /* subnormal */ + expo_x = (unsigned)(-static_cast(expo_x)); + if (expo_x > 54) { + xx.hi = xx.hi & 0x80000000U; + xx.lo = 0; + memcpy(&d, &xx, sizeof(double)); + return d; + } + yy.hi = xx.hi & 0x80000000U; /* save sign bit */ + xx.hi = xx.hi & ~0xffe00000U; + if (expo_x >= 32) { + t = xx.lo | (t != 0); + xx.lo = xx.hi; + xx.hi = 0; + expo_x -= 32; + } + if (expo_x) { + t = (t >> expo_x) | (xx.lo << (32 - expo_x)) | (t != 0); + xx.lo = (xx.lo >> expo_x) | (xx.hi << (32 - expo_x)); + xx.hi = (xx.hi >> expo_x); + } + expo_x = xx.lo & 1; + u = xx.lo; + xx.lo += (t == 0x80000000U) ? expo_x : (t >> 31); + xx.hi += (u > xx.lo); + xx.hi |= yy.hi; + memcpy(&d, &xx, sizeof(double)); + return d; +} + +__func__(double nextafter(double a, double b)) +{ + unsigned long long int ia; + unsigned long long int ib; + memcpy(&ia, &a, sizeof(double)); + memcpy(&ib, &b, sizeof(double)); + if (__isnan(a) || __isnan(b)) return a + b; /* NaN */ + if (((ia | ib) << 1ULL) == 0ULL) return b; + if (a == 0.0) { + return copysign (4.9406564584124654e-324, b); /* crossover */ + } + if ((a < b) && (a < 0.0)) ia--; + if ((a < b) && (a > 0.0)) ia++; + if ((a > b) && (a < 0.0)) ia++; + if ((a > b) && (a > 0.0)) ia--; + memcpy(&a, &ia, sizeof(double)); + return a; +} + +__func__(double erf(double a)) +{ + double t, r, q; + + t = fabs(a); + if (t >= 1.0) { + r = -1.28836351230756500E-019; + r = r * t + 1.30597472161093370E-017; + r = r * t - 6.33924401259620500E-016; + r = r * t + 1.96231865908940140E-014; + r = r * t - 4.35272243559990750E-013; + r = r * t + 7.37083927929352150E-012; + r = r * t - 9.91402142550461630E-011; + r = r * t + 1.08817017167760820E-009; + r = r * t - 9.93918713097634620E-009; + r = r * t + 7.66739923255145500E-008; + r = r * t - 5.05440278302806720E-007; + r = r * t + 2.87474157099000620E-006; + r = r * t - 1.42246725399722510E-005; + r = r * t + 6.16994555079419460E-005; + r = r * t - 2.36305221938908790E-004; + r = r * t + 8.05032844055371070E-004; + r = r * t - 2.45833366629108140E-003; + r = r * t + 6.78340988296706120E-003; + r = r * t - 1.70509103597554640E-002; + r = r * t + 3.93322852515666300E-002; + r = r * t - 8.37271292613764040E-002; + r = r * t + 1.64870423707623280E-001; + r = r * t - 2.99729521787681470E-001; + r = r * t + 4.99394435612628580E-001; + r = r * t - 7.52014596480123030E-001; + r = r * t + 9.99933138314926250E-001; + r = r * t - 1.12836725321102670E+000; + r = r * t + 9.99998988715182450E-001; + q = exp (-t * t); + r = 1.0 - r * q; + if (t >= 6.5) { + r = 1.0; + } + a = copysign (r, a); + } else { + q = a * a; + r = -7.77946848895991420E-010; + r = r * q + 1.37109803980285950E-008; + r = r * q - 1.62063137584932240E-007; + r = r * q + 1.64471315712790040E-006; + r = r * q - 1.49247123020098620E-005; + r = r * q + 1.20552935769006260E-004; + r = r * q - 8.54832592931448980E-004; + r = r * q + 5.22397760611847340E-003; + r = r * q - 2.68661706431114690E-002; + r = r * q + 1.12837916709441850E-001; + r = r * q - 3.76126389031835210E-001; + r = r * q + 1.12837916709551260E+000; + a = r * a; + } + return a; +} + +__func__(double erfc(double a)) +{ + double p, q, h, l; + + if (a < 0.75) { + return 1.0 - erf(a); + } + if (a > 27.3) { + return 0.0; + } + if (a < 5.0) { + double t; + t = 1.0 
/ a; + p = 1.9759923722227928E-008; + p = p * t - 1.0000002670474897E+000; + p = p * t - 7.4935303236347828E-001; + p = p * t - 1.5648136328071860E-001; + p = p * t + 1.2871196242447239E-001; + p = p * t + 1.1126459974811195E-001; + p = p * t + 4.0678642255914332E-002; + p = p * t + 7.9915414156678296E-003; + p = p * t + 7.1458332107840234E-004; + q = t + 2.7493547525030619E+000; + q = q * t + 3.3984254815725423E+000; + q = q * t + 2.4635304979947761E+000; + q = q * t + 1.1405284734691286E+000; + q = q * t + 3.4130157606195649E-001; + q = q * t + 6.2250967676044953E-002; + q = q * t + 5.5661370941268700E-003; + q = q * t + 1.0575248365468671E-009; + p = p / q; + p = p * t; + h = ((int)(a * 16.0)) * 0.0625; + l = (a - h) * (a + h); + q = exp(-h * h) * exp(-l); + q = q * 0.5; + p = p * q + q; + p = p * t; + } else { + double ooa, ooasq; + + ooa = 1.0 / a; + ooasq = ooa * ooa; + p = -4.0025406686930527E+005; + p = p * ooasq + 1.4420582543942123E+005; + p = p * ooasq - 2.7664185780951841E+004; + p = p * ooasq + 4.1144611644767283E+003; + p = p * ooasq - 5.8706000519209351E+002; + p = p * ooasq + 9.1490086446323375E+001; + p = p * ooasq - 1.6659491387740221E+001; + p = p * ooasq + 3.7024804085481784E+000; + p = p * ooasq - 1.0578553994424316E+000; + p = p * ooasq + 4.2314218745087778E-001; + p = p * ooasq - 2.8209479177354962E-001; + p = p * ooasq + 5.6418958354775606E-001; + h = a * a; + h = ((int)(a * 16.0)) * 0.0625; + l = (a - h) * (a + h); + q = exp(-h * h) * exp(-l); + p = p * ooa; + p = p * q; + } + return p; +} + +__func__(double lgamma(double a)) +{ + double s; + double t; + double i; + double fa; + double sum; + long long int quot; + if (__isnan(a) || __isinf(a)) { + return a * a; + } + fa = fabs(a); + if (fa >= 3.0) { + if (fa >= 8.0) { + /* Stirling approximation; coefficients from Hart et al, "Computer + * Approximations", Wiley 1968. Approximation 5404. 
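+ * The polynomial in t = 1/fa^2 evaluates the asymptotic series
+ *
+ *   lgamma(x) ~ (x - 1/2)*log(x) - x + log(2*pi)/2
+ *               + 1/(12*x) - 1/(360*x^3) + 1/(1260*x^5) - 1/(1680*x^7) + ...
+ *
+ * with 0.918938533204672 = log(2*pi)/2 and 0.0833333... = 1/12; the code
+ * after the polynomial assembles the (x - 1/2)*log(x) - x terms.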
+ */ + s = 1.0 / fa; + t = s * s; + sum = -0.1633436431e-2; + sum = sum * t + 0.83645878922e-3; + sum = sum * t - 0.5951896861197e-3; + sum = sum * t + 0.793650576493454e-3; + sum = sum * t - 0.277777777735865004e-2; + sum = sum * t + 0.833333333333331018375e-1; + sum = sum * s + 0.918938533204672; + s = 0.5 * log (fa); + t = fa - 0.5; + s = s * t; + t = s - fa; + s = s + sum; + t = t + s; + } else { + i = fa - 3.0; + s = -4.02412642744125560E+003; + s = s * i - 2.97693796998962000E+005; + s = s * i - 6.38367087682528790E+006; + s = s * i - 5.57807214576539320E+007; + s = s * i - 2.24585140671479230E+008; + s = s * i - 4.70690608529125090E+008; + s = s * i - 7.62587065363263010E+008; + s = s * i - 9.71405112477113250E+008; + t = i - 1.02277248359873170E+003; + t = t * i - 1.34815350617954480E+005; + t = t * i - 4.64321188814343610E+006; + t = t * i - 6.48011106025542540E+007; + t = t * i - 4.19763847787431360E+008; + t = t * i - 1.25629926018000720E+009; + t = t * i - 1.40144133846491690E+009; + t = s / t; + t = t + i; + } + } else if (fa >= 1.5) { + i = fa - 2.0; + t = 9.84839283076310610E-009; + t = t * i - 6.69743850483466500E-008; + t = t * i + 2.16565148880011450E-007; + t = t * i - 4.86170275781575260E-007; + t = t * i + 9.77962097401114400E-007; + t = t * i - 2.03041287574791810E-006; + t = t * i + 4.36119725805364580E-006; + t = t * i - 9.43829310866446590E-006; + t = t * i + 2.05106878496644220E-005; + t = t * i - 4.49271383742108440E-005; + t = t * i + 9.94570466342226000E-005; + t = t * i - 2.23154589559238440E-004; + t = t * i + 5.09669559149637430E-004; + t = t * i - 1.19275392649162300E-003; + t = t * i + 2.89051032936815490E-003; + t = t * i - 7.38555102806811700E-003; + t = t * i + 2.05808084278121250E-002; + t = t * i - 6.73523010532073720E-002; + t = t * i + 3.22467033424113040E-001; + t = t * i + 4.22784335098467190E-001; + t = t * i; + } else if (fa >= 0.7) { + i = 1.0 - fa; + t = 1.17786911519331130E-002; + t = t * i + 3.89046747413522300E-002; + t = t * i + 5.90045711362049900E-002; + t = t * i + 6.02143305254344420E-002; + t = t * i + 5.61652708964839180E-002; + t = t * i + 5.75052755193461370E-002; + t = t * i + 6.21061973447320710E-002; + t = t * i + 6.67614724532521880E-002; + t = t * i + 7.14856037245421020E-002; + t = t * i + 7.69311251313347100E-002; + t = t * i + 8.33503129714946310E-002; + t = t * i + 9.09538288991182800E-002; + t = t * i + 1.00099591546322310E-001; + t = t * i + 1.11334278141734510E-001; + t = t * i + 1.25509666613462880E-001; + t = t * i + 1.44049896457704160E-001; + t = t * i + 1.69557177031481600E-001; + t = t * i + 2.07385551032182120E-001; + t = t * i + 2.70580808427600350E-001; + t = t * i + 4.00685634386517050E-001; + t = t * i + 8.22467033424113540E-001; + t = t * i + 5.77215664901532870E-001; + t = t * i; + } else { + t = -9.04051686831357990E-008; + t = t * fa + 7.06814224969349250E-007; + t = t * fa - 3.80702154637902830E-007; + t = t * fa - 2.12880892189316100E-005; + t = t * fa + 1.29108470307156190E-004; + t = t * fa - 2.15932815215386580E-004; + t = t * fa - 1.16484324388538480E-003; + t = t * fa + 7.21883433044470670E-003; + t = t * fa - 9.62194579514229560E-003; + t = t * fa - 4.21977386992884450E-002; + t = t * fa + 1.66538611813682460E-001; + t = t * fa - 4.20026350606819980E-002; + t = t * fa - 6.55878071519427450E-001; + t = t * fa + 5.77215664901532870E-001; + t = t * fa; + t = t * fa + fa; + t = -log (t); + } + if (a >= 0.0) return t; + if (fa < 1e-19) return -log(fa); + i = floor(fa); + if (fa == i) return 1.0 / (fa -
i); /* a is an integer: return infinity */ + i = rint (2.0 * fa); + quot = static_cast<int>(i); + i = fa - 0.5 * i; + i = i * CUDART_PI; + if (quot & 1) { + i = cos(i); + } else { + i = sin(i); + } + i = fabs(i); + t = log(CUDART_PI / (i * fa)) - t; + return t; +} + +__func__(unsigned long long int __internal_host_nan_kernel(const char *s)) +{ + unsigned long long i = 0; + int c; + int ovfl = 0; + int invld = 0; + if (s && (*s == '0')) { + s++; + if ((*s == 'x') || (*s == 'X')) { + s++; + while (*s == '0') s++; + while (*s) { + if (i > 0x0fffffffffffffffULL) { + ovfl = 1; + } + c = (((*s) >= 'A') && ((*s) <= 'F')) ? (*s + 'a' - 'A') : (*s); + if ((c >= 'a') && (c <= 'f')) { + c = c - 'a' + 10; + i = i * 16 + c; + } else if ((c >= '0') && (c <= '9')) { + c = c - '0'; + i = i * 16 + c; + } else { + invld = 1; + } + s++; + } + } else { + while (*s == '0') s++; + while (*s) { + if (i > 0x1fffffffffffffffULL) { + ovfl = 1; + } + c = *s; + if ((c >= '0') && (c <= '7')) { + c = c - '0'; + i = i * 8 + c; + } else { + invld = 1; + } + s++; + } + } + } else if (s) { + while (*s) { + c = *s; + if ((i > 1844674407370955161ULL) || + ((i == 1844674407370955161ULL) && (c > '5'))) { + ovfl = 1; + } + if ((c >= '0') && (c <= '9')) { + c = c - '0'; + i = i * 10 + c; + } else { + invld = 1; + } + s++; + } + } + if (ovfl) { + i = ~0ULL; + } + if (invld) { + i = 0ULL; + } + i = (i & 0x000fffffffffffffULL) | 0x7ff8000000000000ULL; + return i; +} + +__func__(double nan(const char *tagp)) +{ + unsigned long long l; + double d; + l = __internal_host_nan_kernel(tagp); + memcpy(&d, &l, sizeof(double)); + return d; +} + +__func__(double __host_tgamma_kernel(double a)) +{ + double t; + t = - 4.4268934071252475E-010; + t = t * a - 2.0266591846658954E-007; + t = t * a + 1.1381211721119527E-006; + t = t * a - 1.2507734816630748E-006; + t = t * a - 2.0136501740408771E-005; + t = t * a + 1.2805012607354486E-004; + t = t * a - 2.1524140811527418E-004; + t = t * a - 1.1651675459704604E-003; + t = t * a + 7.2189432248466381E-003; + t = t * a - 9.6219715326862632E-003; + t = t * a - 4.2197734554722394E-002; + t = t * a + 1.6653861138250356E-001; + t = t * a - 4.2002635034105444E-002; + t = t * a - 6.5587807152025712E-001; + t = t * a + 5.7721566490153287E-001; + t = t * a + 1.0000000000000000E+000; + return t; +} + +__func__(double __host_stirling_poly(double a)) +{ + double x = 1.0 / a; + double z = 0.0; + z = + 8.3949872067208726e-004; + z = z * x - 5.1717909082605919e-005; + z = z * x - 5.9216643735369393e-004; + z = z * x + 6.9728137583658571e-005; + z = z * x + 7.8403922172006662e-004; + z = z * x - 2.2947209362139917e-004; + z = z * x - 2.6813271604938273e-003; + z = z * x + 3.4722222222222220e-003; + z = z * x + 8.3333333333333329e-002; + z = z * x + 1.0000000000000000e+000; + return z; +} + +__func__(double __host_tgamma_stirling(double a)) +{ + double z; + double x; + z = __host_stirling_poly (a); + if (a < 142.0) { + x = pow (a, a - 0.5); + a = x * exp (-a); + a = a * CUDART_SQRT_2PI; + return a * z; + } else if (a < 172.0) { + x = pow (a, 0.5 * a - 0.25); + a = x * exp (-a); + a = a * CUDART_SQRT_2PI; + a = a * z; + return a * x; + } else { + return exp(1000.0); /* INF */ + } +} + +__func__(double tgamma(double a)) +{ + double s, xx, x = a; + if (__isnan(a)) { + return a + a; + } + if (fabs(x) < 20.0) { + if (x >= 0.0) { + s = 1.0; + xx = x; + while (xx > 1.5) { + xx = xx - 1.0; + s = s * xx; + } + if (x >= 0.5) { + xx = xx - 1.0; + } + xx = __host_tgamma_kernel (xx); + if (x < 0.5) { + xx = xx * x; + } + s = s / xx; + }
else { + xx = x; + s = xx; + if (x == floor(x)) { + return 0.0 / (x - floor(x)); + } + while (xx < -0.5) { + xx = xx + 1.0; + s = s * xx; + } + xx = __host_tgamma_kernel (xx); + s = s * xx; + s = 1.0 / s; + } + return s; + } else { + if (x >= 0.0) { + return __host_tgamma_stirling (x); + } else { + double t; + int quot; + if (x == floor(x)) { + return 0.0 / (x - floor(x)); + } + if (x < -185.0) { + int negative; + x = floor(x); + negative = ((x - (2.0 * floor(0.5 * x))) == 1.0); + return negative ? (-1.0 / 1e308 / 1e308) : CUDART_ZERO; + } + /* compute sin(pi*x) accurately */ + xx = rint (2.0 * x); + quot = static_cast<int>(xx); + xx = -0.5 * xx + x; + xx = xx * CUDART_PI; + if (quot & 1) { + xx = cos (xx); + } else { + xx = sin (xx); + } + if (quot & 2) { + xx = -xx; + } + x = fabs (x); + s = exp (-x); + t = x - 0.5; + if (x > 140.0) t = 0.5 * t; + t = pow (x, t); + if (x > 140.0) s = s * t; + s = s * __host_stirling_poly (x); + s = s * x; + s = s * xx; + s = 1.0 / s; + s = s * CUDART_SQRT_PIO2; + s = s / t; + return s; + } + } +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR FLOAT AND LONG DOUBLE ROUTINES FOR WINDOWS PLATFORM * +* MAP FLOAT AND LONG DOUBLE ROUTINES TO DOUBLE ROUTINES * +* * +*******************************************************************************/ + +__func__(int __signbitl(const long double a)) +{ + return __signbit(static_cast<double>(a)); +} + +__func__(int __signbitf(const float a)) +{ + return __signbit(static_cast<double>(a)); +} + +__func__(int __finitel(const long double a)) +{ + return __finite(static_cast<double>(a)); +} + +__func__(int __finitef(const float a)) +{ + return __finite(static_cast<double>(a)); +} + +__func__(int __isinfl(const long double a)) +{ + return __isinf(static_cast<double>(a)); +} + +__func__(int __isinff(const float a)) +{ + return __isinf(static_cast<double>(a)); +} + +__func__(int __isnanl(const long double a)) +{ + return __isnan(static_cast<double>(a)); +} + +__func__(int __isnanf(const float a)) +{ + return __isnan(static_cast<double>(a)); +} + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +__func__(float fmaxf(const float a, const float b)) +{ + return static_cast<float>(fmax(static_cast<double>(a), static_cast<double>(b))); +} + +__func__(float fminf(const float a, const float b)) +{ + return static_cast<float>(fmin(static_cast<double>(a), static_cast<double>(b))); +} + +__func__(float roundf(const float a)) +{ + return static_cast<float>(round(static_cast<double>(a))); +} + +__func__(long int lroundf(const float a)) +{ + return lround(static_cast<double>(a)); +} + +__func__(long long int llroundf(const float a)) +{ + return llround(static_cast<double>(a)); +} + +__func__(float truncf(const float a)) +{ + return static_cast<float>(trunc(static_cast<double>(a))); +} + +__func__(float rintf(const float a)) +{ + return static_cast<float>(rint(static_cast<double>(a))); +} + +__func__(float nearbyintf(const float a)) +{ + return static_cast<float>(nearbyint(static_cast<double>(a))); +} + +__func__(long int lrintf(const float a)) +{ + return lrint(static_cast<double>(a)); +} + +__func__(long long int llrintf(const float a)) +{ + return llrint(static_cast<double>(a)); +} + +__func__(float logbf(const float a)) +{ + return static_cast<float>(logb(static_cast<double>(a))); +} + +__func__(float scalblnf(const float a, const long int b)) +{ + return static_cast<float>(scalbln(static_cast<double>(a), b)); +} + +__func__(float log2f(const float a)) +{ + return static_cast<float>(log2(static_cast<double>(a))); +} + +__func__(float exp2f(const float a)) +{ + return static_cast<float>(exp2(static_cast<double>(a))); +} + +__func__(float acoshf(const float a)) +{ + return
static_cast<float>(acosh(static_cast<double>(a))); +} + +__func__(float asinhf(const float a)) +{ + return static_cast<float>(asinh(static_cast<double>(a))); +} + +__func__(float atanhf(const float a)) +{ + return static_cast<float>(atanh(static_cast<double>(a))); +} + +__func__(float cbrtf(const float a)) +{ + return static_cast<float>(cbrt(static_cast<double>(a))); +} + +__func__(float expm1f(const float a)) +{ + return static_cast<float>(expm1(static_cast<double>(a))); +} + +__func__(float fdimf(const float a, const float b)) +{ + return static_cast<float>(fdim(static_cast<double>(a), static_cast<double>(b))); +} + +__func__(float log1pf(const float a)) +{ + return static_cast<float>(log1p(static_cast<double>(a))); +} + +__func__(float scalbnf(const float a, const int b)) +{ + return static_cast<float>(scalbn(static_cast<double>(a), b)); +} + +__func__(float fmaf(const float a, const float b, const float c)) +{ + return static_cast<float>(fma(static_cast<double>(a), static_cast<double>(b), static_cast<double>(c))); +} + +__func__(int ilogbf(const float a)) +{ + return ilogb(static_cast<double>(a)); +} + +__func__(float erff(const float a)) +{ + return static_cast<float>(erf(static_cast<double>(a))); +} + +__func__(float erfcf(const float a)) +{ + return static_cast<float>(erfc(static_cast<double>(a))); +} + +__func__(float lgammaf(const float a)) +{ + return static_cast<float>(lgamma(static_cast<double>(a))); +} + +__func__(float tgammaf(const float a)) +{ + return static_cast<float>(tgamma(static_cast<double>(a))); +} + +__func__(float remquof(const float a, const float b, int *quo)) +{ + return static_cast<float>(remquo(static_cast<double>(a), static_cast<double>(b), quo)); +} + +__func__(float remainderf(const float a, const float b)) +{ + return static_cast<float>(remainder(static_cast<double>(a), static_cast<double>(b))); +} +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#if (defined _MSC_VER) && (_MSC_VER >= 1700) +__func__(float j0f(const float a)) +{ + return static_cast<float>(_j0(static_cast<double>(a))); +} + +__func__(float j1f(const float a)) +{ + return static_cast<float>(_j1(static_cast<double>(a))); +} + +__func__(float jnf(const int n, const float a)) +{ + return static_cast<float>(_jn(n, static_cast<double>(a))); +} + +__func__(float y0f(const float a)) +{ + return static_cast<float>(_y0(static_cast<double>(a))); +} + +__func__(float y1f(const float a)) +{ + return static_cast<float>(_y1(static_cast<double>(a))); +} + +__func__(float ynf(const int n, const float a)) +{ + return static_cast<float>(_yn(n, static_cast<double>(a))); +} +#endif /* (defined _MSC_VER) && (_MSC_VER >= 1700) */ + + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR FLOAT ROUTINES FOR WINDOWS PLATFORM * +* * +*******************************************************************************/ + +#if (!defined(_MSC_VER) || _MSC_VER < 1800) +__func__(float copysignf(float a, const float b)) +{ + unsigned int aa, bb; + memcpy(&aa, &a, sizeof(float)); + memcpy(&bb, &b, sizeof(float)); + aa = (aa & ~0x80000000U) | (bb & 0x80000000U); + memcpy(&a, &aa, sizeof(float)); + return a; +} + +__func__(float nextafterf(float a, const float b)) +{ + unsigned int ia; + unsigned int ib; + memcpy(&ia, &a, sizeof(float)); + memcpy(&ib, &b, sizeof(float)); + if (__isnanf(a) || __isnanf(b)) return a + b; /*NaN*/ + if (((ia | ib) << 1U) == 0U) return b; + if (a == 0.0F) { + return copysignf(1.401298464e-045F, b); /*crossover*/ + } + if ((a < b) && (a < 0.0F)) ia--; + if ((a < b) && (a > 0.0F)) ia++; + if ((a > b) && (a < 0.0F)) ia++; + if ((a > b) && (a > 0.0F)) ia--; + memcpy(&a, &ia, sizeof(float)); + return a; +} + +__func__(float nanf(const char *tagp)) +{ + float f; + unsigned int i; + i = static_cast<unsigned int>(__internal_host_nan_kernel(tagp)); + i = (i & 0x007fffffU) | 0x7fc00000U; + memcpy(&f, &i,
sizeof(float)); + return f; +} + +#endif /* (!defined(_MSC_VER) || _MSC_VER < 1800) */ + +#endif /* _WIN32 */ + +/******************************************************************************* +* * +* HOST IMPLEMENTATION FOR DOUBLE AND FLOAT ROUTINES. ALL PLATFORMS * +* * +*******************************************************************************/ + +__func__(double rsqrt(const double a)) +{ + return 1.0 / sqrt(a); +} + +__func__(double rcbrt(const double a)) +{ + double s, t; + + if (__isnan(a)) { + return a + a; + } + if (a == 0.0 || __isinf(a)) { + return 1.0 / a; + } + s = fabs(a); + t = exp2(-CUDART_THIRD * log2(s)); /* initial approximation */ + t = ((t*t) * (-s*t) + 1.0) * (CUDART_THIRD*t) + t;/* refine approximation */ +#if defined(__APPLE__) + if (__signbitd(a)) +#else /* __APPLE__ */ + if (__signbit(a)) +#endif /* __APPLE__ */ + { + t = -t; + } + return t; +} + +__func__(double sinpi(double a)) +{ + int n; + + if (__isnan(a)) { + return a + a; + } + if (a == 0.0 || __isinf(a)) { + return sin (a); + } + if (a == floor(a)) { + return ((a / 1.0e308) / 1.0e308) / 1.0e308; + } + double twoa = a + a; + double rtwoa = round(twoa); + long long int l = (long long int)rtwoa; + n = (int)l; + a -= rtwoa * 0.5; + a = a * CUDART_PI; + if (n & 1) { + a = cos (a); + } else { + a = sin (a); + } + if (n & 2) { + a = -a; + } + return a; +} + +__func__(double cospi(double a)) +{ + int n; + + if (__isnan(a)) { + return a + a; + } + if (__isinf(a)) { + return cos (a); + } + if (fabs(a) > 9.0071992547409920e+015) { + a = 0.0; + } + double twoa = a + a; + double rtwoa = round(twoa); + long long int l = (long long int)rtwoa; + n = (int)l; + a -= rtwoa * 0.5; + a = a * CUDART_PI; + n++; + if (n & 1) { + a = cos (a); + } else { + a = sin (a); + } + if (n & 2) { + a = -a; + } + if (a == 0.0) { + a = fabs(a); + } + return a; +} + +__func__(void sincospi(const double a, double *sptr, double *cptr)) +{ + *sptr = sinpi(a); + *cptr = cospi(a); +} + +__func__(double erfinv(const double a)) +{ + double p, q, t, fa; + unsigned long long int l; + + fa = fabs(a); + if (fa >= 1.0) { + l = 0xfff8000000000000ULL; + memcpy(&t, &l, sizeof(double)); /* INDEFINITE */ + if (fa == 1.0) { + t = a * exp(1000.0); /* Infinity */ + } + } else if (fa >= 0.9375) { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. Table 59 + */ + t = log1p(-fa); + t = 1.0 / sqrt(-t); + p = 2.7834010353747001060e-3; + p = p * t + 8.6030097526280260580e-1; + p = p * t + 2.1371214997265515515e+0; + p = p * t + 3.1598519601132090206e+0; + p = p * t + 3.5780402569085996758e+0; + p = p * t + 1.5335297523989890804e+0; + p = p * t + 3.4839207139657522572e-1; + p = p * t + 5.3644861147153648366e-2; + p = p * t + 4.3836709877126095665e-3; + p = p * t + 1.3858518113496718808e-4; + p = p * t + 1.1738352509991666680e-6; + q = t + 2.2859981272422905412e+0; + q = q * t + 4.3859045256449554654e+0; + q = q * t + 4.6632960348736635331e+0; + q = q * t + 3.9846608184671757296e+0; + q = q * t + 1.6068377709719017609e+0; + q = q * t + 3.5609087305900265560e-1; + q = q * t + 5.3963550303200816744e-2; + q = q * t + 4.3873424022706935023e-3; + q = q * t + 1.3858762165532246059e-4; + q = q * t + 1.1738313872397777529e-6; + t = p / (q * t); + if (a < 0.0) t = -t; + } else if (fa >= 0.75) { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. 
Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. Table 39 + */ + t = a * a - .87890625; + p = .21489185007307062000e+0; + p = p * t - .64200071507209448655e+1; + p = p * t + .29631331505876308123e+2; + p = p * t - .47644367129787181803e+2; + p = p * t + .34810057749357500873e+2; + p = p * t - .12954198980646771502e+2; + p = p * t + .25349389220714893917e+1; + p = p * t - .24758242362823355486e+0; + p = p * t + .94897362808681080020e-2; + q = t - .12831383833953226499e+2; + q = q * t + .41409991778428888716e+2; + q = q * t - .53715373448862143349e+2; + q = q * t + .33880176779595142685e+2; + q = q * t - .11315360624238054876e+2; + q = q * t + .20369295047216351160e+1; + q = q * t - .18611650627372178511e+0; + q = q * t + .67544512778850945940e-2; + p = p / q; + t = a * p; + } else { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. Table 18 + */ + t = a * a - .5625; + p = - .23886240104308755900e+2; + p = p * t + .45560204272689128170e+3; + p = p * t - .22977467176607144887e+4; + p = p * t + .46631433533434331287e+4; + p = p * t - .43799652308386926161e+4; + p = p * t + .19007153590528134753e+4; + p = p * t - .30786872642313695280e+3; + q = t - .83288327901936570000e+2; + q = q * t + .92741319160935318800e+3; + q = q * t - .35088976383877264098e+4; + q = q * t + .59039348134843665626e+4; + q = q * t - .48481635430048872102e+4; + q = q * t + .18997769186453057810e+4; + q = q * t - .28386514725366621129e+3; + p = p / q; + t = a * p; + } + return t; +} + +__func__(double erfcinv(const double a)) +{ + double t; + unsigned long long int l; + + if (__isnan(a)) { + return a + a; + } + if (a <= 0.0) { + l = 0xfff8000000000000ULL; + memcpy(&t, &l, sizeof(double)); /* INDEFINITE */ + if (a == 0.0) { + t = (1.0 - a) * exp(1000.0); /* Infinity */ + } + } + else if (a >= 0.0625) { + t = erfinv (1.0 - a); + } + else if (a >= 1e-100) { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. Table 59 + */ + double p, q; + t = log(a); + t = 1.0 / sqrt(-t); + p = 2.7834010353747001060e-3; + p = p * t + 8.6030097526280260580e-1; + p = p * t + 2.1371214997265515515e+0; + p = p * t + 3.1598519601132090206e+0; + p = p * t + 3.5780402569085996758e+0; + p = p * t + 1.5335297523989890804e+0; + p = p * t + 3.4839207139657522572e-1; + p = p * t + 5.3644861147153648366e-2; + p = p * t + 4.3836709877126095665e-3; + p = p * t + 1.3858518113496718808e-4; + p = p * t + 1.1738352509991666680e-6; + q = t + 2.2859981272422905412e+0; + q = q * t + 4.3859045256449554654e+0; + q = q * t + 4.6632960348736635331e+0; + q = q * t + 3.9846608184671757296e+0; + q = q * t + 1.6068377709719017609e+0; + q = q * t + 3.5609087305900265560e-1; + q = q * t + 5.3963550303200816744e-2; + q = q * t + 4.3873424022706935023e-3; + q = q * t + 1.3858762165532246059e-4; + q = q * t + 1.1738313872397777529e-6; + t = p / (q * t); + } + else { + /* Based on: J.M. Blair, C.A. Edwards, J.H. Johnson: Rational Chebyshev + Approximations for the Inverse of the Error Function. Mathematics of + Computation, Vol. 30, No. 136 (Oct. 1976), pp. 827-830. 
Table 82 + */ + double p, q; + t = log(a); + t = 1.0 / sqrt(-t); + p = 6.9952990607058154858e-1; + p = p * t + 1.9507620287580568829e+0; + p = p * t + 8.2810030904462690216e-1; + p = p * t + 1.1279046353630280005e-1; + p = p * t + 6.0537914739162189689e-3; + p = p * t + 1.3714329569665128933e-4; + p = p * t + 1.2964481560643197452e-6; + p = p * t + 4.6156006321345332510e-9; + p = p * t + 4.5344689563209398450e-12; + q = t + 1.5771922386662040546e+0; + q = q * t + 2.1238242087454993542e+0; + q = q * t + 8.4001814918178042919e-1; + q = q * t + 1.1311889334355782065e-1; + q = q * t + 6.0574830550097140404e-3; + q = q * t + 1.3715891988350205065e-4; + q = q * t + 1.2964671850944981713e-6; + q = q * t + 4.6156017600933592558e-9; + q = q * t + 4.5344687377088206783e-12; + t = p / (q * t); + } + return t; +} + +__func__(double normcdfinv(const double a)) +{ + return -1.4142135623730951 * erfcinv(a + a); +} + +__func__(double normcdf(double a)) +{ + double ah, al, t1, t2, u1, u2, v1, v2, z; + if (fabs (a) > 38.5) a = copysign (38.5, a); + ah = a * 134217729.0; + u1 = (a - ah) + ah; + u2 = a - u1; + v1 = -7.0710678398609161e-01; + v2 = 2.7995440410322203e-09; + t1 = a * -CUDART_SQRT_HALF_HI; + t2 = (((u1 * v1 - t1) + u1 * v2) + u2 * v1) + u2 * v2; + t2 = (a * -CUDART_SQRT_HALF_LO) + t2; + ah = t1 + t2; + z = erfc (ah); + if (a < -1.0) { + al = (t1 - ah) + t2; + t1 = -2.0 * ah * z; + z = t1 * al + z; + } + return 0.5 * z; +} + +__func__(double erfcx(const double a)) +{ + double x, t1, t2, t3; + + if (__isnan(a)) { + return a + a; + } + x = fabs(a); + if (x < 32.0) { + /* + * This implementation of erfcx() is based on the algorithm in: M. M. + * Shepherd and J. G. Laframboise, "Chebyshev Approximation of (1 + 2x) + * exp(x^2)erfc x in 0 <= x < INF", Mathematics of Computation, Vol. + * 36, No. 153, January 1981, pp. 249-253. For the core approximation, + * the input domain [0,INF] is transformed via (x-k) / (x+k) where k is + * a precision-dependent constant. Here, we choose k = 4.0, so the input + * domain [0, 27.3] is transformed into the core approximation domain + * [-1, 0.744409]. 
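+ * (Check: with k = 4 the transform (x-4)/(x+4) sends x = 0 to -1 and
+ * x = 27.3 to 23.3/31.3, which is approximately 0.744409.)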
+ */ + /* + // Compute (1+2*x)*exp(x*x)*erfc(x) + */ + /* t2 = (x-4.0)/(x+4.0), transforming [0,INF] to [-1,+1] */ + t1 = x - 4.0; + t2 = x + 4.0; + t2 = t1 / t2; + /* approximate on [-1, 0.744409] */ + t1 = - 3.5602694826817400E-010; + t1 = t1 * t2 - 9.7239122591447274E-009; + t1 = t1 * t2 - 8.9350224851649119E-009; + t1 = t1 * t2 + 1.0404430921625484E-007; + t1 = t1 * t2 + 5.8806698585341259E-008; + t1 = t1 * t2 - 8.2147414929116908E-007; + t1 = t1 * t2 + 3.0956409853306241E-007; + t1 = t1 * t2 + 5.7087871844325649E-006; + t1 = t1 * t2 - 1.1231787437600085E-005; + t1 = t1 * t2 - 2.4399558857200190E-005; + t1 = t1 * t2 + 1.5062557169571788E-004; + t1 = t1 * t2 - 1.9925637684786154E-004; + t1 = t1 * t2 - 7.5777429182785833E-004; + t1 = t1 * t2 + 5.0319698792599572E-003; + t1 = t1 * t2 - 1.6197733895953217E-002; + t1 = t1 * t2 + 3.7167515553018733E-002; + t1 = t1 * t2 - 6.6330365827532434E-002; + t1 = t1 * t2 + 9.3732834997115544E-002; + t1 = t1 * t2 - 1.0103906603555676E-001; + t1 = t1 * t2 + 6.8097054254735140E-002; + t1 = t1 * t2 + 1.5379652102605428E-002; + t1 = t1 * t2 - 1.3962111684056291E-001; + t1 = t1 * t2 + 1.2329951186255526E+000; + /* + // Note: (1+2*x)*exp(x*x)*erfc(x) / (1+2*x) = exp(x*x)*erfc(x) + */ + t2 = 2.0 * x + 1.0; + t1 = t1 / t2; + } else { + /* asymptotic expansion for large arguments */ + t2 = 1.0 / x; + t3 = t2 * t2; + t1 = -29.53125; + t1 = t1 * t3 + 6.5625; + t1 = t1 * t3 - 1.875; + t1 = t1 * t3 + 0.75; + t1 = t1 * t3 - 0.5; + t1 = t1 * t3 + 1.0; + t2 = t2 * 5.6418958354775628e-001; + t1 = t1 * t2; + } + if (a < 0.0) { + /* + // Note: erfcx(x) = 2*exp(x^2) - erfcx(|x|) + */ + t2 = (static_cast<int>(x * 16.0)) * 0.0625; + t3 = (x - t2) * (x + t2); + t3 = exp(t2 * t2) * exp(t3); + t3 = t3 + t3; + t1 = t3 - t1; + } + return t1; +} + +__func__(float rsqrtf(const float a)) +{ + return static_cast<float>(rsqrt(static_cast<double>(a))); +} + +__func__(float rcbrtf(const float a)) +{ + return static_cast<float>(rcbrt(static_cast<double>(a))); +} + +__func__(float sinpif(const float a)) +{ + return static_cast<float>(sinpi(static_cast<double>(a))); +} + +__func__(float cospif(const float a)) +{ + return static_cast<float>(cospi(static_cast<double>(a))); +} + +__func__(void sincospif(const float a, float *sptr, float *cptr)) +{ + double s, c; + + sincospi(static_cast<double>(a), &s, &c); + *sptr = static_cast<float>(s); + *cptr = static_cast<float>(c); +} + +__func__(float erfinvf(const float a)) +{ + return static_cast<float>(erfinv(static_cast<double>(a))); +} + +__func__(float erfcinvf(const float a)) +{ + return static_cast<float>(erfcinv(static_cast<double>(a))); +} + +__func__(float normcdfinvf(const float a)) +{ + return static_cast<float>(normcdfinv(static_cast<double>(a))); +} + +__func__(float normcdff(const float a)) +{ + return static_cast<float>(normcdf(static_cast<double>(a))); +} + +__func__(float erfcxf(const float a)) +{ + return static_cast<float>(erfcx(static_cast<double>(a))); +} + +#if defined(_WIN32) +#pragma warning (pop) +#endif /* _WIN32 */ + +#endif /* !__CUDACC__ */ + +#endif /* !__MATH_FUNCTIONS_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_HPP__ +#endif diff --git a/miniCUDA124/include/crt/mma.h b/miniCUDA124/include/crt/mma.h new file mode 100644 index 0000000000000000000000000000000000000000..d42b2cf288d5d3ae7edaf7dcc44b3fd59b25a384 --- /dev/null +++ b/miniCUDA124/include/crt/mma.h @@ -0,0 +1,754 @@ +/* + * Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead.") +#else +#warning "crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead." 
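+/* Reaching this file directly (not via mma.h) triggers the message above; the #define pair that follows marks __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ as set and records, via __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__, that the matching #undef block at the end of this header should restore the original state. */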
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__ +#endif + +#if !defined(__CUDA_MMA_H__) +#define __CUDA_MMA_H__ + +#include <cuda_fp16.h> +#include <cuda_bf16.h> + +#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + + +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ && !_NVHPC_CUDA */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ || _NVHPC_CUDA */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 +#define __CUDA_IMMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 +#define __CUDA_SUBBYTE_IMMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 +#define __CUDA_AMPERE_MMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */ + +namespace nvcuda { +namespace wmma { + + // utility functions +#ifdef __CUDA_AMPERE_MMA__ + inline __device__ float __float_to_tf32(float in) + { + float ret; + asm("{\n .reg .b32 __$1;" + "\n cvt.rna.tf32.f32 __$1, %1;" + "\n mov.b32 %0, __$1;\n}\n" : "=f"(ret) : "f"(in) ); + return ret; + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // tags + // + struct row_major; + struct col_major; + struct matrix_a; + struct matrix_b; + struct accumulator; + +#ifdef __CUDA_AMPERE_MMA__ + namespace precision { + struct tf32; + } +#endif /* __CUDA_AMPERE_MMA__ */ +#ifdef __CUDA_SUBBYTE_IMMA__ + namespace experimental { + namespace precision { + struct u4; // 4-bit unsigned + struct s4; // 4-bit signed + struct b1; // 1-bit + } + enum bmmaBitOp { bmmaBitOpXOR = 1 +#ifdef __CUDA_AMPERE_MMA__ + , bmmaBitOpAND = 2 +#endif /* __CUDA_AMPERE_MMA__ */ + }; + enum bmmaAccumulateOp { bmmaAccumulateOpPOPC = 1 }; + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + // + // layout + // + enum layout_t { + mem_row_major, mem_col_major + }; + + template <typename T> + struct helper_traits { + typedef T element_type; + typedef T storage_element_type; + typedef T fill_argument_type; + }; + +#ifdef __CUDA_SUBBYTE_IMMA__ + template<> struct helper_traits<experimental::precision::u4> { + typedef experimental::precision::u4 element_type; + typedef unsigned int storage_element_type; + typedef unsigned int fill_argument_type; + }; + + template<> struct helper_traits<experimental::precision::s4> { + typedef experimental::precision::s4 element_type; + typedef int storage_element_type; + typedef int fill_argument_type; + }; + + template<> struct helper_traits<experimental::precision::b1> { + typedef experimental::precision::b1 element_type; + typedef unsigned int storage_element_type; + typedef unsigned int fill_argument_type; + }; +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> struct helper_traits<precision::tf32> { + typedef precision::tf32 element_type; + typedef float storage_element_type; + typedef float fill_argument_type; + }; +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // The base fragment type + // + /* note: alignment required for compiler implementation */ + template <typename T, int size, int packed_size = size> + struct __align__(8) __frag_base { + + /* Number of elements in the fragment */ + enum {num_elements = size}; + + /* Number of storage elements in the fragment. + + The elements of the fragment are packed together when the + fragment element type is experimental::precision::u4, + experimental::precision::s4 or experimental::precision::b1. + When elements are packed, num_storage_elements + will be smaller than num_elements.
+ */ + enum {num_storage_elements = packed_size}; + + /* element type of the fragment */ + typedef T element_type; + + /* element type of the storage representation. + + The mapping from element_type to storage_element_type is as follows: + experimental::precision::u4 -> unsigned (8 elements in 1 storage element) + experimental::precision::s4 -> int (8 elements in 1 storage element) + experimental::precision::b1 -> unsigned (32 elements in 1 storage element) + precision::tf32 -> float (1 element in 1 storage element) + all other types T -> T + */ + typedef typename helper_traits<T>::storage_element_type storage_element_type; + + /* Storage for the (possibly packed) fragment elements. */ + storage_element_type x[num_storage_elements]; + }; + + template <typename FragEleType, typename StorageType, typename ArgType> + static inline __device__ StorageType __get_storage_value(ArgType in) { return in; } + +#ifdef __CUDA_SUBBYTE_IMMA__ + template<> + __device__ inline unsigned + __get_storage_value<experimental::precision::u4, unsigned, unsigned>(unsigned in) + { + /* For experimental::precision::u4 fragment element type, pack 8 elements into a single + 32-bit unsigned int storage element */ + unsigned val = in & 0xf; + return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) | + (val << 20) | (val << 24) | (val << 28)); + }; + + template<> + __device__ inline int + __get_storage_value<experimental::precision::s4, int, int>(int in) + { + /* For experimental::precision::s4 fragment element type, pack 8 elements into a single + 32-bit signed int storage element */ + int val = in & 0xf; + return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) | + (val << 20) | (val << 24) | (val << 28)); + }; + + template<> + __device__ inline unsigned + __get_storage_value<experimental::precision::b1, unsigned, unsigned>(unsigned in) + { + /* For experimental::precision::b1 fragment element type, pack 32 elements into a + single 32-bit unsigned int storage element */ + return (in & 0x1) ? 0xFFFFFFFFU : 0; + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + template <typename FragEleType, int size, int packed_size> + __CUDA_MMA_DEVICE_DECL__ void fill_fragment(__frag_base<FragEleType, size, packed_size>& f, + /* The mapping from fragment element type (FragEleType) to fill_argument_type is: + experimental::precision::u4 -> unsigned (only lower 4 bits taken) + experimental::precision::s4 -> int (only lower 4 bits taken) + experimental::precision::b1 -> unsigned (only lowest 1 bit taken) + precision::tf32 -> float + all other types T -> T + */ + const typename helper_traits<FragEleType>::fill_argument_type & in) { + + /* get the (possibly packed) storage element value.
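+ For example, under the experimental::precision::u4 specialization an
+ input of 3 is replicated into every nibble of the storage word, 0x33333333.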
See the specializations above for fragment + element types where the storage representation is packed */ + typedef typename helper_traits::storage_element_type storage_type; + storage_type v = __get_storage_value(in); +#pragma unroll + for (int i=0; i< f.num_storage_elements; i++) + f.x[i] = v; + } + + // + // Fragment template + // + template class fragment; + + // + // Fragments for 16x16x16 + // + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 8> {}; + template<> class fragment : public __frag_base {}; + +#ifdef __CUDA_IMMA__ + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 8> {}; +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Fragments for 32x8x16 + // + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 8> {}; + template<> class fragment : public __frag_base {}; + +#ifdef __CUDA_IMMA__ + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Fragments for 8x32x16 + // + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 16> {}; + template<> class fragment : public __frag_base<__half, 8> {}; + template<> class fragment : public __frag_base {}; + +#ifdef __CUDA_IMMA__ + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + 
template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 4> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; + template<> class fragment : public __frag_base<__nv_bfloat16, 16> {}; +#endif /* __CUDA_AMPERE_MMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Fragments for 8x8x32 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + + // + // Fragments for 8x8x128 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // Fragments for 16x16x8 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + + // + // Fragments for 8x8x4 + // + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; + template<> class fragment : public __frag_base {}; +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // Load functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const 
unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Load functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Load functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) __DEF_IF_HOST + 
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Load functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Load functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + +#ifdef __CUDA_AMPERE_MMA__ + // + // Load functions for frags of shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // 
Load functions for frags of shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Store functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Store functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Store functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // Store functions for frags of shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST + + // + // Store functions for frags of shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment& a, unsigned ldm, layout_t layout) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const 
fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const 
fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const 
fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + 
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // MMA functions for shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf=false) __DEF_IF_HOST + + + // + // MMA functions for shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, + experimental::bmmaBitOp = experimental::bmmaBitOpXOR, + experimental::bmmaAccumulateOp = experimental::bmmaAccumulateOpPOPC) __DEF_IF_HOST + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // MMA functions for shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + + // + // MMA functions for shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) __DEF_IF_HOST +#endif /* __CUDA_AMPERE_MMA__ */ +}; +}; + +#undef __DEF_IF_HOST +#undef __CUDA_IMMA__ +#undef __CUDA_SUBBYTE_IMMA__ +#undef __CUDA_AMPERE_MMA__ +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __CUDA_MMA_DEVICE_DECL__ + +#if defined(__CUDA_ARCH__) +#include "mma.hpp" +#endif /* defined(__CUDA_ARCH__) */ + + +#endif /* !__CUDA_MMA_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__ +#endif diff --git a/miniCUDA124/include/crt/mma.hpp b/miniCUDA124/include/crt/mma.hpp new file mode 100644 index 0000000000000000000000000000000000000000..71ad131295702bd5bfa97c109ff15dc5864d3285 --- /dev/null +++ b/miniCUDA124/include/crt/mma.hpp @@ -0,0 +1,1128 @@ +/* + * Copyright 2017-2020 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
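The #pragma message/#warning pair above enforces that crt/mma.hpp is only ever reached through a public header, which defines __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ before including it. As a minimal sketch of the intended include path (illustrative only, assuming a standard CUDA toolkit layout; not part of this diff):

// sketch.cu -- illustrative consumer; <mma.h> is the public entry point.
// mma.h defines the internal-headers macro itself and, as its tail above shows,
// pulls in crt/mma.hpp only when __CUDA_ARCH__ is set (the device pass).
// Compile with nvcc -arch=sm_70 or newer.
#include <mma.h>

__global__ void probe() {
    nvcuda::wmma::fragment<nvcuda::wmma::accumulator, 16, 16, 16, float> acc;
    nvcuda::wmma::fill_fragment(acc, 0.0f);
}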
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__
+#endif
+
+#if !defined(__CUDA_MMA_HPP__)
+#define __CUDA_MMA_HPP__
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
+
+#include <cuda_fp16.h>
+#include <cuda_bf16.h>
+
+#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720
+#define __CUDA_IMMA__ 1
+#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730
+#define __CUDA_SUBBYTE_IMMA__ 1
+#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+#define __CUDA_AMPERE_MMA__ 1
+#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */
+
+namespace nvcuda {
+namespace wmma {
+
+  //
+  // Load functions for frags of shape m16n16k16
+  //
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
+    __hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
+    __hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
+    __hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
+    __hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
+    if (layout == mem_row_major)
+      __hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
+    else
+      __hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
+    if (layout == mem_row_major)
+      __hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
+    else
+      __hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
+  }
+
+#ifdef __CUDA_IMMA__
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
+    __imma_m16n16k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1);
+  }
+
+  __CUDA_MMA_DEVICE_DECL__ void
load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m16n16k16_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m16n16k16_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // Load functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0); + else + __hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0); + else + __hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1); + } + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, 
unsigned ldm) { + __imma_m32n8k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m32n8k16_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m32n8k16_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // Load functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0); + else + __hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0); + else + __hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1); + } + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_u8((int *)&a, (const 
int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n32k16_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m8n32k16_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Load functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_a_s4((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_a_u4((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_b_s4((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_b_u4((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n8k32_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m8n8k32_ld_c((int *)&a, (const int*)p, ldm, 1); + } + + // + // Load functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __bmma_m8n8k128_ld_a_b1((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __bmma_m8n8k128_ld_b_b1((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __bmma_m8n8k128_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __bmma_m8n8k128_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + + +#ifdef __CUDA_AMPERE_MMA__ + // load functions for frags of shape m16n16k8 + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_a((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_a((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_b((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* 
p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_b((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __mma_tf32_m16n16k8_ld_c((float *)&a, p, ldm, 0); + else + __mma_tf32_m16n16k8_ld_c((float *)&a, p, ldm, 1); + } + + // load functions for frags of shape m8n8k4 + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_a((double *)&a, p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_a((double *)&a, p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_b((double *)&a, p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_b((double *)&a, p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __dmma_m8n8k4_ld_c((double *)&a, p, ldm, 0); + else + __dmma_m8n8k4_ld_c((double *)&a, p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Store functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 0); + else + __hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 0); + else + __hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 0); + else + __hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 0); + else + __hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 0); + else + __hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout 
== mem_row_major) + __hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 0); + else + __hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Store functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, 1); + } + + // + // Store functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, 0); + else + __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + +#ifdef __CUDA_AMPERE_MMA__ + + // + // Store functions for frags of shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 0); + else + __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 1); + } + + + // + // Store functions for frags of shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 0); + else + __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m16n16k16 + // + // D fp16, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const 
fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + // D fp16, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0); + } + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1); + else + 
__imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1); + else + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1); + else + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1); + else + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // MMA functions for shape m32n8k16 + // + // D fp16, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& 
c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + // D fp16, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, 
bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m8n32k16 + // + // D fp16, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + 
} + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + // D fp16, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + } 
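A pattern worth calling out in these integer overloads: each wrapper forwards two trailing integers to the __imma_* intrinsic. The last one is the saturation flag (1 when satf is requested, 0 otherwise), and the one before it appears to encode the (A, B) layout pair as 0/1/2/3 for row-row, row-col, col-row, col-col — an inference from the call sites, since the fragment template arguments were lost in this copy of the diff. On that reading, a minimal warp-level consumer of the m8n32k16 signed-char path would look like the sketch below (illustrative, not part of the header):

// Sketch only: one warp computes an 8x32 int32 tile from s8 inputs.
// Requires sm_72+ and a launch with at least one full warp.
#include <mma.h>
using namespace nvcuda;

__global__ void s8_wmma_tile(const signed char* A, const signed char* B, int* C) {
    wmma::fragment<wmma::matrix_a, 8, 32, 16, signed char, wmma::row_major> a;
    wmma::fragment<wmma::matrix_b, 8, 32, 16, signed char, wmma::col_major> b;
    wmma::fragment<wmma::accumulator, 8, 32, 16, int> acc;

    wmma::fill_fragment(acc, 0);          // zero the accumulator
    wmma::load_matrix_sync(a, A, 16);     // ldm = leading dimension of A (8x16, row-major)
    wmma::load_matrix_sync(b, B, 16);     // ldm = leading dimension of B (16x32, col-major)
    wmma::mma_sync(acc, a, b, acc);       // satf defaults to false
    wmma::store_matrix_sync(C, acc, 32, wmma::mem_row_major);
}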
+ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // MMA functions for shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + // + // MMA functions for shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, + experimental::bmmaBitOp op, experimental::bmmaAccumulateOp) + { + +#ifdef __CUDA_AMPERE_MMA__ + if (op == experimental::bmmaBitOpAND) + __bmma_m8n8k128_mma_and_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1); + else +#endif /* __CUDA_AMPERE_MMA__ */ + __bmma_m8n8k128_mma_xor_popc_b1((int*)&d, (const int *)&a, (const int *)&b, 
(const int*)&c, 1); + } + + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // MMA functions for shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + + // + // MMA functions for shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 2, 0); + } + +#endif /* __CUDA_AMPERE_MMA__ */ + +}; +}; + +#undef __CUDA_IMMA__ +#undef __CUDA_SUBBYTE_IMMA__ +#undef __CUDA_MMA_DEVICE_DECL__ +#undef __CUDA_AMPERE_MMA__ + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + + +#endif /* __CUDA_MMA_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__ +#endif diff --git a/miniCUDA124/include/crt/nvfunctional b/miniCUDA124/include/crt/nvfunctional new file mode 100644 index 0000000000000000000000000000000000000000..9a4dcd695cca89a84de367ce8ffd0e43ad8dd9f2 --- /dev/null +++ b/miniCUDA124/include/crt/nvfunctional @@ -0,0 +1,621 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead.") +#else +#warning "crt/nvfunctional is an internal header file and must not be used directly. 
Please use nvfunctional instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
+#endif
+
+#ifndef __NV_LIBCXX_FUNCTIONAL_H__
+#define __NV_LIBCXX_FUNCTIONAL_H__
+
+#if __cplusplus < 201103L
+  #if defined(_MSC_VER)
+    #if _MSC_VER < 1800
+      #error This library requires VS 2013 and above
+    #endif /* _MSC_VER < 1800 */
+  #else /* !_MSC_VER */
+    #error This library requires support for the ISO C++ 2011 standard
+  #endif /* _MSC_VER */
+#endif /* __cplusplus */
+
+#if defined(_MSC_VER)
+  #define __NV_ALIGNOF __alignof
+  #define __NV_NOEXCEPT
+  #define __NV_CONSTEXPR
+#else /* !_MSC_VER */
+  #define __NV_ALIGNOF alignof
+  #define __NV_NOEXCEPT noexcept
+  #define __NV_CONSTEXPR constexpr
+#endif /* _MSC_VER */
+
+#include <cstddef>
+#include <new>
+#include <type_traits>
+
+// n3290 20.8
+namespace nvstd
+{
+
+namespace internal {
+
+// D.8.1 base (deprecated) [depr.base]
+template<typename _Arg, typename _Result>
+struct unary_function
+{
+  typedef _Arg argument_type;
+  typedef _Result result_type;
+};
+
+template<typename _Arg1, typename _Arg2, typename _Result>
+struct binary_function
+{
+  typedef _Arg1 first_argument_type;
+  typedef _Arg2 second_argument_type;
+  typedef _Result result_type;
+};
+
+// move
+template<typename _T>
+inline __device__ __host__
+typename std::remove_reference<_T>::type&& move(_T&& __t) __NV_NOEXCEPT
+{
+  return static_cast<typename std::remove_reference<_T>::type&&>(__t);
+}
+
+// 20.2.2 swap [utility.swap]
+// swap
+template<typename _T,
+         typename = typename std::enable_if<
+           std::is_move_constructible<_T>::value &&
+           std::is_move_assignable<_T>::value>::type>
+inline __device__ __host__
+void swap(_T& __a, _T& __b)
+#if !defined(_MSC_VER)
+noexcept(std::is_nothrow_move_constructible<_T>::value &&
+         std::is_nothrow_move_assignable<_T>::value)
+#endif /* !defined(_MSC_VER) */
+{
+  _T __t(internal::move(__a));
+  __a = internal::move(__b);
+  __b = internal::move(__t);
+}
+
+// 20.2.3 forward/move helpers [forward]
+// forward
+template<typename _T>
+inline __device__ __host__
+_T&& forward(typename std::remove_reference<_T>::type& __t) __NV_NOEXCEPT
+{
+  return static_cast<_T&&>(__t);
+}
+
+template<typename _T>
+inline __device__ __host__
+_T&& forward(typename std::remove_reference<_T>::type&& __t) __NV_NOEXCEPT
+{
+  static_assert(!std::is_lvalue_reference<_T>::value,
+                "Error: __t is instantiated with an lvalue reference type");
+  return static_cast<_T&&>(__t);
+}
+
+} // namespace internal
+
+namespace __functional_helpers
+{
+
+struct __dummy_class;
+
+// Store small functors locally:
+// a functor is legitimate to local storage if it is one of the following types:
+// * member object pointer;
+// * member function pointer;
+// * closure type of size less than or equal to the largest size of
+//   the above types;
+// * function pointer;
+// * any callable class whose size is less than or equal to
+//   the largest one of the above types;
+union _Small_functor_types
+{
+  void *__obj;
+  void (*__func_ptr)();
+  void (__dummy_class::*mem_fn_ptr)();
+};
+
+struct _Small_functor_data {
+  char __data[sizeof(_Small_functor_types)];
+};
+
+template<typename _T>
+struct __maybe_base_function
+{ };
+
+template<typename _RetType, typename _T1>
+struct __maybe_base_function<_RetType(_T1)>
+  : public internal::unary_function<_T1, _RetType>
+{ };
+
+template<typename _RetType, typename _T1, typename _T2>
+struct __maybe_base_function<_RetType(_T1, _T2)>
+  : public internal::binary_function<_T1, _T2, _RetType>
+{ };
+
+} // namespace __functional_helpers
+
+// 20.8.11 Polymorphic function wrappers [func.wrap]
+
+// 20.8.11.1 Class bad_function_call [func.wrap.badcall]
+// unimplemented because of exception
+// class bad_function_call : public std::exception
+
+// 20.8.11.2 Class template function [func.wrap.func]
+
+template<typename _T> class function; //
undefined + +// Simplified version of template class function, which +// * does not support allocator_arg_t; +// * does not support target and target_type that rely on RTTI +// * does not throw bad_function_call exception on invoking a NULL target +template +class function<_RetType(_ArgTypes...)> + : public __functional_helpers::__maybe_base_function<_RetType(_ArgTypes...)> +{ + __functional_helpers::_Small_functor_data __small_functor_data; + void *__obj; + typedef _RetType(*__meta_fn_type)(void *, _ArgTypes...); + __meta_fn_type __meta_fn; + typedef void(*__cloner_type)(function &, const function &); + __cloner_type __cloner; + typedef void(*__destructor_type)(function *); + __destructor_type __destructor; + + #pragma nv_exec_check_disable + template + __device__ __host__ + __NV_CONSTEXPR bool __use_small_functor_data() const + { + return (sizeof(_F) <= sizeof(__small_functor_data) && + __NV_ALIGNOF(_F) <= __NV_ALIGNOF( + __functional_helpers::_Small_functor_types)); + } + + #pragma nv_exec_check_disable + __device__ __host__ + void* __get_small_functor_data() const + { + return (void*)(&__small_functor_data.__data[0]); + } + + #pragma nv_exec_check_disable + __device__ __host__ + bool __is_small_functor_data() const + { + return __obj == __get_small_functor_data(); + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static _F& __get_functor(void *__p) + { + return *((_F*)__p); + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const _F& /*__p*/) + { + return false; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const _F* __p) + { + return !__p; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const _Res _C::* __p) + { + return !__p; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const function<_Res(_Args...)>& __p) + { + return !__p; + } + + template + struct __make_cloner + { + #pragma nv_exec_check_disable + __device__ __host__ + static void __clone_data(function &__dest, const function &__src) + { + if (__dest.__use_small_functor_data<_F>()) { + __dest.__obj = __dest.__get_small_functor_data(); + new (__dest.__obj) _F(__src.__get_functor<_F>(__src.__obj)); + } + else { + __dest.__obj = new _F(__src.__get_functor<_F>(__src.__obj)); + } + } + }; + + template + struct __make_destructor + { + #pragma nv_exec_check_disable + __device__ __host__ + static void __destruct(function *__fn) + { + if (__fn->__use_small_functor_data<_F>()) { + (__fn->__get_functor<_F>(__fn->__obj)).~_F(); + } + else { + delete (_F*)(__fn->__obj); + } + } + }; + + // We cannot simple define __make_functor in the following way: + // template + // __make_functor; + // template + // struct __make_functor<_RetType1(_ArgTypes1...), _F> + // + // because VS 2013 cannot unpack _RetType1(_ArgTypes1...) + template + struct __make_functor + { + typedef _RetType1 type; + + #pragma nv_exec_check_disable + __device__ __host__ + static _RetType1 __invoke(void *__d, _ArgTypes1... __args) + { + return __get_functor<_F>(__d)( + internal::forward<_ArgTypes1>(__args)...); + } + }; + + template + struct __make_functor<_RetType1, _M _C::*,_ArgTypes1...> + { + typedef _RetType1 type; + typedef _RetType1(*_Fn)(_ArgTypes1...); + + #pragma nv_exec_check_disable + __device__ __host__ + static _RetType1 __invoke(void *__d, _ArgTypes1... 
+
+  // We cannot simply define __make_functor in the following way:
+  //   template<class _T, class _F>
+  //   struct __make_functor;
+  //   template<class _RetType1, class... _ArgTypes1, class _F>
+  //   struct __make_functor<_RetType1(_ArgTypes1...), _F>
+  //
+  // because VS 2013 cannot unpack _RetType1(_ArgTypes1...)
+  template<class _RetType1, class _F, class... _ArgTypes1>
+  struct __make_functor
+  {
+    typedef _RetType1 type;
+
+    #pragma nv_exec_check_disable
+    __device__ __host__
+    static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
+    {
+      return __get_functor<_F>(__d)(
+          internal::forward<_ArgTypes1>(__args)...);
+    }
+  };
+
+  template<class _RetType1, class _M, class _C, class... _ArgTypes1>
+  struct __make_functor<_RetType1, _M _C::*, _ArgTypes1...>
+  {
+    typedef _RetType1 type;
+    typedef _RetType1(*_Fn)(_ArgTypes1...);
+
+    #pragma nv_exec_check_disable
+    __device__ __host__
+    static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
+    {
+      return __get_functor<_Fn>(__d)(
+          internal::forward<_ArgTypes1>(__args)...);
+    }
+  };
+
+// workaround for GCC versions below 4.8
+#if (__GNUC__ == 4) && (__GNUC_MINOR__ < 8)
+  template<class _F>
+  struct __check_callability
+    : public std::integral_constant<bool,
+          !std::is_same<_F, function>::value>
+  { };
+#elif defined(_MSC_VER)
+  // simulate VC 2013's behavior...
+  template<class _F>
+  struct __check_callability1
+    : public
+      std::integral_constant<bool,
+          std::is_void<_RetType>::value ||
+          std::is_convertible<
+              _RetType,
+              typename std::result_of<_F(_ArgTypes...)>::type
+          >::value
+      >
+  { };
+
+  template<class _F>
+  struct __check_callability
+    : public std::integral_constant<
+          bool,
+          !std::is_same<_F, function>::value &&
+          __check_callability1<typename std::decay<_F>::type>::value>
+  { };
+#else /* !((__GNUC__ == 4) && (__GNUC_MINOR__ < 8)) && !_MSC_VER */
+  template<class _F,
+           class _T = typename std::result_of<_F(_ArgTypes...)>::type>
+  struct __check_callability
+    : public std::integral_constant<
+          bool,
+          !std::is_same<_F, function>::value &&
+          std::is_convertible< _T, _RetType>::value>
+  { };
+#endif /* (__GNUC__ == 4) && (__GNUC_MINOR__ < 8) */
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  void __destroy()
+  {
+    if (__obj) {
+      __destructor(this);
+      __obj = 0;
+    }
+  }
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  void __clear()
+  {
+    __obj = 0;
+    __meta_fn = 0;
+    __cloner = 0;
+    __destructor = 0;
+  }
+
+public:
+  typedef _RetType result_type;
+
+/*
+ * These typedef(s) are derived from __maybe_base_function:
+ *   typedef T1 argument_type;        // only if sizeof...(ArgTypes) == 1 and
+ *                                    // the type in ArgTypes is T1
+ *   typedef T1 first_argument_type;  // only if sizeof...(ArgTypes) == 2 and
+ *                                    // ArgTypes contains T1 and T2
+ *   typedef T2 second_argument_type; // only if sizeof...(ArgTypes) == 2 and
+ *                                    // ArgTypes contains T1 and T2
+ */
+
+  // 20.8.11.2.1 construct/copy/destroy [func.wrap.con]
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function() __NV_NOEXCEPT
+    : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function(std::nullptr_t) __NV_NOEXCEPT
+    : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function(const function &__fn)
+  {
+    if (__fn.__obj == 0) {
+      __clear();
+    }
+    else {
+      __meta_fn = __fn.__meta_fn;
+      __destructor = __fn.__destructor;
+      __fn.__cloner(*this, __fn);
+      __cloner = __fn.__cloner;
+    }
+  }
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function(function &&__fn)
+    : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) // must be empty before the swap
+  {
+    __fn.swap(*this);
+  }
+
+  // VS 2013 cannot process the __check_callability type trait.
+  // So, we check callability using static_assert instead of
+  // using SFINAE such as
+  //   template<class _F,
+  //            class = typename std::enable_if<__check_callability<_F>::value
+  //           >::type>
+
+  #pragma nv_exec_check_disable
+  template<class _F>
+  __device__ __host__
+  function(_F);
+
+  // copy and swap
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function& operator=(const function& __fn)
+  {
+    function(__fn).swap(*this);
+    return *this;
+  }
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function& operator=(function&& __fn)
+  {
+    function(internal::move(__fn)).swap(*this);
+    return *this;
+  }
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  function& operator=(std::nullptr_t)
+  {
+    __destroy();
+    return *this;
+  }
+
+  #pragma nv_exec_check_disable
+  template<class _F>
+  __device__ __host__
+  function&
+  operator=(_F&& __fn)
+  {
+    static_assert(__check_callability<_F>::value,
+                  "Unable to create functor object!");
+    function(internal::forward<_F>(__fn)).swap(*this);
+    return *this;
+  }
+
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  ~function()
+  {
+    __destroy();
+  }
+
+  // 20.8.11.2.2 function modifiers [func.wrap.func.mod]
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  void swap(function& __fn) __NV_NOEXCEPT
+  {
+    internal::swap(__meta_fn, __fn.__meta_fn);
+    internal::swap(__cloner, __fn.__cloner);
+    internal::swap(__destructor, __fn.__destructor);
+
+    if (__is_small_functor_data() && __fn.__is_small_functor_data()) {
+      internal::swap(__small_functor_data, __fn.__small_functor_data);
+    }
+    else if (__is_small_functor_data()) {
+      internal::swap(__small_functor_data, __fn.__small_functor_data);
+      internal::swap(__obj, __fn.__obj);
+      __fn.__obj = __fn.__get_small_functor_data();
+    }
+    else if (__fn.__is_small_functor_data()) {
+      internal::swap(__small_functor_data, __fn.__small_functor_data);
+      internal::swap(__obj, __fn.__obj);
+      __obj = __get_small_functor_data();
+    }
+    else {
+      internal::swap(__obj, __fn.__obj);
+    }
+  }
+
+  // 20.8.11.2.3 function capacity [func.wrap.func.cap]
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  explicit operator bool() const __NV_NOEXCEPT
+  {
+    return __obj;
+  }
+
+  // 20.8.11.2.4 function invocation [func.wrap.func.inv]
+  // function::operator() can only be called in device code
+  // to avoid cross-execution space calls
+  #pragma nv_exec_check_disable
+  __device__ __host__
+  _RetType operator()(_ArgTypes...) const;
+
+};
+
+// Out-of-line definitions
+#pragma nv_exec_check_disable
+template<class _RetType, class... _ArgTypes>
+template<class _F>
+__device__ __host__
+function<_RetType(_ArgTypes...)>::function(_F __fn)
+  : __obj(0), __meta_fn(0), __cloner(0), __destructor(0)
+{
+  static_assert(__check_callability<_F>::value,
+                "Unable to construct functor object!");
+  if (__is_empty_functor(__fn))
+    return;
+  __meta_fn = &__make_functor<_RetType, _F, _ArgTypes...>::__invoke;
+  __cloner = &__make_cloner<_F>::__clone_data;
+  __destructor = &__make_destructor<_F>::__destruct;
+
+  if (__use_small_functor_data<_F>()) {
+    __obj = __get_small_functor_data();
+    new ((void*)__obj) _F(internal::move(__fn));
+  }
+  else {
+    __obj = new _F(internal::move(__fn));
+  }
+}
+
+#pragma nv_exec_check_disable
+template<class _RetType, class... _ArgTypes>
+__device__ __host__
+_RetType
+function<_RetType(_ArgTypes...)>::operator()(_ArgTypes... __args) const
+{
+  return __meta_fn(__obj, internal::forward<_ArgTypes>(__args)...);
+}
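Taken together, this gives a device-usable subset of std::function. A minimal usage sketch (hypothetical kernel, compiled with nvcc; the header is normally reached via <nvfunctional>):

#include <nvfunctional>
#include <cstdio>

__global__ void dispatch(int x) {
  int bias = 10;
  // Lambdas defined inside device code are device lambdas; this one is
  // small enough for the wrapper's inline buffer.
  nvstd::function<int(int)> f = [bias](int v) { return v + bias; };

  if (f) {                   // explicit operator bool
    printf("%d\n", f(x));    // type-erased call through __meta_fn
  }
  f = nullptr;               // destroys the stored target
}

Launched from the host as, e.g., `dispatch<<<1, 1>>>(5);`.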
+
+// 20.8.11.2.6, Null pointer comparisons:
+
+#pragma nv_exec_check_disable
+template<class _R, class... _ArgTypes>
+__device__ __host__
+bool operator==(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
+__NV_NOEXCEPT
+{
+  return !__fn;
+}
+
+#pragma nv_exec_check_disable
+template<class _R, class... _ArgTypes>
+__device__ __host__
+bool operator==(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
+__NV_NOEXCEPT
+{
+  return !__fn;
+}
+
+#pragma nv_exec_check_disable
+template<class _R, class... _ArgTypes>
+__device__ __host__
+bool operator!=(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
+__NV_NOEXCEPT
+{
+  return static_cast<bool>(__fn);
+}
+
+#pragma nv_exec_check_disable
+template<class _R, class... _ArgTypes>
+__device__ __host__
+bool operator!=(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
+__NV_NOEXCEPT
+{
+  return static_cast<bool>(__fn);
+}
+
+// 20.8.11.2.7, specialized algorithms:
+#pragma nv_exec_check_disable
+template<class _R, class... _ArgTypes>
+__device__ __host__
+void swap(function<_R(_ArgTypes...)>& __fn1, function<_R(_ArgTypes...)>& __fn2)
+{
+  __fn1.swap(__fn2);
+}
+
+} // namespace nvstd
+
+#undef __NV_NOEXCEPT
+#undef __NV_CONSTEXPR
+#undef __NV_ALIGNOF
+
+#endif // __NV_LIBCXX_FUNCTIONAL_H__
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
+#endif
diff --git a/miniCUDA124/include/crt/sm_70_rt.h b/miniCUDA124/include/crt/sm_70_rt.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d7c84bd55ef75eb11166f3931f26b8c0e71f8db
--- /dev/null
+++ b/miniCUDA124/include/crt/sm_70_rt.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2017-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R.
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + //NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__ +#endif + +#if !defined(__SM_70_RT_H__) +#define __SM_70_RT_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_70_RT_DECL__ __host__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_70_RT_DECL__ extern __device__ __cudart_builtin__ +#else /* !__CUDACC_RTC__ */ +#define __SM_70_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +/****************************************************************************** + * match * + ******************************************************************************/ +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) __DEF_IF_HOST + +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) 
__DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) __DEF_IF_HOST +__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) __DEF_IF_HOST + +__SM_70_RT_DECL__ void __nanosleep(unsigned int ns) __DEF_IF_HOST + +__SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) __DEF_IF_HOST + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_70_RT_DECL__ + +#if (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA) +#include "sm_70_rt.hpp" +#endif /* (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA) */ + +#endif /* !__SM_70_RT_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__ +#endif + + +#undef EXCLUDE_FROM_RTC \ No newline at end of file diff --git a/miniCUDA124/include/crt/sm_70_rt.hpp b/miniCUDA124/include/crt/sm_70_rt.hpp new file mode 100644 index 0000000000000000000000000000000000000000..91c8a96f44889d38c144673e4a87c3441682e8cb --- /dev/null +++ b/miniCUDA124/include/crt/sm_70_rt.hpp @@ -0,0 +1,192 @@ +/* + * Copyright 2017-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__ +#endif + +#if !defined(__SM_70_RT_HPP__) +#define __SM_70_RT_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_70_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_70_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* Below are implementations of SM-7.0 builtin functions which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +// +// __match_any_sync +// +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) { + return __match32_any_sync(mask, value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) { + return __match32_any_sync(mask, value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) { + return (sizeof(long) == sizeof(long long)) ? + __match64_any_sync(mask, (unsigned long long)value): + __match32_any_sync(mask, (unsigned)value); +} + +__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) { + return (sizeof(long) == sizeof(long long)) ? 
+         __match64_any_sync(mask, (unsigned long long)value):
+         __match32_any_sync(mask, (unsigned)value);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) {
+  return __match64_any_sync(mask, value);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) {
+  return __match64_any_sync(mask, value);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) {
+  return __match32_any_sync(mask, __float_as_uint(value));
+}
+
+__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) {
+  return __match64_any_sync(mask, __double_as_longlong(value));
+}
+
+//
+// __match_all_sync
+//
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) {
+  return __match32_all_sync(mask, value, pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) {
+  return __match32_all_sync(mask, value, pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) {
+  return (sizeof(long) == sizeof(long long)) ?
+         __match64_all_sync(mask, (unsigned long long)value, pred):
+         __match32_all_sync(mask, (unsigned)value, pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) {
+  return (sizeof(long) == sizeof(long long)) ?
+         __match64_all_sync(mask, (unsigned long long)value, pred):
+         __match32_all_sync(mask, (unsigned)value, pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) {
+  return __match64_all_sync(mask, value, pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) {
+  return __match64_all_sync(mask, value, pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) {
+  return __match32_all_sync(mask, __float_as_uint(value), pred);
+}
+
+__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) {
+  return __match64_all_sync(mask, __double_as_longlong(value), pred);
+}
+
+__SM_70_RT_DECL__ void __nanosleep(unsigned int ns) {
+  asm volatile("nanosleep.u32 %0;" :: "r"(ns));
+}
+
+
+extern "C" __device__ __device_builtin__
+unsigned short __usAtomicCAS(unsigned short *, unsigned short, unsigned short);
+
+__SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) {
+  return __usAtomicCAS(address, compare, val);
+}
+
+
+#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
+
+#endif /* __cplusplus && __CUDACC__ */
+
+#undef __SM_70_RT_DECL__
+
+#endif /* !__SM_70_RT_HPP__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
+#endif
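The match intrinsics above compare a value across a warp: `__match_any_sync` returns the mask of lanes holding the same value, and `__match_all_sync` sets a predicate when all participating lanes agree. A usage sketch (hypothetical kernel, requires -arch=sm_70 or newer):

__global__ void count_duplicates(const int *keys, int *counts) {
  unsigned lane  = threadIdx.x & 31u;
  // Bitmask of all lanes in the warp whose key equals ours.
  unsigned peers = __match_any_sync(0xffffffffu, keys[threadIdx.x]);

  int pred;
  // pred becomes non-zero only if every participating lane holds the same key.
  __match_all_sync(0xffffffffu, keys[threadIdx.x], &pred);

  if (lane == (unsigned)(__ffs(peers) - 1)) {   // lowest lane in the group leads
    counts[keys[threadIdx.x]] = __popc(peers);  // group size
  }
  if (!pred) {
    __nanosleep(100);  // bounded ~100 ns backoff when keys diverge
  }
}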
diff --git a/miniCUDA124/include/crt/sm_80_rt.h b/miniCUDA124/include/crt/sm_80_rt.h
new file mode 100644
index 0000000000000000000000000000000000000000..748e504f2a1732a6b509f0d7f2bbe86fb8cabe2a
--- /dev/null
+++ b/miniCUDA124/include/crt/sm_80_rt.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__
+#endif
+
+#if !defined(__SM_80_RT_H__)
+#define __SM_80_RT_H__
+
+#if defined(__CUDACC_RTC__)
+#define __SM_80_RT_DECL__ __host__ __device__
+#elif defined(_NVHPC_CUDA)
+#define __SM_80_RT_DECL__ extern __device__ __cudart_builtin__
+#else /* !__CUDACC_RTC__ */
+#define __SM_80_RT_DECL__ static __device__ __inline__
+#endif /* __CUDACC_RTC__ */
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#include "builtin_types.h"
+#include "device_types.h"
+#include "host_defines.h"
+
+#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
+#define __DEF_IF_HOST { }
+#else /* !__CUDA_ARCH__ */
+#define __DEF_IF_HOST ;
+#endif /* __CUDA_ARCH__ */
+
+
+//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
+#define EXCLUDE_FROM_RTC
+/******************************************************************************
+ *                                   reduce                                   *
+ ******************************************************************************/
+__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) __DEF_IF_HOST
+__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) __DEF_IF_HOST
+__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) __DEF_IF_HOST
+
+__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) __DEF_IF_HOST
+__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) __DEF_IF_HOST
+__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) __DEF_IF_HOST
+
+__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) __DEF_IF_HOST
+__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) __DEF_IF_HOST
+__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) __DEF_IF_HOST
+
+#undef EXCLUDE_FROM_RTC
+
+
+extern "C" {
+inline __device__ void *__nv_associate_access_property(const void *ptr,
+                                                       unsigned long long property) {
+  extern __device__ void *__nv_associate_access_property_impl(const void *,
+                                                              unsigned long long);
+  return __nv_associate_access_property_impl(ptr, property);
+}
+
+inline __device__ void __nv_memcpy_async_shared_global_4(void *dst,
+                                                         const void *src,
+                                                         unsigned src_size) {
+  extern __device__ void __nv_memcpy_async_shared_global_4_impl(void *,
+                                                                const void *,
+                                                                unsigned);
+  __nv_memcpy_async_shared_global_4_impl(dst, src, src_size);
+}
+
+inline __device__ void __nv_memcpy_async_shared_global_8(void *dst,
+                                                         const void *src,
+                                                         unsigned src_size) {
+  extern __device__ void __nv_memcpy_async_shared_global_8_impl(void *,
+                                                                const void *,
+                                                                unsigned);
+  __nv_memcpy_async_shared_global_8_impl(dst, src, src_size);
+}
+
+inline __device__ void __nv_memcpy_async_shared_global_16(void *dst,
+                                                          const void *src,
+                                                          unsigned src_size) {
+  extern __device__ void __nv_memcpy_async_shared_global_16_impl(void *,
+                                                                 const void *,
+                                                                 unsigned);
+  __nv_memcpy_async_shared_global_16_impl(dst, src, src_size);
+}
+
+}
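The `__reduce_*_sync` declarations above collapse a value across a warp in a single instruction, unlike a shuffle-based reduction loop. A usage sketch (hypothetical kernel, requires -arch=sm_80 or newer, blocks of a multiple of 32 threads):

__global__ void warp_sums(const int *in, int *out) {
  int v = in[blockIdx.x * blockDim.x + threadIdx.x];
  // Every lane receives the sum across the full warp at once.
  int total = __reduce_add_sync(0xffffffffu, v);
  if ((threadIdx.x & 31u) == 0) {
    out[(blockIdx.x * blockDim.x + threadIdx.x) >> 5] = total;
  }
}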
+#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */
+
+#endif /* __cplusplus && __CUDACC__ */
+
+#undef __DEF_IF_HOST
+#undef __SM_80_RT_DECL__
+
+#if (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA)
+#include "sm_80_rt.hpp"
+#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
+
+#endif /* !__SM_80_RT_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__
+#endif
diff --git a/miniCUDA124/include/crt/sm_80_rt.hpp b/miniCUDA124/include/crt/sm_80_rt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6f8ad5e27975b2efe3430a5ee89f02bcf1e36f13
--- /dev/null
+++ b/miniCUDA124/include/crt/sm_80_rt.hpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__ +#endif + +#if !defined(__SM_80_RT_HPP__) +#define __SM_80_RT_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_80_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_80_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* Below are implementations of SM-8.0 builtin functions which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +extern "C" { + __device_builtin__ __device__ unsigned __reduce_add_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_min_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_max_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ int __reduce_add_sync_signed_impl(unsigned, int); + __device_builtin__ __device__ int __reduce_min_sync_signed_impl(unsigned, int); + __device_builtin__ __device__ int __reduce_max_sync_signed_impl(unsigned, int); + __device_builtin__ __device__ unsigned __reduce_or_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_and_sync_unsigned_impl(unsigned, unsigned); + __device_builtin__ __device__ unsigned __reduce_xor_sync_unsigned_impl(unsigned, unsigned); +} + +__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) { + return __reduce_add_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) { + return __reduce_min_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) { + return __reduce_max_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) { + return __reduce_add_sync_signed_impl(mask, value); +} + +__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) { + return __reduce_min_sync_signed_impl(mask, value); +} + +__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) { + return __reduce_max_sync_signed_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) { + return __reduce_and_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) { + return __reduce_or_sync_unsigned_impl(mask, value); +} + +__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) { + return __reduce_xor_sync_unsigned_impl(mask, value); +} +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_80_RT_DECL__ + +#endif /* !__SM_80_RT_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__ +#endif diff --git a/miniCUDA124/include/crt/sm_90_rt.h 
b/miniCUDA124/include/crt/sm_90_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..33264feaaf3690f01a026190e35f2e765d6f3804 --- /dev/null +++ b/miniCUDA124/include/crt/sm_90_rt.h @@ -0,0 +1,282 @@ +/* + * Copyright 2022-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__ +#endif + +#if !defined(__SM_90_RT_H__) +#define __SM_90_RT_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_90_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_90_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ && !_NVHPC_CUDA */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ || _NVHPC_CUDA */ + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr) __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr) __DEF_IF_HOST +__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr, unsigned target_block_rank) __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr) __DEF_IF_HOST +__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr, unsigned cluster_cta_mask) __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterDim() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters() __DEF_IF_HOST +__SM_90_RT_DECL__ dim3 __clusterIdx() __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank() __DEF_IF_HOST +__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks() __DEF_IF_HOST +__SM_90_RT_DECL__ void __cluster_barrier_arrive() __DEF_IF_HOST +__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed() __DEF_IF_HOST +__SM_90_RT_DECL__ void __cluster_barrier_wait() __DEF_IF_HOST +__SM_90_RT_DECL__ void __threadfence_cluster() __DEF_IF_HOST + +__SM_90_RT_DECL__ float2 atomicAdd(float2 *__address, float2 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *__address, float2 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *__address, float2 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float4 atomicAdd(float4 *__address, float4 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *__address, float4 val) __DEF_IF_HOST +__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *__address, float4 val) __DEF_IF_HOST + +#undef EXCLUDE_FROM_RTC + +//Note: below atomic functions are templates, so cannot be represented in NVRTC +//builtins representation, so they have to be parsed on every NVRTC compilation. 
+//(notice 'EXCLUDE_FROM_RTC' ends above)
+
+
+#ifndef __NV_DISABLE_128_ATOMICS
+// lgen definitions for 128b atomics
+extern "C" {
+  __device__ __device_builtin__ void __u128AtomicCAS(void *, void *, void *, void *);
+  __device__ __device_builtin__ void __u128AtomicCAS_block(void *, void *, void *, void *);
+  __device__ __device_builtin__ void __u128AtomicCAS_system(void *, void *, void *, void *);
+  __device__ __device_builtin__ void __u128AtomicExch(void *, void *, void *);
+  __device__ __device_builtin__ void __u128AtomicExch_block(void *, void *, void *);
+  __device__ __device_builtin__ void __u128AtomicExch_system(void *, void *, void *);
+}
+
+// macro to get address of object, to workaround situations where the type overloads the "&" operator
+#define __NV_ATOMIC_ADDRESSOF(__val) \
+  (void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(__val))))
+
+// enable_if
+template<bool, class _T = void>
+struct __nv_atomic_enable_if { };
+
+template<class _T>
+struct __nv_atomic_enable_if<true, _T> { typedef _T __type; };
+
+// alignof
+#if defined(__CUDACC_RTC__)
+#define __NV_ATOMIC_ALIGNOF __alignof__
+#else
+#define __NV_ATOMIC_ALIGNOF __alignof
+#endif
+
+// trivially copyable
+template<class _T>
+struct __nv_atomic_triv_cp_helper {
+#if defined(__GNUC__)
+#if (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+  static const bool __val = true;
+#elif (__GNUC__ < 5)
+  static const bool __val = __has_trivial_copy(_T);
+#else
+  static const bool __val = __is_trivially_copyable(_T);
+#endif
+#else
+  static const bool __val = __is_trivially_copyable(_T);
+#endif
+};
+#define __NV_ATOMIC_TRIVIALLY_COPYABLE(_T) \
+  __nv_atomic_triv_cp_helper<_T>::__val
+
+// return type
+#if __cplusplus >= 202002L // C++20 or greater
+#define __NV_ATOMIC_RET_TYPE(_T) _T
+#else
+#define __NV_ATOMIC_RET_TYPE(_T) typename \
+  __nv_atomic_enable_if<sizeof(_T) == 16 && \
+                        __NV_ATOMIC_ALIGNOF(_T) >= 16 && \
+                        __NV_ATOMIC_TRIVIALLY_COPYABLE(_T), _T>::__type
+#endif
+
+// requires
+#if __cplusplus >= 202002L // C++20 or greater
+#define __NV_ATOMIC_REQUIRES(_T) \
+  requires(sizeof(_T) == 16 && \
+           __NV_ATOMIC_ALIGNOF(_T) >= 16 && \
+           __NV_ATOMIC_TRIVIALLY_COPYABLE(_T))
+#else
+#define __NV_ATOMIC_REQUIRES(_T)
+#endif
+
+// temp value and return value
+#if __cplusplus >= 201103L || defined(_MSC_VER) // C++11 or greater, or MSC
+#define __NV_ATOMIC_TEMP(_T) union _U \
+  {_T __ret; __device__ __inline__ _U() {}}; _U __u
+#define __NV_ATOMIC_RET(_T) __u.__ret
+#else
+#define __NV_ATOMIC_TEMP(_T) _T __ret
+#define __NV_ATOMIC_RET(_T) __ret
+#endif
+
+// templated 128-bit atomics
+template<class _T>
+__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
+atomicCAS(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
+  __NV_ATOMIC_TEMP(_T);
+  __u128AtomicCAS((void *)(__address),
+                  __NV_ATOMIC_ADDRESSOF(__compare),
+                  __NV_ATOMIC_ADDRESSOF(__val),
+                  __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
+  return __NV_ATOMIC_RET(_T);
+}
+
+template<class _T>
+__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
+atomicCAS_block(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
+  __NV_ATOMIC_TEMP(_T);
+  __u128AtomicCAS_block((void *)(__address),
+                        __NV_ATOMIC_ADDRESSOF(__compare),
+                        __NV_ATOMIC_ADDRESSOF(__val),
+                        __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
+  return __NV_ATOMIC_RET(_T);
+}
+
+template<class _T>
+__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
+atomicCAS_system(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
+  __NV_ATOMIC_TEMP(_T);
+  __u128AtomicCAS_system((void *)(__address),
+                         __NV_ATOMIC_ADDRESSOF(__compare),
+                         __NV_ATOMIC_ADDRESSOF(__val),
+                         __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
+  return __NV_ATOMIC_RET(_T);
+}
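The SFINAE guard above limits these overloads to 16-byte, 16-aligned, trivially copyable types. A usage sketch (hypothetical kernel and type names, requires -arch=sm_90 or newer):

// A pointer plus a version tag, CAS'd together in one 128-bit operation
// so the tag defeats ABA races.
struct __align__(16) TaggedPtr {
  void *ptr;
  unsigned long long tag;
};

__device__ TaggedPtr g_head;

__global__ void bump_tag(void *p) {
  TaggedPtr expected = g_head;               // sketch: non-atomic snapshot
  TaggedPtr desired  = { p, expected.tag + 1 };
  atomicCAS(&g_head, expected, desired);     // single 128-bit compare-and-swap
}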
+
+template<class _T>
+__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
+atomicExch(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
+  __NV_ATOMIC_TEMP(_T);
+  __u128AtomicExch((void *)(__address),
+                   __NV_ATOMIC_ADDRESSOF(__val),
+                   __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
+  return __NV_ATOMIC_RET(_T);
+}
+
+template<class _T>
+__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
+atomicExch_block(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
+  __NV_ATOMIC_TEMP(_T);
+  __u128AtomicExch_block((void *)(__address),
+                         __NV_ATOMIC_ADDRESSOF(__val),
+                         __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
+  return __NV_ATOMIC_RET(_T);
+}
+
+template<class _T>
+__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
+atomicExch_system(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
+  __NV_ATOMIC_TEMP(_T);
+  __u128AtomicExch_system((void *)(__address),
+                          __NV_ATOMIC_ADDRESSOF(__val),
+                          __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
+  return __NV_ATOMIC_RET(_T);
+}
+#endif /* !__NV_DISABLE_128_ATOMICS */
+
+#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */
+
+#endif /* __cplusplus && __CUDACC__ */
+
+#undef __DEF_IF_HOST
+#undef __SM_90_RT_DECL__
+
+#if (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA)
+#include "sm_90_rt.hpp"
+#endif /* (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA) */
+
+#endif /* !__SM_90_RT_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__
+#endif
+
diff --git a/miniCUDA124/include/crt/sm_90_rt.hpp b/miniCUDA124/include/crt/sm_90_rt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fe73918c2b3230f605b066bc34efa229df273003
--- /dev/null
+++ b/miniCUDA124/include/crt/sm_90_rt.hpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__
+#endif
+
+#if !defined(__SM_90_RT_HPP__)
+#define __SM_90_RT_HPP__
+
+#if defined(__CUDACC_RTC__)
+#define __SM_90_RT_DECL__ __host__ __device__
+#else /* !__CUDACC_RTC__ */
+#define __SM_90_RT_DECL__ static __device__ __inline__
+#endif /* __CUDACC_RTC__ */
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#include "builtin_types.h"
+#include "device_types.h"
+#include "host_defines.h"
+
+/*******************************************************************************
+*                                                                              *
+* Below are implementations of SM-9.0 builtin functions which are included as *
+* source (instead of being built in to the compiler)                          *
+*                                                                              *
+*******************************************************************************/
+extern "C" {
+  __device__ unsigned __nv_isClusterShared_impl(const void *);
+  __device__ void * __nv_cluster_map_shared_rank_impl(const void *, unsigned);
+  __device__ unsigned __nv_cluster_query_shared_rank_impl(const void *);
+  __device__ unsigned __nv_clusterDimIsSpecifed_impl();
+  __device__ void __nv_clusterDim_impl(unsigned *, unsigned *, unsigned *);
+  __device__ void __nv_clusterRelativeBlockIdx_impl(unsigned *,
+                                                    unsigned *, unsigned *);
+  __device__ void __nv_clusterGridDimInClusters_impl(unsigned *,
+                                                     unsigned *, unsigned *);
+  __device__ void __nv_clusterIdx_impl(unsigned *, unsigned *, unsigned *);
+  __device__ unsigned __nv_clusterRelativeBlockRank_impl();
+  __device__ unsigned __nv_clusterSizeInBlocks_impl();
+  __device__ void __nv_cluster_barrier_arrive_impl();
+  __device__ void __nv_cluster_barrier_arrive_relaxed_impl();
+  __device__ void __nv_cluster_barrier_wait_impl();
+  __device__ void __nv_threadfence_cluster_impl();
+
+  __device__ __device_builtin__ float2 __f2AtomicAdd(float2 *, float2);
+  __device__ __device_builtin__ float2 __f2AtomicAdd_block(float2 *, float2);
+  __device__ __device_builtin__ float2 __f2AtomicAdd_system(float2 *, float2);
+  __device__ __device_builtin__ float4 __f4AtomicAdd(float4 *, float4);
+  __device__ __device_builtin__ float4 __f4AtomicAdd_block(float4 *, float4);
+  __device__ __device_builtin__ float4 __f4AtomicAdd_system(float4 *, float4);
+} // extern "C"
+
+__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr)
+{
+  return __isShared(ptr);
+}
+
+__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr)
+{
+  return __nv_isClusterShared_impl(ptr);
+}
+
+__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr,
+                                                  unsigned target_block_rank)
+{
+  return __nv_cluster_map_shared_rank_impl(ptr, target_block_rank);
+}
+
+__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr)
+{
+  return __nv_cluster_query_shared_rank_impl(ptr);
+}
+
+__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr,
+                                                       unsigned int cluster_cta_mask)
+{
+  return make_uint2((unsigned)__cvta_generic_to_shared(ptr), cluster_cta_mask);
+}
+
+__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified()
+{
+  return __nv_clusterDimIsSpecifed_impl();
+}
+
+__SM_90_RT_DECL__ dim3 __clusterDim()
+{
+  unsigned x, y, z;
+  __nv_clusterDim_impl(&x, &y, &z);
+  return dim3(x,y,z);
+}
+
+__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx()
+{
+  unsigned x, y, z;
+  __nv_clusterRelativeBlockIdx_impl(&x, &y, &z);
+  return dim3(x,y,z);
+}
+
+__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters()
+{
+  unsigned x, y, z;
+  __nv_clusterGridDimInClusters_impl(&x, &y, &z);
+  return dim3(x,y,z);
+}
+
+__SM_90_RT_DECL__ dim3 __clusterIdx()
+{
+  unsigned x, y, z;
+  __nv_clusterIdx_impl(&x, &y, &z);
+  return dim3(x,y,z);
+}
+
+__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank()
+{
+  return __nv_clusterRelativeBlockRank_impl();
+}
+
+__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks()
+{
+  return __nv_clusterSizeInBlocks_impl();
+}
+
+__SM_90_RT_DECL__ void __cluster_barrier_arrive()
+{
+  __nv_cluster_barrier_arrive_impl();
+}
+
+__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed()
+{
+  __nv_cluster_barrier_arrive_relaxed_impl();
+}
+
+__SM_90_RT_DECL__ void __cluster_barrier_wait()
+{
+  __nv_cluster_barrier_wait_impl();
+}
+
+__SM_90_RT_DECL__ void __threadfence_cluster()
+{
+  __nv_threadfence_cluster_impl();
+}
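The cluster intrinsics above let one thread block address another block's shared memory within the same cluster. A usage sketch (hypothetical kernel, requires -arch=sm_90, launched with 32-thread blocks, e.g. `exchange<<<dim3(2), dim3(32)>>>(out);`):

__global__ void __cluster_dims__(2, 1, 1) exchange(int *out) {
  __shared__ int tile[32];
  tile[threadIdx.x] = (int)(blockIdx.x * 32 + threadIdx.x);

  __cluster_barrier_arrive();
  __cluster_barrier_wait();        // every block's tile is now published

  unsigned peer = __clusterRelativeBlockRank() ^ 1u;  // the sibling block
  int *remote = (int *)__cluster_map_shared_rank(tile, peer);
  out[blockIdx.x * 32 + threadIdx.x] = remote[threadIdx.x];
}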
+
+
+/* Define __PTR for atomicAdd prototypes below, undef after done */
+#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
+#define __PTR "l"
+#else
+#define __PTR "r"
+#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
+
+__SM_90_RT_DECL__ float2 atomicAdd(float2 *address, float2 val) {
+  return __f2AtomicAdd(address, val);
+}
+
+__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *address, float2 val) {
+  return __f2AtomicAdd_block(address, val);
+}
+
+__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *address, float2 val) {
+  return __f2AtomicAdd_system(address, val);
+}
+
+__SM_90_RT_DECL__ float4 atomicAdd(float4 *address, float4 val) {
+  return __f4AtomicAdd(address, val);
+}
+
+__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *address, float4 val) {
+  return __f4AtomicAdd_block(address, val);
+}
+
+__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *address, float4 val) {
+  return __f4AtomicAdd_system(address, val);
+}
+
+#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */
+
+#endif /* __cplusplus && __CUDACC__ */
+
+#undef __SM_90_RT_DECL__
+
+#endif /* !__SM_90_RT_HPP__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__
+#endif
diff --git
a/miniCUDA124/include/crt/storage_class.h b/miniCUDA124/include/crt/storage_class.h new file mode 100644 index 0000000000000000000000000000000000000000..c76c9d009ead575b53b61f54e603883b2c040c99 --- /dev/null +++ b/miniCUDA124/include/crt/storage_class.h @@ -0,0 +1,142 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__ +#endif + +#if !defined(__STORAGE_CLASS_H__) +#define __STORAGE_CLASS_H__ + +#if !defined(__var_used__) + +#define __var_used__ + +#endif /* __var_used__ */ + +#if !defined(__loc_sc__) + +#define __loc_sc__(loc, size, sc) \ + __storage##_##sc##size##loc loc + +#endif /* !__loc_sc__ */ + +#if !defined(__storage___device__) +#define __storage___device__ static __var_used__ +#endif /* __storage___device__ */ + +#if !defined(__storage_extern__device__) +#define __storage_extern__device__ static __var_used__ +#endif /* __storage_extern__device__ */ + +#if !defined(__storage_auto__device__) +#define __storage_auto__device__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__device__ */ + +#if !defined(__storage_static__device__) +#define __storage_static__device__ static __var_used__ +#endif /* __storage_static__device__ */ + +#if !defined(__storage___constant__) +#define __storage___constant__ static __var_used__ +#endif /* __storage___constant__ */ + +#if !defined(__storage_extern__constant__) +#define __storage_extern__constant__ static __var_used__ +#endif /* __storage_extern__constant__ */ + +#if !defined(__storage_auto__constant__) +#define __storage_auto__constant__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__constant__ */ + +#if !defined(__storage_static__constant__) +#define __storage_static__constant__ static __var_used__ +#endif /* __storage_static__constant__ */ + +#if !defined(__storage___shared__) +#define __storage___shared__ static __var_used__ +#endif /* __storage___shared__ */ + +#if !defined(__storage_extern__shared__) +#define __storage_extern__shared__ static __var_used__ +#endif /* __storage_extern__shared__ */ + +#if !defined(__storage_auto__shared__) +#define __storage_auto__shared__ static +#endif /* __storage_auto__shared__ */ + +#if !defined(__storage_static__shared__) +#define __storage_static__shared__ static __var_used__ +#endif /* __storage_static__shared__ */ + +#if !defined(__storage__unsized__shared__) +#define __storage__unsized__shared__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage__unsized__shared__ */ + +#if !defined(__storage_extern_unsized__shared__) +#define __storage_extern_unsized__shared__ static __var_used__ +#endif /* 
__storage_extern_unsized__shared__ */ + +#if !defined(__storage_auto_unsized__shared__) +#define __storage_auto_unsized__shared__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto_unsized__shared__ */ + +#if !defined(__storage_static_unsized__shared__) +#define __storage_static_unsized__shared__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_static_unsized__shared__ */ + +#if !defined(__storage___text__) +#define __storage___text__ static __var_used__ +#endif /* __storage___text__ */ + +#if !defined(__storage_extern__text__) +#define __storage_extern__text__ static __var_used__ +#endif /* __storage_extern__text__ */ + +#if !defined(__storage_auto__text__) +#define __storage_auto__text__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__text__ */ + +#if !defined(__storage_static__text__) +#define __storage_static__text__ static __var_used__ +#endif /* __storage_static__text__ */ + +#if !defined(__storage___surf__) +#define __storage___surf__ static __var_used__ +#endif /* __storage___surf__ */ + +#if !defined(__storage_extern__surf__) +#define __storage_extern__surf__ static __var_used__ +#endif /* __storage_extern__surf__ */ + +#if !defined(__storage_auto__surf__) +#define __storage_auto__surf__ @@@ COMPILER @@@ ERROR @@@ +#endif /* __storage_auto__surf__ */ + +#if !defined(__storage_static__surf__) +#define __storage_static__surf__ static __var_used__ +#endif /* __storage_static__surf__ */ + +#endif /* !__STORAGE_CLASS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__ +#endif diff --git a/miniCUDA124/include/cub/config.cuh b/miniCUDA124/include/cub/config.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f7f25ddef028603bbfad7e2282acdda9a4086e31 --- /dev/null +++ b/miniCUDA124/include/cub/config.cuh @@ -0,0 +1,51 @@ +/****************************************************************************** + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/**
+ * \file
+ * Static configuration header for the CUB project.
+ */
+
+#pragma once
+
+// For _CCCL_IMPLICIT_SYSTEM_HEADER
+#include <cuda/__cccl_config>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+#include <cub/util_arch.cuh>
+#include <cub/util_compiler.cuh>
+#include <cub/util_cpp_dialect.cuh>
+#include <cub/util_deprecated.cuh>
+#include <cub/util_macro.cuh>
+#include <cub/util_namespace.cuh>
diff --git a/miniCUDA124/include/cub/cub.cuh b/miniCUDA124/include/cub/cub.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..73136a6077df85436e00f7cffc118217e95a8d43
--- /dev/null
+++ b/miniCUDA124/include/cub/cub.cuh
@@ -0,0 +1,116 @@
+/******************************************************************************
+ * Copyright (c) 2011, Duane Merrill. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + ******************************************************************************/ + +/** + * \file + * CUB umbrella include file + */ + +#pragma once + +// Static configuration +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +// Block +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include + +// Device +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Grid +// #include +#include +#include +#include + +// Thread +#include +#include +#include +#include +#include + +// Warp +#include +#include +#include +#include +#include +#include + +// Iterator +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Util +#include +#include +#include +#include +#include +#include diff --git a/miniCUDA124/include/cub/util_allocator.cuh b/miniCUDA124/include/cub/util_allocator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..135e8f8cf20831a63cc17cc9a5d17fce9bc5cadd --- /dev/null +++ b/miniCUDA124/include/cub/util_allocator.cuh @@ -0,0 +1,880 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/****************************************************************************** + * Simple caching allocator for device memory allocations. The allocator is + * thread-safe and capable of managing device allocations on multiple devices. 
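+ * Allocations are rounded up to geometrically growing bin sizes and cached
+ * per device for stream-safe reuse (see the CachingDeviceAllocator
+ * documentation below for the exact policy).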
+ ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + + +/** + * @addtogroup UtilMgmt + * @{ + */ + + +/****************************************************************************** + * CachingDeviceAllocator (host use) + ******************************************************************************/ + +/** + * @brief A simple caching allocator for device memory allocations. + * + * @par Overview + * The allocator is thread-safe and stream-safe and is capable of managing cached + * device allocations on multiple devices. It behaves as follows: + * + * @par + * - Allocations from the allocator are associated with an @p active_stream. Once freed, + * the allocation becomes available immediately for reuse within the @p active_stream + * with which it was associated with during allocation, and it becomes available for + * reuse within other streams when all prior work submitted to @p active_stream has completed. + * - Allocations are categorized and cached by bin size. A new allocation request of + * a given size will only consider cached allocations within the corresponding bin. + * - Bin limits progress geometrically in accordance with the growth factor + * @p bin_growth provided during construction. Unused device allocations within + * a larger bin cache are not reused for allocation requests that categorize to + * smaller bin sizes. + * - Allocation requests below ( @p bin_growth ^ @p min_bin ) are rounded up to + * ( @p bin_growth ^ @p min_bin ). + * - Allocations above ( @p bin_growth ^ @p max_bin ) are not rounded up to the nearest + * bin and are simply freed when they are deallocated instead of being returned + * to a bin-cache. + * - If the total storage of cached allocations on a given device will exceed + * @p max_cached_bytes, allocations for that device are simply freed when they are + * deallocated instead of being returned to their bin-cache. 
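+ *
+ * @par
+ * A minimal usage sketch (illustrative; not part of the upstream docs, and
+ * @p stream is assumed to be an existing cudaStream_t):
+ * @code
+ * cub::CachingDeviceAllocator allocator;   // bin_growth = 8, min_bin = 3, max_bin = 7
+ * double *d_data = NULL;
+ * CubDebugExit(allocator.DeviceAllocate((void **) &d_data, 1024 * sizeof(double), stream));
+ * // ... launch kernels that read/write d_data on 'stream' ...
+ * CubDebugExit(allocator.DeviceFree(d_data));
+ * @endcode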
+ *
+ * @par
+ * For example, the default-constructed CachingDeviceAllocator is configured with:
+ * - @p bin_growth = 8
+ * - @p min_bin = 3
+ * - @p max_bin = 7
+ * - @p max_cached_bytes = 6MB - 1B
+ *
+ * @par
+ * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB,
+ * and sets a maximum of 6,291,455 cached bytes per device.
+ *
+ */
+struct CachingDeviceAllocator
+{
+
+    //---------------------------------------------------------------------
+    // Constants
+    //---------------------------------------------------------------------
+
+    /// Out-of-bounds bin
+    static constexpr unsigned int INVALID_BIN = (unsigned int) -1;
+
+    /// Invalid size
+    static constexpr size_t INVALID_SIZE = (size_t) -1;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
+
+    /// Invalid device ordinal
+    static constexpr int INVALID_DEVICE_ORDINAL = -1;
+
+    //---------------------------------------------------------------------
+    // Type definitions and helper types
+    //---------------------------------------------------------------------
+
+    /**
+     * Descriptor for device memory allocations
+     */
+    struct BlockDescriptor
+    {
+        // Device pointer
+        void *d_ptr;
+
+        // Size of allocation in bytes
+        size_t bytes;
+
+        // Bin enumeration
+        unsigned int bin;
+
+        // Device ordinal
+        int device;
+
+        // Associated stream
+        cudaStream_t associated_stream;
+
+        // Signal when associated stream has run to the point at which this block was freed
+        cudaEvent_t ready_event;
+
+        // Constructor (suitable for searching maps for a specific block, given its pointer and
+        // device)
+        BlockDescriptor(void *d_ptr, int device)
+            : d_ptr(d_ptr)
+            , bytes(0)
+            , bin(INVALID_BIN)
+            , device(device)
+            , associated_stream(0)
+            , ready_event(0)
+        {}
+
+        // Constructor (suitable for searching maps for a range of suitable blocks, given a device)
+        BlockDescriptor(int device)
+            : d_ptr(NULL)
+            , bytes(0)
+            , bin(INVALID_BIN)
+            , device(device)
+            , associated_stream(0)
+            , ready_event(0)
+        {}
+
+        // Comparison functor for comparing device pointers
+        static bool PtrCompare(const BlockDescriptor &a, const BlockDescriptor &b)
+        {
+            if (a.device == b.device)
+                return (a.d_ptr < b.d_ptr);
+            else
+                return (a.device < b.device);
+        }
+
+        // Comparison functor for comparing allocation sizes
+        static bool SizeCompare(const BlockDescriptor &a, const BlockDescriptor &b)
+        {
+            if (a.device == b.device)
+                return (a.bytes < b.bytes);
+            else
+                return (a.device < b.device);
+        }
+    };
+
+    /// BlockDescriptor comparator function interface
+    typedef bool (*Compare)(const BlockDescriptor &, const BlockDescriptor &);
+
+    class TotalBytes {
+    public:
+        size_t free;
+        size_t live;
+        TotalBytes() { free = live = 0; }
+    };
+
+    /// Set type for cached blocks (ordered by size)
+    typedef std::multiset<BlockDescriptor, Compare> CachedBlocks;
+
+    /// Set type for live blocks (ordered by ptr)
+    typedef std::multiset<BlockDescriptor, Compare> BusyBlocks;
+
+    /// Map type of device ordinals to the number of bytes cached by each device
+    typedef std::map<int, TotalBytes> GpuCachedBytes;
+
+
+    //---------------------------------------------------------------------
+    // Utility functions
+    //---------------------------------------------------------------------
+
+    /**
+     * Integer pow function for unsigned base and exponent
+     */
+    static unsigned int IntPow(
+        unsigned int base,
+        unsigned int exp)
+    {
+        unsigned int retval = 1;
+        while (exp > 0)
+        {
+            if (exp & 1) {
+                retval = retval * base;  // multiply the result by the current base
+            }
+            base = base * base;          // square the base
+            exp = exp >> 1;              // divide the exponent in half
+        }
+        return retval;
+    }
+
+
+    /**
+     * Round up to the nearest power of @p base
+     */
+    void NearestPowerOf(
+        unsigned int &power,
+        size_t &rounded_bytes,
+        unsigned int base,
+        size_t value)
+    {
+        power = 0;
+        rounded_bytes = 1;
+
+        if (value * base < value)
+        {
+            // Overflow
+            power = sizeof(size_t) * 8;
+            rounded_bytes = size_t(0) - 1;
+            return;
+        }
+
+        while (rounded_bytes < value)
+        {
+            rounded_bytes *= base;
+            power++;
+        }
+    }
+
+    //---------------------------------------------------------------------
+    // Fields
+    //---------------------------------------------------------------------
+
+    /// Mutex for thread-safety
+    std::mutex mutex;
+
+    /// Geometric growth factor for bin-sizes
+    unsigned int bin_growth;
+
+    /// Minimum bin enumeration
+    unsigned int min_bin;
+
+    /// Maximum bin enumeration
+    unsigned int max_bin;
+
+    /// Minimum bin size
+    size_t min_bin_bytes;
+
+    /// Maximum bin size
+    size_t max_bin_bytes;
+
+    /// Maximum aggregate cached bytes per device
+    size_t max_cached_bytes;
+
+    /// Whether or not to skip a call to FreeAllCached() when destructor is called.
+    /// (The CUDA runtime may have already shut down for statically declared allocators)
+    const bool skip_cleanup;
+
+    /// Whether or not to print (de)allocation events to stdout
+    bool debug;
+
+    /// Map of device ordinal to aggregate cached bytes on that device
+    GpuCachedBytes cached_bytes;
+
+    /// Set of cached device allocations available for reuse
+    CachedBlocks cached_blocks;
+
+    /// Set of live device allocations currently in use
+    BusyBlocks live_blocks;
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+
+    //---------------------------------------------------------------------
+    // Methods
+    //---------------------------------------------------------------------
+
+    /**
+     * @brief Constructor.
+     *
+     * @param bin_growth
+     *   Geometric growth factor for bin-sizes
+     *
+     * @param min_bin
+     *   Minimum bin (default is bin_growth ^ 1)
+     *
+     * @param max_bin
+     *   Maximum bin (default is no max bin)
+     *
+     * @param max_cached_bytes
+     *   Maximum aggregate cached bytes per device (default is no limit)
+     *
+     * @param skip_cleanup
+     *   Whether or not to skip a call to @p FreeAllCached() when the destructor is called (default
+     *   is to deallocate)
+     *
+     * @param debug
+     *   Whether or not to print (de)allocation events to stdout (default is no output)
+     */
+    CachingDeviceAllocator(unsigned int bin_growth,
+                           unsigned int min_bin = 1,
+                           unsigned int max_bin = INVALID_BIN,
+                           size_t max_cached_bytes = INVALID_SIZE,
+                           bool skip_cleanup = false,
+                           bool debug = false)
+        : bin_growth(bin_growth)
+        , min_bin(min_bin)
+        , max_bin(max_bin)
+        , min_bin_bytes(IntPow(bin_growth, min_bin))
+        , max_bin_bytes(IntPow(bin_growth, max_bin))
+        , max_cached_bytes(max_cached_bytes)
+        , skip_cleanup(skip_cleanup)
+        , debug(debug)
+        , cached_blocks(BlockDescriptor::SizeCompare)
+        , live_blocks(BlockDescriptor::PtrCompare)
+    {}
+
+
+    /**
+     * @brief Default constructor.
+     *
+     * Configured with:
+     * @par
+     * - @p bin_growth = 8
+     * - @p min_bin = 3
+     * - @p max_bin = 7
+     * - @p max_cached_bytes = ( ( @p bin_growth ^ @p max_bin ) * 3 ) - 1 = 6,291,455 bytes
+     *
+     * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB and
+     * sets a maximum of 6,291,455 cached bytes per device
+     */
+    CachingDeviceAllocator(
+        bool skip_cleanup = false,
+        bool debug = false)
+    :
+        bin_growth(8),
+        min_bin(3),
+        max_bin(7),
+        min_bin_bytes(IntPow(bin_growth, min_bin)),
+        max_bin_bytes(IntPow(bin_growth, max_bin)),
+        max_cached_bytes((max_bin_bytes * 3) - 1),
+        skip_cleanup(skip_cleanup),
+        debug(debug),
+        cached_blocks(BlockDescriptor::SizeCompare),
+        live_blocks(BlockDescriptor::PtrCompare)
+    {}
+
+
+    /**
+     * @brief Sets the limit on the number of bytes this allocator is allowed to cache per device.
+     *
+     * Changing the ceiling of cached bytes does not cause any allocations (in-use or
+     * cached-in-reserve) to be freed. See \p FreeAllCached().
+     */
+    cudaError_t SetMaxCachedBytes(size_t max_cached_bytes_)
+    {
+        // Lock
+        mutex.lock();
+
+        if (debug) _CubLog("Changing max_cached_bytes (%lld -> %lld)\n", (long long) this->max_cached_bytes, (long long) max_cached_bytes_);
+
+        this->max_cached_bytes = max_cached_bytes_;
+
+        // Unlock
+        mutex.unlock();
+
+        return cudaSuccess;
+    }
+
+    /**
+     * @brief Provides a suitable allocation of device memory for the given size on the specified
+     *        device.
+     *
+     * Once freed, the allocation becomes available immediately for reuse within the @p
+     * active_stream with which it was associated during allocation, and it becomes available
+     * for reuse within other streams when all prior work submitted to @p active_stream has
+     * completed.
+     *
+     * @param[in] device
+     *   Device on which to place the allocation
+     *
+     * @param[out] d_ptr
+     *   Reference to pointer to the allocation
+     *
+     * @param[in] bytes
+     *   Minimum number of bytes for the allocation
+     *
+     * @param[in] active_stream
+     *   The stream to be associated with this allocation
+     */
+    cudaError_t
+    DeviceAllocate(int device, void **d_ptr, size_t bytes, cudaStream_t active_stream = 0)
+    {
+        *d_ptr = NULL;
+        int entrypoint_device = INVALID_DEVICE_ORDINAL;
+        cudaError_t error = cudaSuccess;
+
+        if (device == INVALID_DEVICE_ORDINAL)
+        {
+            error = CubDebug(cudaGetDevice(&entrypoint_device));
+            if (cudaSuccess != error)
+            {
+                return error;
+            }
+
+            device = entrypoint_device;
+        }
+
+        // Create a block descriptor for the requested allocation
+        bool found = false;
+        BlockDescriptor search_key(device);
+        search_key.associated_stream = active_stream;
+        NearestPowerOf(search_key.bin, search_key.bytes, bin_growth, bytes);
+
+        if (search_key.bin > max_bin)
+        {
+            // Bin is greater than our maximum bin: allocate the request
+            // exactly and give out-of-bounds bin. It will not be cached
+            // for reuse when returned.
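+            // (DeviceFree() passes such INVALID_BIN blocks straight to
+            // cudaFree instead of recaching them.)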
+ search_key.bin = INVALID_BIN; + search_key.bytes = bytes; + } + else + { + // Search for a suitable cached allocation: lock + mutex.lock(); + + if (search_key.bin < min_bin) + { + // Bin is less than minimum bin: round up + search_key.bin = min_bin; + search_key.bytes = min_bin_bytes; + } + + // Iterate through the range of cached blocks on the same device in the same bin + CachedBlocks::iterator block_itr = cached_blocks.lower_bound(search_key); + while ((block_itr != cached_blocks.end()) + && (block_itr->device == device) + && (block_itr->bin == search_key.bin)) + { + // To prevent races with reusing blocks returned by the host but still + // in use by the device, only consider cached blocks that are + // either (from the active stream) or (from an idle stream) + bool is_reusable = false; + if (active_stream == block_itr->associated_stream) + { + is_reusable = true; + } + else + { + const cudaError_t event_status = cudaEventQuery(block_itr->ready_event); + if(event_status != cudaErrorNotReady) + { + CubDebug(event_status); + is_reusable = true; + } + } + + if(is_reusable) + { + // Reuse existing cache block. Insert into live blocks. + found = true; + search_key = *block_itr; + search_key.associated_stream = active_stream; + live_blocks.insert(search_key); + + // Remove from free blocks + cached_bytes[device].free -= search_key.bytes; + cached_bytes[device].live += search_key.bytes; + + if (debug) _CubLog("\tDevice %d reused cached block at %p (%lld bytes) for stream %lld (previously associated with stream %lld).\n", + device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) block_itr->associated_stream); + + cached_blocks.erase(block_itr); + + break; + } + block_itr++; + } + + // Done searching: unlock + mutex.unlock(); + } + + // Allocate the block if necessary + if (!found) + { + // Set runtime's current device to specified device (entrypoint may not be set) + if (device != entrypoint_device) + { + error = CubDebug(cudaGetDevice(&entrypoint_device)); + if (cudaSuccess != error) + { + return error; + } + + error = CubDebug(cudaSetDevice(device)); + if (cudaSuccess != error) + { + return error; + } + } + + // Attempt to allocate + error = CubDebug(cudaMalloc(&search_key.d_ptr, search_key.bytes)); + if (error == cudaErrorMemoryAllocation) + { + // The allocation attempt failed: free all cached blocks on device and retry + if (debug) _CubLog("\tDevice %d failed to allocate %lld bytes for stream %lld, retrying after freeing cached allocations", + device, (long long) search_key.bytes, (long long) search_key.associated_stream); + + error = cudaSuccess; // Reset the error we will return + cudaGetLastError(); // Reset CUDART's error + + // Lock + mutex.lock(); + + // Iterate the range of free blocks on the same device + BlockDescriptor free_key(device); + CachedBlocks::iterator block_itr = cached_blocks.lower_bound(free_key); + + while ((block_itr != cached_blocks.end()) && (block_itr->device == device)) + { + // No need to worry about synchronization with the device: cudaFree is + // blocking and will synchronize across all kernels executing + // on the current device + + // Free device memory and destroy stream event. 
+ error = CubDebug(cudaFree(block_itr->d_ptr)); + if (cudaSuccess != error) + { + break; + } + + error = CubDebug(cudaEventDestroy(block_itr->ready_event)); + if (cudaSuccess != error) + { + break; + } + + // Reduce balance and erase entry + cached_bytes[device].free -= block_itr->bytes; + + if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", + device, (long long) block_itr->bytes, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); + + block_itr = cached_blocks.erase(block_itr); + } + + // Unlock + mutex.unlock(); + + // Return under error + if (error) return error; + + // Try to allocate again + error = CubDebug(cudaMalloc(&search_key.d_ptr, search_key.bytes)); + if (cudaSuccess != error) + { + return error; + } + } + + // Create ready event + error = + CubDebug(cudaEventCreateWithFlags(&search_key.ready_event, cudaEventDisableTiming)); + + if (cudaSuccess != error) + { + return error; + } + + // Insert into live blocks + mutex.lock(); + live_blocks.insert(search_key); + cached_bytes[device].live += search_key.bytes; + mutex.unlock(); + + if (debug) _CubLog("\tDevice %d allocated new device block at %p (%lld bytes associated with stream %lld).\n", + device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream); + + // Attempt to revert back to previous device if necessary + if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device)) + { + error = CubDebug(cudaSetDevice(entrypoint_device)); + if (cudaSuccess != error) + { + return error; + } + } + } + + // Copy device pointer to output parameter + *d_ptr = search_key.d_ptr; + + if (debug) _CubLog("\t\t%lld available blocks cached (%lld bytes), %lld live blocks outstanding(%lld bytes).\n", + (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); + + return error; + } + + /** + * @brief Provides a suitable allocation of device memory for the given size on the current + * device. + * + * Once freed, the allocation becomes available immediately for reuse within the @p + * active_stream with which it was associated with during allocation, and it becomes available + * for reuse within other streams when all prior work submitted to @p active_stream has + * completed. + * + * @param[out] d_ptr + * Reference to pointer to the allocation + * + * @param[in] bytes + * Minimum number of bytes for the allocation + * + * @param[in] active_stream + * The stream to be associated with this allocation + */ + cudaError_t DeviceAllocate(void **d_ptr, size_t bytes, cudaStream_t active_stream = 0) + { + return DeviceAllocate(INVALID_DEVICE_ORDINAL, d_ptr, bytes, active_stream); + } + + /** + * @brief Frees a live allocation of device memory on the specified device, returning it to the + * allocator. + * + * Once freed, the allocation becomes available immediately for reuse within the + * @p active_stream with which it was associated with during allocation, and it becomes + * available for reuse within other streams when all prior work submitted to @p active_stream + * has completed. 
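+     *
+     * @note @p d_ptr is expected to be a live allocation previously returned
+     *       by DeviceAllocate() on this allocator.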
+ */ + cudaError_t DeviceFree( + int device, + void* d_ptr) + { + int entrypoint_device = INVALID_DEVICE_ORDINAL; + cudaError_t error = cudaSuccess; + + if (device == INVALID_DEVICE_ORDINAL) + { + error = CubDebug(cudaGetDevice(&entrypoint_device)); + if (cudaSuccess != error) + { + return error; + } + device = entrypoint_device; + } + + // Lock + mutex.lock(); + + // Find corresponding block descriptor + bool recached = false; + BlockDescriptor search_key(d_ptr, device); + BusyBlocks::iterator block_itr = live_blocks.find(search_key); + if (block_itr != live_blocks.end()) + { + // Remove from live blocks + search_key = *block_itr; + live_blocks.erase(block_itr); + cached_bytes[device].live -= search_key.bytes; + + // Keep the returned allocation if bin is valid and we won't exceed the max cached threshold + if ((search_key.bin != INVALID_BIN) && (cached_bytes[device].free + search_key.bytes <= max_cached_bytes)) + { + // Insert returned allocation into free blocks + recached = true; + cached_blocks.insert(search_key); + cached_bytes[device].free += search_key.bytes; + + if (debug) _CubLog("\tDevice %d returned %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks outstanding. (%lld bytes)\n", + device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), + (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); + } + } + + // Unlock + mutex.unlock(); + + // First set to specified device (entrypoint may not be set) + if (device != entrypoint_device) + { + error = CubDebug(cudaGetDevice(&entrypoint_device)); + if (cudaSuccess != error) + { + return error; + } + + error = CubDebug(cudaSetDevice(device)); + if (cudaSuccess != error) + { + return error; + } + } + + if (recached) + { + // Insert the ready event in the associated stream (must have current device set properly) + error = CubDebug(cudaEventRecord(search_key.ready_event, search_key.associated_stream)); + if (cudaSuccess != error) + { + return error; + } + } + + if (!recached) + { + // Free the allocation from the runtime and cleanup the event. + error = CubDebug(cudaFree(d_ptr)); + if (cudaSuccess != error) + { + return error; + } + + error = CubDebug(cudaEventDestroy(search_key.ready_event)); + if (cudaSuccess != error) + { + return error; + } + + if (debug) _CubLog("\tDevice %d freed %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", + device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); + } + + // Reset device + if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device)) + { + error = CubDebug(cudaSetDevice(entrypoint_device)); + if (cudaSuccess != error) + { + return error; + } + } + + return error; + } + + /** + * @brief Frees a live allocation of device memory on the current device, returning it to the + * allocator. + * + * Once freed, the allocation becomes available immediately for reuse within the @p + * active_stream with which it was associated with during allocation, and it becomes available + * for reuse within other streams when all prior work submitted to @p active_stream has + * completed. 
+ */ + cudaError_t DeviceFree( + void* d_ptr) + { + return DeviceFree(INVALID_DEVICE_ORDINAL, d_ptr); + } + + + /** + * @brief Frees all cached device allocations on all devices + */ + cudaError_t FreeAllCached() + { + cudaError_t error = cudaSuccess; + int entrypoint_device = INVALID_DEVICE_ORDINAL; + int current_device = INVALID_DEVICE_ORDINAL; + + mutex.lock(); + + while (!cached_blocks.empty()) + { + // Get first block + CachedBlocks::iterator begin = cached_blocks.begin(); + + // Get entry-point device ordinal if necessary + if (entrypoint_device == INVALID_DEVICE_ORDINAL) + { + error = CubDebug(cudaGetDevice(&entrypoint_device)); + if (cudaSuccess != error) + { + break; + } + } + + // Set current device ordinal if necessary + if (begin->device != current_device) + { + error = CubDebug(cudaSetDevice(begin->device)); + if (cudaSuccess != error) + { + break; + } + current_device = begin->device; + } + + // Free device memory + error = CubDebug(cudaFree(begin->d_ptr)); + if (cudaSuccess != error) + { + break; + } + + error = CubDebug(cudaEventDestroy(begin->ready_event)); + if (cudaSuccess != error) + { + break; + } + + // Reduce balance and erase entry + const size_t block_bytes = begin->bytes; + cached_bytes[current_device].free -= block_bytes; + cached_blocks.erase(begin); + + if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", + current_device, (long long) block_bytes, (long long) cached_blocks.size(), (long long) cached_bytes[current_device].free, (long long) live_blocks.size(), (long long) cached_bytes[current_device].live); + + } + + mutex.unlock(); + + // Attempt to revert back to entry-point device if necessary + if (entrypoint_device != INVALID_DEVICE_ORDINAL) + { + error = CubDebug(cudaSetDevice(entrypoint_device)); + if (cudaSuccess != error) + { + return error; + } + } + + return error; + } + + + /** + * @brief Destructor + */ + virtual ~CachingDeviceAllocator() + { + if (!skip_cleanup) + FreeAllCached(); + } + +}; + + + + +/** @} */ // end group UtilMgmt + +CUB_NAMESPACE_END diff --git a/miniCUDA124/include/cub/util_arch.cuh b/miniCUDA124/include/cub/util_arch.cuh new file mode 100644 index 0000000000000000000000000000000000000000..8756a4a896df47edd157cc6479efd5375af7a51c --- /dev/null +++ b/miniCUDA124/include/cub/util_arch.cuh @@ -0,0 +1,174 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/**
+ * \file
+ * Static architectural properties by SM version.
+ */
+
+#pragma once
+
+#include <cub/config.cuh>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+#include <cub/util_cpp_dialect.cuh>
+#include <cub/util_macro.cuh>
+#include <cub/util_namespace.cuh>
+
+// Legacy include; this functionality used to be defined in here.
+#include <cub/detail/detect_cuda_runtime.cuh>
+
+CUB_NAMESPACE_BEGIN
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
+
+// \deprecated [Since 2.1.0]
+#define CUB_USE_COOPERATIVE_GROUPS
+
+/// In device code, CUB_PTX_ARCH expands to the PTX version for which we are
+/// compiling. In host code, CUB_PTX_ARCH's value is implementation defined.
+#ifndef CUB_PTX_ARCH
+  #if defined(_NVHPC_CUDA)
+    // __NVCOMPILER_CUDA_ARCH__ is the target PTX version, and is defined
+    // when compiling both host code and device code. Currently, only one
+    // PTX version can be targeted.
+    #define CUB_PTX_ARCH __NVCOMPILER_CUDA_ARCH__
+  #elif !defined(__CUDA_ARCH__)
+    #define CUB_PTX_ARCH 0
+  #else
+    #define CUB_PTX_ARCH __CUDA_ARCH__
+  #endif
+#endif
+
+// These definitions were intended for internal use only and are now obsolete.
+// If you relied on them, consider porting your code to use the functionality
+// in libcu++'s <nv/target> header.
+// For a temporary workaround, define CUB_PROVIDE_LEGACY_ARCH_MACROS to make
+// them available again. These should be considered deprecated and will be
+// fully removed in a future version.
+#ifdef CUB_PROVIDE_LEGACY_ARCH_MACROS
+  #ifndef CUB_IS_DEVICE_CODE
+    #if defined(_NVHPC_CUDA)
+      #define CUB_IS_DEVICE_CODE __builtin_is_device_code()
+      #define CUB_IS_HOST_CODE (!__builtin_is_device_code())
+      #define CUB_INCLUDE_DEVICE_CODE 1
+      #define CUB_INCLUDE_HOST_CODE 1
+    #elif CUB_PTX_ARCH > 0
+      #define CUB_IS_DEVICE_CODE 1
+      #define CUB_IS_HOST_CODE 0
+      #define CUB_INCLUDE_DEVICE_CODE 1
+      #define CUB_INCLUDE_HOST_CODE 0
+    #else
+      #define CUB_IS_DEVICE_CODE 0
+      #define CUB_IS_HOST_CODE 1
+      #define CUB_INCLUDE_DEVICE_CODE 0
+      #define CUB_INCLUDE_HOST_CODE 1
+    #endif
+  #endif
+#endif // CUB_PROVIDE_LEGACY_ARCH_MACROS
+
+/// Maximum number of devices supported.
+#ifndef CUB_MAX_DEVICES + #define CUB_MAX_DEVICES (128) +#endif + +static_assert(CUB_MAX_DEVICES > 0, "CUB_MAX_DEVICES must be greater than 0."); + + +/// Number of threads per warp +#ifndef CUB_LOG_WARP_THREADS + #define CUB_LOG_WARP_THREADS(unused) (5) + #define CUB_WARP_THREADS(unused) (1 << CUB_LOG_WARP_THREADS(0)) + + #define CUB_PTX_WARP_THREADS CUB_WARP_THREADS(0) + #define CUB_PTX_LOG_WARP_THREADS CUB_LOG_WARP_THREADS(0) +#endif + + +/// Number of smem banks +#ifndef CUB_LOG_SMEM_BANKS + #define CUB_LOG_SMEM_BANKS(unused) (5) + #define CUB_SMEM_BANKS(unused) (1 << CUB_LOG_SMEM_BANKS(0)) + + #define CUB_PTX_LOG_SMEM_BANKS CUB_LOG_SMEM_BANKS(0) + #define CUB_PTX_SMEM_BANKS CUB_SMEM_BANKS +#endif + + +/// Oversubscription factor +#ifndef CUB_SUBSCRIPTION_FACTOR + #define CUB_SUBSCRIPTION_FACTOR(unused) (5) + #define CUB_PTX_SUBSCRIPTION_FACTOR CUB_SUBSCRIPTION_FACTOR(0) +#endif + + +/// Prefer padding overhead vs X-way conflicts greater than this threshold +#ifndef CUB_PREFER_CONFLICT_OVER_PADDING + #define CUB_PREFER_CONFLICT_OVER_PADDING(unused) (1) + #define CUB_PTX_PREFER_CONFLICT_OVER_PADDING CUB_PREFER_CONFLICT_OVER_PADDING(0) +#endif + + +template < + int NOMINAL_4B_BLOCK_THREADS, + int NOMINAL_4B_ITEMS_PER_THREAD, + typename T> +struct RegBoundScaling +{ + enum { + ITEMS_PER_THREAD = CUB_MAX(1, NOMINAL_4B_ITEMS_PER_THREAD * 4 / CUB_MAX(4, sizeof(T))), + BLOCK_THREADS = CUB_MIN(NOMINAL_4B_BLOCK_THREADS, (((1024 * 48) / (sizeof(T) * ITEMS_PER_THREAD)) + 31) / 32 * 32), + }; +}; + + +template < + int NOMINAL_4B_BLOCK_THREADS, + int NOMINAL_4B_ITEMS_PER_THREAD, + typename T> +struct MemBoundScaling +{ + enum { + ITEMS_PER_THREAD = CUB_MAX(1, CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T), NOMINAL_4B_ITEMS_PER_THREAD * 2)), + BLOCK_THREADS = CUB_MIN(NOMINAL_4B_BLOCK_THREADS, (((1024 * 48) / (sizeof(T) * ITEMS_PER_THREAD)) + 31) / 32 * 32), + }; +}; + + + + +#endif // Do not document + +CUB_NAMESPACE_END diff --git a/miniCUDA124/include/cub/util_compiler.cuh b/miniCUDA124/include/cub/util_compiler.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7de9323f0722933c1cf2916360f6ac88abaa0e9e --- /dev/null +++ b/miniCUDA124/include/cub/util_compiler.cuh @@ -0,0 +1,92 @@ +/****************************************************************************** + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/**
+ * \file
+ * Detect compiler information.
+ */
+
+#pragma once
+
+// For _CCCL_IMPLICIT_SYSTEM_HEADER
+#include <cuda/__cccl_config>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+// enumerate host compilers we know about
+#define CUB_HOST_COMPILER_UNKNOWN 0
+#define CUB_HOST_COMPILER_MSVC 1
+#define CUB_HOST_COMPILER_GCC 2
+#define CUB_HOST_COMPILER_CLANG 3
+
+// enumerate device compilers we know about
+#define CUB_DEVICE_COMPILER_UNKNOWN 0
+#define CUB_DEVICE_COMPILER_MSVC 1
+#define CUB_DEVICE_COMPILER_GCC 2
+#define CUB_DEVICE_COMPILER_NVCC 3
+#define CUB_DEVICE_COMPILER_CLANG 4
+
+// figure out which host compiler we're using
+#if defined(_MSC_VER)
+# define CUB_HOST_COMPILER CUB_HOST_COMPILER_MSVC
+# define CUB_MSVC_VERSION _MSC_VER
+# define CUB_MSVC_VERSION_FULL _MSC_FULL_VER
+#elif defined(__clang__)
+# define CUB_HOST_COMPILER CUB_HOST_COMPILER_CLANG
+# define CUB_CLANG_VERSION \
+   (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
+#elif defined(__GNUC__)
+# define CUB_HOST_COMPILER CUB_HOST_COMPILER_GCC
+# define CUB_GCC_VERSION \
+   (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#else
+# define CUB_HOST_COMPILER CUB_HOST_COMPILER_UNKNOWN
+#endif // CUB_HOST_COMPILER
+
+// figure out which device compiler we're using
+#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
+# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_NVCC
+#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC
+# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_MSVC
+#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_GCC
+# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_GCC
+#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_CLANG
+// CUDA-capable clang should behave similarly to NVCC.
+# if defined(__CUDA__)
+#  define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_NVCC
+# else
+#  define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_CLANG
+# endif
+#else
+# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_UNKNOWN
+#endif
diff --git a/miniCUDA124/include/cub/util_cpp_dialect.cuh b/miniCUDA124/include/cub/util_cpp_dialect.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..44b6f57322917656055418fb040406f7488b96e1
--- /dev/null
+++ b/miniCUDA124/include/cub/util_cpp_dialect.cuh
@@ -0,0 +1,161 @@
+/******************************************************************************
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/*! \file
+ *  \brief Detect the version of the C++ standard used by the compiler.
+ */
+
+#pragma once
+
+#include <cuda/__cccl_config>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+#include <cub/util_compiler.cuh>
+
+// Deprecation warnings may be silenced by defining the following macros. These
+// may be combined.
+// - CUB_IGNORE_DEPRECATED_CPP_DIALECT:
+//   Ignore all deprecated C++ dialects and outdated compilers.
+// - CUB_IGNORE_DEPRECATED_CPP_11:
+//   Ignore deprecation warnings when compiling with C++11. C++03 and outdated
+//   compilers will still issue warnings.
+// - CUB_IGNORE_DEPRECATED_COMPILER
+//   Ignore deprecation warnings when using deprecated compilers. Compiling
+//   with C++03 and C++11 will still issue warnings.
+
+// Check for the thrust opt-outs as well:
+#if !defined(CUB_IGNORE_DEPRECATED_CPP_DIALECT) && \
+    defined(THRUST_IGNORE_DEPRECATED_CPP_DIALECT)
+#  define CUB_IGNORE_DEPRECATED_CPP_DIALECT
+#endif
+#if !defined(CUB_IGNORE_DEPRECATED_CPP_11) && \
+    defined(THRUST_IGNORE_DEPRECATED_CPP_11)
+#  define CUB_IGNORE_DEPRECATED_CPP_11
+#endif
+#if !defined(CUB_IGNORE_DEPRECATED_COMPILER) && \
+    defined(THRUST_IGNORE_DEPRECATED_COMPILER)
+#  define CUB_IGNORE_DEPRECATED_COMPILER
+#endif
+
+#ifdef CUB_IGNORE_DEPRECATED_CPP_DIALECT
+#  define CUB_IGNORE_DEPRECATED_CPP_11
+#  define CUB_IGNORE_DEPRECATED_COMPILER
+#endif
+
+// Define this to override the built-in detection.
+#ifndef CUB_CPP_DIALECT
+
+// MSVC does not define __cplusplus correctly. _MSVC_LANG is used instead.
+// This macro is only defined in MSVC 2015U3+.
+# ifdef _MSVC_LANG // Do not replace with CUB_HOST_COMPILER test (see above)
+// MSVC2015 reports C++14 but lacks extended constexpr support. Treat as C++11.
+#   if CUB_MSVC_VERSION < 1910 && _MSVC_LANG > 201103L /* MSVC < 2017 && CPP > 2011 */
+#     define CUB_CPLUSPLUS 201103L /* Fix to 2011 */
+#   else
+#     define CUB_CPLUSPLUS _MSVC_LANG /* We'll trust this for now.
*/ +# endif // MSVC 2015 C++14 fix +# else +# define CUB_CPLUSPLUS __cplusplus +# endif + +// Detect current dialect: +# if CUB_CPLUSPLUS < 201103L +# define CUB_CPP_DIALECT 2003 +# elif CUB_CPLUSPLUS < 201402L +# define CUB_CPP_DIALECT 2011 +# elif CUB_CPLUSPLUS < 201703L +# define CUB_CPP_DIALECT 2014 +# elif CUB_CPLUSPLUS == 201703L +# define CUB_CPP_DIALECT 2017 +# elif CUB_CPLUSPLUS > 201703L // unknown, but is higher than 2017. +# define CUB_CPP_DIALECT 2020 +# endif + +# undef CUB_CPLUSPLUS // cleanup + +#endif // !CUB_CPP_DIALECT + +// Define CUB_COMPILER_DEPRECATION macro: +#if CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC +# define CUB_COMP_DEPR_IMPL(msg) \ + __pragma(message(__FILE__ ":" CUB_COMP_DEPR_IMPL0(__LINE__) ": warning: " #msg)) +# define CUB_COMP_DEPR_IMPL0(x) CUB_COMP_DEPR_IMPL1(x) +# define CUB_COMP_DEPR_IMPL1(x) #x +#else // clang / gcc: +# define CUB_COMP_DEPR_IMPL(msg) CUB_COMP_DEPR_IMPL0(GCC warning #msg) +# define CUB_COMP_DEPR_IMPL0(expr) _Pragma(#expr) +# define CUB_COMP_DEPR_IMPL1 /* intentionally blank */ +#endif + +#define CUB_COMPILER_DEPRECATION(REQ) \ + CUB_COMP_DEPR_IMPL(CUB requires at least REQ. Define CUB_IGNORE_DEPRECATED_COMPILER to suppress this message.) + +#define CUB_COMPILER_DEPRECATION_SOFT(REQ, CUR) \ + CUB_COMP_DEPR_IMPL(CUB requires at least REQ. CUR is deprecated but still supported. CUR support will be removed in a future release. Define CUB_IGNORE_DEPRECATED_CPP_DIALECT to suppress this message.) + +#ifndef CUB_IGNORE_DEPRECATED_COMPILER + +// Compiler checks: +# if CUB_HOST_COMPILER == CUB_HOST_COMPILER_GCC && CUB_GCC_VERSION < 50000 + CUB_COMPILER_DEPRECATION(GCC 5.0); +# elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_CLANG && CUB_CLANG_VERSION < 70000 + CUB_COMPILER_DEPRECATION(Clang 7.0); +# elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC && CUB_MSVC_VERSION < 1910 + // <2017. Hard upgrade message: + CUB_COMPILER_DEPRECATION(MSVC 2019 (19.20/16.0/14.20)); +# elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC && CUB_MSVC_VERSION < 1920 + // >=2017, <2019. Soft deprecation message: + CUB_COMPILER_DEPRECATION_SOFT(MSVC 2019 (19.20/16.0/14.20), MSVC 2017); +# endif + +#endif // CUB_IGNORE_DEPRECATED_COMPILER + +#ifndef CUB_IGNORE_DEPRECATED_DIALECT + +// Dialect checks: +# if CUB_CPP_DIALECT < 2011 + // + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#include + +CUB_NAMESPACE_BEGIN + + +#ifdef DOXYGEN_SHOULD_SKIP_THIS // Only parse this during doxygen passes: + +/** + * @def CUB_DEBUG_LOG + * + * Causes kernel launch configurations to be printed to the console + */ +#define CUB_DEBUG_LOG + +/** + * @def CUB_DEBUG_SYNC + * + * Causes synchronization of the stream after every kernel launch to check + * for errors. Also causes kernel launch configurations to be printed to the + * console. + */ +#define CUB_DEBUG_SYNC + +/** + * @def CUB_DEBUG_HOST_ASSERTIONS + * + * Extends `CUB_DEBUG_SYNC` effects by checking host-side precondition + * assertions. + */ +#define CUB_DEBUG_HOST_ASSERTIONS + +/** + * @def CUB_DEBUG_DEVICE_ASSERTIONS + * + * Extends `CUB_DEBUG_HOST_ASSERTIONS` effects by checking device-side + * precondition assertions. + */ +#define CUB_DEBUG_DEVICE_ASSERTIONS + +/** + * @def CUB_DEBUG_ALL + * + * Causes host and device-side precondition assertions to be checked. 
Apart + * from that, causes synchronization of the stream after every kernel launch to + * check for errors. Also causes kernel launch configurations to be printed to + * the console. + */ +#define CUB_DEBUG_ALL + +#endif // DOXYGEN_SHOULD_SKIP_THIS + +/** + * \addtogroup UtilMgmt + * @{ + */ + + +// `CUB_DETAIL_DEBUG_LEVEL_*`: Implementation details, internal use only: + +#define CUB_DETAIL_DEBUG_LEVEL_NONE 0 +#define CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS_ONLY 1 +#define CUB_DETAIL_DEBUG_LEVEL_LOG 2 +#define CUB_DETAIL_DEBUG_LEVEL_SYNC 3 +#define CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS 4 +#define CUB_DETAIL_DEBUG_LEVEL_DEVICE_ASSERTIONS 5 +#define CUB_DETAIL_DEBUG_LEVEL_ALL 1000 + +// `CUB_DEBUG_*`: User interfaces: + +// Extra logging, no syncs +#ifdef CUB_DEBUG_LOG +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_LOG +#endif + +// Logging + syncs +#ifdef CUB_DEBUG_SYNC +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_SYNC +#endif + +// Logging + syncs + host assertions +#ifdef CUB_DEBUG_HOST_ASSERTIONS +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS +#endif + +// Logging + syncs + host assertions + device assertions +#ifdef CUB_DEBUG_DEVICE_ASSERTIONS +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_DEVICE_ASSERTIONS +#endif + +// All +#ifdef CUB_DEBUG_ALL +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_ALL +#endif + +// Default case, no extra debugging: +#ifndef CUB_DETAIL_DEBUG_LEVEL +#ifdef NDEBUG +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_NONE +#else +#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS_ONLY +#endif +#endif + +/* + * `CUB_DETAIL_DEBUG_ENABLE_*`: + * Internal implementation details, used for testing enabled debug features: + */ + +#if CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_LOG +#define CUB_DETAIL_DEBUG_ENABLE_LOG +#endif + +#if CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_SYNC +#define CUB_DETAIL_DEBUG_ENABLE_SYNC +#endif + +#if (CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS) || \ + (CUB_DETAIL_DEBUG_LEVEL == CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS_ONLY) +#define CUB_DETAIL_DEBUG_ENABLE_HOST_ASSERTIONS +#endif + +#if CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_DEVICE_ASSERTIONS +#define CUB_DETAIL_DEBUG_ENABLE_DEVICE_ASSERTIONS +#endif + + +/// CUB error reporting macro (prints error messages to stderr) +#if (defined(DEBUG) || defined(_DEBUG)) && !defined(CUB_STDERR) + #define CUB_STDERR +#endif + +/** + * \brief %If \p CUB_STDERR is defined and \p error is not \p cudaSuccess, the + * corresponding error message is printed to \p stderr (or \p stdout in device + * code) along with the supplied source context. + * + * \return The CUDA error. + */ +__host__ __device__ +__forceinline__ +cudaError_t Debug(cudaError_t error, const char *filename, int line) +{ + // Clear the global CUDA error state which may have been set by the last + // call. Otherwise, errors may "leak" to unrelated kernel launches. 
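+  // (In device code, cudaGetLastError() is part of the CUDA device runtime,
+  // which is linked only when relocatable device code is enabled; hence the
+  // CUB_RDC_ENABLED gate below.)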
+
+  // clang-format off
+  #ifndef CUB_RDC_ENABLED
+  #define CUB_TEMP_DEVICE_CODE
+  #else
+  #define CUB_TEMP_DEVICE_CODE last_error = cudaGetLastError()
+  #endif
+
+  cudaError_t last_error = cudaSuccess;
+
+  NV_IF_TARGET(
+    NV_IS_HOST,
+    (last_error = cudaGetLastError();),
+    (CUB_TEMP_DEVICE_CODE;)
+  );
+
+  #undef CUB_TEMP_DEVICE_CODE
+  // clang-format on
+
+  if (error == cudaSuccess && last_error != cudaSuccess)
+  {
+    error = last_error;
+  }
+
+#ifdef CUB_STDERR
+  if (error)
+  {
+    NV_IF_TARGET(
+      NV_IS_HOST, (
+        fprintf(stderr,
+                "CUDA error %d [%s, %d]: %s\n",
+                error,
+                filename,
+                line,
+                cudaGetErrorString(error));
+        fflush(stderr);
+      ),
+      (
+        printf("CUDA error %d [block (%d,%d,%d) thread (%d,%d,%d), %s, %d]\n",
+               error,
+               blockIdx.z,
+               blockIdx.y,
+               blockIdx.x,
+               threadIdx.z,
+               threadIdx.y,
+               threadIdx.x,
+               filename,
+               line);
+      )
+    );
+  }
+#else
+  (void)filename;
+  (void)line;
+#endif
+
+  return error;
+}
+
+/**
+ * \brief Debug macro
+ */
+#ifndef CubDebug
+  #define CubDebug(e) CUB_NS_QUALIFIER::Debug((cudaError_t) (e), __FILE__, __LINE__)
+#endif
+
+
+/**
+ * \brief Debug macro with exit
+ */
+#ifndef CubDebugExit
+  #define CubDebugExit(e) if (CUB_NS_QUALIFIER::Debug((cudaError_t) (e), __FILE__, __LINE__)) { exit(1); }
+#endif
+
+
+/**
+ * \brief Log macro for printf statements.
+ */
+#if !defined(_CubLog)
+#if defined(_NVHPC_CUDA) || !(defined(__clang__) && defined(__CUDA__))
+
+// NVCC / NVC++
+#define _CubLog(format, ...) \
+  do \
+  { \
+    NV_IF_TARGET(NV_IS_HOST, \
+                 (printf(format, __VA_ARGS__);), \
+                 (printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
+                         blockIdx.z, \
+                         blockIdx.y, \
+                         blockIdx.x, \
+                         threadIdx.z, \
+                         threadIdx.y, \
+                         threadIdx.x, \
+                         __VA_ARGS__);)); \
+  } while (false)
+
+#else // Clang:
+
+// XXX shameless hack for clang around variadic printf...
+// Compiles w/o supplying -std=c++11 but shows warnings,
+// so we silence them :)
+#pragma clang diagnostic ignored "-Wc++11-extensions"
+#pragma clang diagnostic ignored "-Wunnamed-type-template-args"
+template <class... Args>
+inline __host__ __device__ void va_printf(char const *format,
+                                          Args const &...args)
+{
+#ifdef __CUDA_ARCH__
+  printf(format,
+         blockIdx.z,
+         blockIdx.y,
+         blockIdx.x,
+         threadIdx.z,
+         threadIdx.y,
+         threadIdx.x,
+         args...);
+#else
+  printf(format, args...);
+#endif
+}
+#ifndef __CUDA_ARCH__
+#define _CubLog(format, ...) CUB_NS_QUALIFIER::va_printf(format, __VA_ARGS__);
+#else
+#define _CubLog(format, ...) \
+  CUB_NS_QUALIFIER::va_printf("[block (%d,%d,%d), thread " \
+                              "(%d,%d,%d)]: " format, \
+                              __VA_ARGS__);
+#endif
+#endif
+#endif
+
+/** @} */ // end group UtilMgmt
+
+CUB_NAMESPACE_END
diff --git a/miniCUDA124/include/cuda/__cccl_config b/miniCUDA124/include/cuda/__cccl_config
new file mode 100644
index 0000000000000000000000000000000000000000..50c16356e1024a956bb202803e95eff87fc16e54
--- /dev/null
+++ b/miniCUDA124/include/cuda/__cccl_config
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
+
+
+/**
+ * \brief Log macro for printf statements.
+ */
+#if !defined(_CubLog)
+#if defined(_NVHPC_CUDA) || !(defined(__clang__) && defined(__CUDA__))
+
+// NVCC / NVC++
+#define _CubLog(format, ...) \
+  do \
+  { \
+    NV_IF_TARGET(NV_IS_HOST, \
+                 (printf(format, __VA_ARGS__);), \
+                 (printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
+                         blockIdx.z, \
+                         blockIdx.y, \
+                         blockIdx.x, \
+                         threadIdx.z, \
+                         threadIdx.y, \
+                         threadIdx.x, \
+                         __VA_ARGS__);)); \
+  } while (false)
+
+#else // Clang:
+
+// XXX shameless hack for clang around variadic printf...
+// Compiles w/o supplying -std=c++11 but shows warning,
+// so we silence them :)
+#pragma clang diagnostic ignored "-Wc++11-extensions"
+#pragma clang diagnostic ignored "-Wunnamed-type-template-args"
+template <class... Args>
+inline __host__ __device__ void va_printf(char const *format,
+                                          Args const &...args)
+{
+#ifdef __CUDA_ARCH__
+  printf(format,
+         blockIdx.z,
+         blockIdx.y,
+         blockIdx.x,
+         threadIdx.z,
+         threadIdx.y,
+         threadIdx.x,
+         args...);
+#else
+  printf(format, args...);
+#endif
+}
+#ifndef __CUDA_ARCH__
+#define _CubLog(format, ...) CUB_NS_QUALIFIER::va_printf(format, __VA_ARGS__);
+#else
+#define _CubLog(format, ...)                               \
+  CUB_NS_QUALIFIER::va_printf("[block (%d,%d,%d), thread " \
+                              "(%d,%d,%d)]: " format,      \
+                              __VA_ARGS__);
+#endif
+#endif
+#endif
+
+/** @} */ // end group UtilMgmt
+
+CUB_NAMESPACE_END
diff --git a/miniCUDA124/include/cuda/__cccl_config b/miniCUDA124/include/cuda/__cccl_config
new file mode 100644
index 0000000000000000000000000000000000000000..50c16356e1024a956bb202803e95eff87fc16e54
--- /dev/null
+++ b/miniCUDA124/include/cuda/__cccl_config
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA__CCCL_CONFIG
+#define _CUDA__CCCL_CONFIG
+
+#include "std/detail/libcxx/include/__cccl_config"
+
+#endif // _CUDA__CCCL_CONFIG
diff --git a/miniCUDA124/include/cuda/atomic b/miniCUDA124/include/cuda/atomic
new file mode 100644
index 0000000000000000000000000000000000000000..8f8b3673236d803e41a6e4fb2aacad875216a3a0
--- /dev/null
+++ b/miniCUDA124/include/cuda/atomic
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_ATOMIC
+#define _CUDA_ATOMIC
+
+#include "std/atomic"
+
+#endif // _CUDA_ATOMIC
diff --git a/miniCUDA124/include/cuda/barrier b/miniCUDA124/include/cuda/barrier
new file mode 100644
index 0000000000000000000000000000000000000000..3c21d0c9b1fbc995481596904719145f10a75933
--- /dev/null
+++ b/miniCUDA124/include/cuda/barrier
@@ -0,0 +1,285 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_BARRIER
+#define _CUDA_BARRIER
+
+#include "std/barrier"
+
+// Forward-declare CUtensorMap for use in cp_async_bulk_tensor_* PTX wrapping
+// functions. These functions take a pointer to CUtensorMap, so do not need to
+// know its size. This type is defined in cuda.h (driver API) as:
+//
+//     typedef struct CUtensorMap_st { [ .. snip .. ] } CUtensorMap;
+//
+// We need to forward-declare both CUtensorMap_st (the struct) and CUtensorMap
+// (the typedef):
+struct CUtensorMap_st;
+typedef struct CUtensorMap_st CUtensorMap;
+
+_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL
+
+// Experimental exposure of TMA PTX:
+//
+// - cp_async_bulk_global_to_shared
+// - cp_async_bulk_shared_to_global
+// - cp_async_bulk_tensor_{1,2,3,4,5}d_global_to_shared
+// - cp_async_bulk_tensor_{1,2,3,4,5}d_shared_to_global
+// - fence_proxy_async_shared_cta
+// - cp_async_bulk_commit_group
+// - cp_async_bulk_wait_group_read<0, …, 7>
+
+// These PTX wrappers are only available when the code is compiled for compute
+// capability 9.0 and above. The check for (!defined(__CUDA_MINIMUM_ARCH__)) is
+// necessary to prevent cudafe from ripping out the device functions before
+// device compilation begins.
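+
+// A minimal usage sketch (illustrative, not part of the upstream header),
+// assuming an sm_90 kernel; `gmem_src` is a hypothetical global pointer:
+//
+//   __shared__ alignas(16) int smem[256];
+//   #pragma nv_diag_suppress static_var_with_dynamic_init
+//   __shared__ cuda::barrier<cuda::thread_scope_block> bar;
+//   if (threadIdx.x == 0) { init(&bar, blockDim.x); }
+//   __syncthreads();
+//   if (threadIdx.x == 0) {
+//     cuda::device::experimental::cp_async_bulk_global_to_shared(
+//         smem, gmem_src, sizeof(smem), bar);
+//   }
+//   // Thread 0 accounts for the expected transaction bytes; all threads
+//   // arrive and then wait until the bulk copy has completed:
+//   auto token = cuda::device::barrier_arrive_tx(
+//       bar, 1, threadIdx.x == 0 ? sizeof(smem) : 0);
+//   bar.wait(cuda::std::move(token));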
+#ifdef __cccl_lib_experimental_ctk12_cp_async_exposure
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_global_to_shared(void *__dest, const void *__src, _CUDA_VSTD::uint32_t __size, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
+{
+    _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16.");
+    _LIBCUDACXX_DEBUG_ASSERT(__isShared(__dest), "Destination must be shared memory address.");
+    _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__src), "Source must be global memory address.");
+
+    asm volatile(
+        "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];\n"
+        :
+        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
+          "l"(static_cast<_CUDA_VSTD::uint64_t>(__cvta_generic_to_global(__src))),
+          "r"(__size),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
+        : "memory");
+}
+
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_shared_to_global(void *__dest, const void * __src, _CUDA_VSTD::uint32_t __size)
+{
+    _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16.");
+    _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__dest), "Destination must be global memory address.");
+    _LIBCUDACXX_DEBUG_ASSERT(__isShared(__src), "Source must be shared memory address.");
+
+    asm volatile(
+        "cp.async.bulk.global.shared::cta.bulk_group [%0], [%1], %2;\n"
+        :
+        : "l"(static_cast<_CUDA_VSTD::uint64_t>(__cvta_generic_to_global(__dest))),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src))),
+          "r"(__size)
+        : "memory");
+}
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_1d_global_to_shared(
+    void *__dest, const CUtensorMap *__tensor_map, int __c0, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
+        "[%0], [%1, {%2}], [%3];\n"
+        :
+        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
+          "l"(__tensor_map),
+          "r"(__c0),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
+        : "memory");
+}
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_2d_global_to_shared(
+    void *__dest, const CUtensorMap *__tensor_map, int __c0, int __c1, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
+        "[%0], [%1, {%2, %3}], [%4];\n"
+        :
+        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
+          "l"(__tensor_map),
+          "r"(__c0),
+          "r"(__c1),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
+        : "memory");
+}
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_3d_global_to_shared(
+    void *__dest, const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
+        "[%0], [%1, {%2, %3, %4}], [%5];\n"
+        :
+        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
+          "l"(__tensor_map),
+          "r"(__c0),
+          "r"(__c1),
+          "r"(__c2),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
+        : "memory");
+}
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_4d_global_to_shared(
+    void *__dest, const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
+        "[%0], [%1, {%2, %3, %4, %5}], [%6];\n"
+        :
+        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
+          "l"(__tensor_map),
+          "r"(__c0),
+          "r"(__c1),
+          "r"(__c2),
+          "r"(__c3),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
+        : "memory");
+}
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_5d_global_to_shared(
+    void *__dest, const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, int __c4, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
+        "[%0], [%1, {%2, %3, %4, %5, %6}], [%7];\n"
+        :
+        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
+          "l"(__tensor_map),
+          "r"(__c0),
+          "r"(__c1),
+          "r"(__c2),
+          "r"(__c3),
+          "r"(__c4),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
+        : "memory");
+}
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_1d_shared_to_global(
+    const CUtensorMap *__tensor_map, int __c0, const void *__src)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.1d.global.shared::cta.tile.bulk_group "
+        "[%0, {%1}], [%2];\n"
+        :
+        : "l"(__tensor_map),
+          "r"(__c0),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
+        : "memory");
+}
+
+// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
+inline _LIBCUDACXX_DEVICE
+void cp_async_bulk_tensor_2d_shared_to_global(
+    const CUtensorMap *__tensor_map, int __c0, int __c1, const void *__src)
+{
+    asm volatile(
+        "cp.async.bulk.tensor.2d.global.shared::cta.tile.bulk_group "
+        "[%0, {%1, %2}], [%3];\n"
+        :
+        : "l"(__tensor_map),
+          "r"(__c0),
+          "r"(__c1),
+          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
+        : "memory");
+}
"l"(__tensor_map), + "r"(__c0), + "r"(__c1), + "r"(__c2), + "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src))) + : "memory"); +} + +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor +inline _LIBCUDACXX_DEVICE +void cp_async_bulk_tensor_4d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, const void *__src) +{ + asm volatile( + "cp.async.bulk.tensor.4d.global.shared::cta.tile.bulk_group " + "[%0, {%1, %2, %3, %4}], [%5];\n" + : + : "l"(__tensor_map), + "r"(__c0), + "r"(__c1), + "r"(__c2), + "r"(__c3), + "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src))) + : "memory"); +} + +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor +inline _LIBCUDACXX_DEVICE +void cp_async_bulk_tensor_5d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, int __c4, const void *__src) +{ + asm volatile( + "cp.async.bulk.tensor.5d.global.shared::cta.tile.bulk_group " + "[%0, {%1, %2, %3, %4, %5}], [%6];\n" + : + : "l"(__tensor_map), + "r"(__c0), + "r"(__c1), + "r"(__c2), + "r"(__c3), + "r"(__c4), + "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src))) + : "memory"); +} + +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-membar +inline _LIBCUDACXX_DEVICE +void fence_proxy_async_shared_cta() { + asm volatile("fence.proxy.async.shared::cta; \n":::"memory"); +} + +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-commit-group +inline _LIBCUDACXX_DEVICE +void cp_async_bulk_commit_group() +{ + asm volatile("cp.async.bulk.commit_group;\n" ::: "memory"); +} + +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-wait-group +template +inline _LIBCUDACXX_DEVICE +void cp_async_bulk_wait_group_read() +{ + static_assert(n_prior <= 63, "cp_async_bulk_wait_group_read: waiting for more than 63 groups is not supported."); + asm volatile("cp.async.bulk.wait_group.read %0; \n" + : + : "n"(n_prior) + : "memory"); +} + +#endif // __cccl_lib_experimental_ctk12_cp_async_exposure + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL + +#endif // _CUDA_BARRIER diff --git a/miniCUDA124/include/cuda/functional b/miniCUDA124/include/cuda/functional new file mode 100644 index 0000000000000000000000000000000000000000..40343338c901736baf945dd37a15f28fc176687e --- /dev/null +++ b/miniCUDA124/include/cuda/functional @@ -0,0 +1,155 @@ +// -*- C++ -*- +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * NVIDIA SOFTWARE LICENSE + * + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. 
+ * + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + * + * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + * + * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: + * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. 
SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. 
Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. 
If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party.
+ *
+ * (v. August 20, 2021)
+ */
+
+#ifndef _CUDA_FUNCTIONAL_
+#define _CUDA_FUNCTIONAL_
+
+#include
+#include
+#include
+
+_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
+namespace __detail
+{
+
+template <class _Ret, class _DecayFn>
+class __return_type_wrapper {
+ private:
+  _DecayFn __fn_;
+
+ public:
+  __return_type_wrapper() = delete;
+
+  template <class _Fn,
+            class = _CUDA_VSTD::__enable_if_t<
+                _CUDA_VSTD::is_same<_CUDA_VSTD::__decay_t<_Fn>, _DecayFn>::value>>
+  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
+  explicit __return_type_wrapper(_Fn &&__fn) noexcept
+      : __fn_(_CUDA_VSTD::forward<_Fn>(__fn)) {}
+
+  template <class... _As>
+  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
+  _Ret operator()(_As&&... __as) & noexcept {
+#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
+    static_assert(
+        _CUDA_VSTD::is_same<
+            _Ret,
+            typename _CUDA_VSTD::__invoke_of<_DecayFn&, _As...>::type
+        >::value,
+        "Return type shall match the proclaimed one exactly");
+#endif
+
+    return _CUDA_VSTD::__invoke(__fn_, _CUDA_VSTD::forward<_As>(__as)...);
+  }
+
+  template <class... _As>
+  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
+  _Ret operator()(_As&&... __as) && noexcept {
+#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
+    static_assert(
+        _CUDA_VSTD::is_same<
+            _Ret,
+            typename _CUDA_VSTD::__invoke_of<_DecayFn, _As...>::type
+        >::value,
+        "Return type shall match the proclaimed one exactly");
+#endif
+
+    return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_),
+                                _CUDA_VSTD::forward<_As>(__as)...);
+  }
+
+  template <class... _As>
+  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
+  _Ret operator()(_As&&... __as) const& noexcept {
+#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
+    static_assert(
+        _CUDA_VSTD::is_same<
+            _Ret,
+            typename _CUDA_VSTD::__invoke_of<const _DecayFn&, _As...>::type
+        >::value,
+        "Return type shall match the proclaimed one exactly");
+#endif
+
+    return _CUDA_VSTD::__invoke(__fn_, _CUDA_VSTD::forward<_As>(__as)...);
+  }
+
+  template <class... _As>
+  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
+  _Ret operator()(_As&&... __as) const&& noexcept {
+#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
+    static_assert(
+        _CUDA_VSTD::is_same<
+            _Ret,
+            typename _CUDA_VSTD::__invoke_of<const _DecayFn, _As...>::type
+        >::value,
+        "Return type shall match the proclaimed one exactly");
+#endif
+
+    return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_),
+                                _CUDA_VSTD::forward<_As>(__as)...);
+  }
+};
+
+} // __detail
+
+template <class _Ret, class _Fn>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>>
+proclaim_return_type(_Fn&& __fn) noexcept {
+  return __detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>>(
+      _CUDA_VSTD::forward<_Fn>(__fn));
+}
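+
+// Illustrative usage (not part of the upstream header): proclaim_return_type
+// wraps a callable, typically an extended __device__ lambda whose return type
+// cannot be deduced from host code, and checks the proclaimed type on device:
+//
+//   auto op = cuda::proclaim_return_type<int>(
+//       [] __device__ (int a, int b) { return a + b; });
+//   // op(2, 3) invokes the lambda; a mismatched return type is a compile error.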
+_LIBCUDACXX_END_NAMESPACE_CUDA
+
+#endif // _CUDA_FUNCTIONAL_
+
diff --git a/miniCUDA124/include/cuda/latch b/miniCUDA124/include/cuda/latch
new file mode 100644
index 0000000000000000000000000000000000000000..e78c1287505173c76dee9ae1e8012473f77a3807
--- /dev/null
+++ b/miniCUDA124/include/cuda/latch
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_LATCH
+#define _CUDA_LATCH
+
+#include "std/latch"
+
+#endif // _CUDA_LATCH
diff --git a/miniCUDA124/include/cuda/memory_resource b/miniCUDA124/include/cuda/memory_resource
new file mode 100644
index 0000000000000000000000000000000000000000..a95ab201aa59a92a9e520734b58a07d7ef1c6696
--- /dev/null
+++ b/miniCUDA124/include/cuda/memory_resource
@@ -0,0 +1,632 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_MEMORY_RESOURCE
+#define _CUDA_MEMORY_RESOURCE
+
+// clang-format off
+/*
+    memory_resource synopsis
+namespace cuda {
+namespace mr {
+template <class Resource>
+concept resource = equality_comparable<Resource>
+  && requires(Resource& res, void* ptr, size_t size, size_t alignment) {
+    { res.allocate(size, alignment) } -> same_as<void*>;
+    { res.deallocate(ptr, size, alignment) } -> same_as<void>;
+  };
+
+template <class Resource>
+concept async_resource = resource<Resource>
+  && requires(Resource& res, void* ptr, size_t size, size_t alignment, cuda_stream_ref stream) {
+    { res.allocate_async(size, alignment, stream) } -> same_as<void*>;
+    { res.deallocate_async(ptr, size, alignment, stream) } -> same_as<void>;
+  };
+
+template <class Resource, class Property>
+concept has_property = resource<Resource> && requires(const Resource& res, Property prop) {
+  get_property(res, prop);
+};
+
+template <class Property>
+concept property_with_value = requires {
+  typename Property::value_type;
+};
+
+template <class Resource, class Property, class Return>
+concept has_property_with = resource<Resource>
+  && property_with_value<Property>
+  && same_as<Return, typename Property::value_type>
+  && requires(const Resource& res, Property prop) {
+    get_property(res, prop) -> Return;
+  };
+
+template <class Resource, class... Properties>
+concept resource_with = resource<Resource> && (has_property<Resource, Properties> && ...);
+
+template <class Resource, class... Properties>
+concept async_resource_with = async_resource<Resource> && (has_property<Resource, Properties> && ...);
+
+template <class... Properties>
+class resource_ref {
+  template <resource_with<Properties...> Resource>
+  resource_ref(Resource&) noexcept;
+
+  void* allocate(size_t size, size_t alignment);
+  void deallocate(void* ptr, size_t size, size_t alignment);
+
+  template <class... OtherProperties>
+    requires resource_with<resource_ref<OtherProperties...>, Properties...>
+          && resource_with<resource_ref<Properties...>, OtherProperties...>
+  friend bool operator==(const resource_ref& left, const resource_ref<OtherProperties...>& right);
+
+  template <class Property>
+    requires has_property<resource_ref, Property>
+  friend typename Property::value_type get_property(const resource_ref& ref, Property) noexcept;
+
+  template <class Property>
+    requires (has_property<resource_ref, Property> && !property_with_value<Property>)
+  friend void get_property(const resource_ref& ref, Property) noexcept;
+};
+
+} // mr
+} // cuda
+*/
+// clang-format on
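+
+// A minimal host-side resource satisfying the synopsis above (illustrative,
+// not part of the upstream header; `MyMallocResource` is hypothetical and
+// assumes C++17 aligned new/delete):
+//
+//   struct MyMallocResource {
+//     void* allocate(size_t bytes, size_t alignment) {
+//       return ::operator new(bytes, std::align_val_t{alignment});
+//     }
+//     void deallocate(void* ptr, size_t bytes, size_t alignment) {
+//       ::operator delete(ptr, bytes, std::align_val_t{alignment});
+//     }
+//     bool operator==(const MyMallocResource&) const { return true; }
+//     bool operator!=(const MyMallocResource&) const { return false; }
+//     friend void get_property(const MyMallocResource&, cuda::mr::host_accessible) {}
+//   };
+//
+//   MyMallocResource res;
+//   cuda::mr::resource_ref<cuda::mr::host_accessible> ref{res};  // type-erased view
+//   void* p = ref.allocate(128, alignof(max_align_t));
+//   ref.deallocate(p, 128, alignof(max_align_t));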
+#ifdef LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE
+
+#include
+
+#include
+#include
+
+#include
+
+#include
+
+#if _LIBCUDACXX_STD_VER > 11
+_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
+
+///////////////////////////////////////////////////////////////////////////////
+// properties
+
+/// \concept has_property
+/// \brief The \c has_property concept
+template <class _Resource, class _Property>
+_LIBCUDACXX_CONCEPT_FRAGMENT(
+  __has_property_,
+  requires(const _Resource& __res)(
+    get_property(__res, _Property{})
+  ));
+template <class _Resource, class _Property>
+_LIBCUDACXX_CONCEPT has_property = _LIBCUDACXX_FRAGMENT(__has_property_, _Resource, _Property);
+
+/// \concept property_with_value
+/// \brief The \c property_with_value concept
+template <class _Property>
+using __property_value_t = typename _Property::value_type;
+
+template <class _Property>
+_LIBCUDACXX_CONCEPT_FRAGMENT(
+  __property_with_value_,
+  requires()(
+    typename(__property_value_t<_Property>)
+  ));
+template <class _Property>
+_LIBCUDACXX_CONCEPT property_with_value = _LIBCUDACXX_FRAGMENT(__property_with_value_, _Property);
+
+/// \concept has_property_with
+/// \brief The \c has_property_with concept
+template <class _Resource, class _Property, class _Return>
+_LIBCUDACXX_CONCEPT_FRAGMENT(
+  __has_property_with_,
+  requires(const _Resource& __res)(
+    requires(property_with_value<_Property>),
+    requires(_CUDA_VSTD::same_as<_Return, decltype(get_property(__res, _Property{}))>)
+  ));
+template <class _Resource, class _Property, class _Return>
+_LIBCUDACXX_CONCEPT has_property_with = _LIBCUDACXX_FRAGMENT(__has_property_with_, _Resource, _Property, _Return);
+
+/// \concept __has_upstream_resource
+/// \brief The \c __has_upstream_resource concept
+template <class _Resource, class _Upstream>
+_LIBCUDACXX_CONCEPT_FRAGMENT(
+  __has_upstream_resource_,
+  requires(const _Resource& __res)(
+    requires(_CUDA_VSTD::same_as<_CUDA_VSTD::__remove_const_ref_t<decltype(__res.upstream_resource())>, _Upstream>)
+  ));
+template <class _Resource, class _Upstream>
+_LIBCUDACXX_CONCEPT __has_upstream_resource = _LIBCUDACXX_FRAGMENT(__has_upstream_resource_, _Resource, _Upstream);
+
+/// class forward_property
+/// \brief The \c forward_property crtp template simplifies the user facing side of forwarding properties
+/// We can just derive from it to properly forward all properties
+_LIBCUDACXX_BEGIN_NAMESPACE_CPO(__forward_property)
+template <class _Derived, class _Upstream>
+struct __fn {
+  _LIBCUDACXX_DISABLE_EXEC_CHECK
+  _LIBCUDACXX_TEMPLATE(class _Property)
+  _LIBCUDACXX_REQUIRES( (!property_with_value<_Property>) _LIBCUDACXX_AND has_property<_Upstream, _Property>)
+  _LIBCUDACXX_INLINE_VISIBILITY friend constexpr void get_property(const _Derived&, _Property) noexcept {}
+
+  // The indirection is needed, otherwise the compiler might believe that _Derived is an incomplete type
+  _LIBCUDACXX_DISABLE_EXEC_CHECK
+  _LIBCUDACXX_TEMPLATE(class _Property, class _Derived2 = _Derived)
+  _LIBCUDACXX_REQUIRES( property_with_value<_Property> _LIBCUDACXX_AND has_property<_Upstream, _Property> _LIBCUDACXX_AND
+    __has_upstream_resource<_Derived2, _Upstream>)
+  _LIBCUDACXX_INLINE_VISIBILITY friend constexpr __property_value_t<_Property> get_property(
+    const _Derived& __res, _Property __prop) {
+    return get_property(__res.upstream_resource(), __prop);
+  }
+};
+_LIBCUDACXX_END_NAMESPACE_CPO
+
+template <class _Derived, class _Upstream>
+using forward_property = __forward_property::__fn<_Derived, _Upstream>;
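+
+// Illustrative sketch (not part of the upstream header; `LoggingResource` and
+// `__upstream` are hypothetical): an adaptor that exposes upstream_resource()
+// can derive from forward_property to re-export its upstream's properties:
+//
+//   template <class _Upstream>
+//   struct LoggingResource
+//       : forward_property<LoggingResource<_Upstream>, _Upstream> {
+//     const _Upstream& upstream_resource() const noexcept { return *__upstream; }
+//     _Upstream* __upstream;
+//   };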
+/// class get_property
+/// \brief The \c get_property customization point object dispatches to the
+/// \c get_property overload found for a given resource and property
+_LIBCUDACXX_BEGIN_NAMESPACE_CPO(__get_property)
+struct __fn {
+  _LIBCUDACXX_DISABLE_EXEC_CHECK
+  _LIBCUDACXX_TEMPLATE(class _Upstream, class _Property)
+  _LIBCUDACXX_REQUIRES( (!property_with_value<_Property>) _LIBCUDACXX_AND has_property<_Upstream, _Property>)
+  _LIBCUDACXX_INLINE_VISIBILITY constexpr void operator()(const _Upstream&, _Property) const noexcept {}
+
+  _LIBCUDACXX_DISABLE_EXEC_CHECK
+  _LIBCUDACXX_TEMPLATE(class _Upstream, class _Property)
+  _LIBCUDACXX_REQUIRES( (property_with_value<_Property>) _LIBCUDACXX_AND has_property<_Upstream, _Property>)
+  _LIBCUDACXX_INLINE_VISIBILITY constexpr __property_value_t<_Property> operator()(
+    const _Upstream& __res, _Property __prop) const {
+    return get_property(__res, __prop);
+  }
+};
+_LIBCUDACXX_END_NAMESPACE_CPO
+
+inline namespace __cpo {
+  _LIBCUDACXX_CPO_ACCESSIBILITY auto get_property = __get_property::__fn{};
+} // namespace __cpo
+
+namespace mr
+{
+
+///////////////////////////////////////////////////////////////////////////////
+// memory_resource
+
+/// \concept resource
+/// \brief The \c resource concept
+template <class _Resource>
+_LIBCUDACXX_CONCEPT_FRAGMENT(
+  __resource_,
+  requires(_Resource& __res, void* __ptr, size_t __bytes, size_t __alignment) (
+    requires(_CUDA_VSTD::same_as<void*, decltype(__res.allocate(__bytes, __alignment))>),
+    requires(_CUDA_VSTD::same_as<void, decltype(__res.deallocate(__ptr, __bytes, __alignment))>),
+    requires(_CUDA_VSTD::equality_comparable<_Resource>)
+  ));
+
+template <class _Resource>
+_LIBCUDACXX_CONCEPT resource = _LIBCUDACXX_FRAGMENT(__resource_, _Resource);
+
+/// \concept async_resource
+/// \brief The \c async_resource concept
+template <class _Resource>
+_LIBCUDACXX_CONCEPT_FRAGMENT(
+  __async_resource_,
+  requires(_Resource& __res, void* __ptr, size_t __bytes, size_t __alignment, cuda::stream_ref __stream) (
+    requires(resource<_Resource>),
+    requires(_CUDA_VSTD::same_as<void*, decltype(__res.allocate_async(__bytes, __alignment, __stream))>),
+    requires(_CUDA_VSTD::same_as<void, decltype(__res.deallocate_async(__ptr, __bytes, __alignment, __stream))>),
+    requires(_CUDA_VSTD::equality_comparable<_Resource>)
+  ));
+
+template <class _Resource>
+_LIBCUDACXX_CONCEPT async_resource = _LIBCUDACXX_FRAGMENT(__async_resource_, _Resource);
+
+/// \concept resource_with
+/// \brief The \c resource_with concept
+template <class _Resource, class... _Properties>
+#if _LIBCUDACXX_STD_VER < 17
+_LIBCUDACXX_CONCEPT resource_with =
+  resource<_Resource>&& _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<has_property<_Resource, _Properties>>...>;
+#else
+_LIBCUDACXX_CONCEPT resource_with = resource<_Resource> && (has_property<_Resource, _Properties> && ...);
+#endif
+
+/// \concept async_resource_with
+/// \brief The \c async_resource_with concept
+template <class _Resource, class... _Properties>
+#if _LIBCUDACXX_STD_VER < 17
+_LIBCUDACXX_CONCEPT async_resource_with = async_resource<_Resource> &&
+  _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<has_property<_Resource, _Properties>>...>;
+#else
+_LIBCUDACXX_CONCEPT async_resource_with = async_resource<_Resource> &&
+  (has_property<_Resource, _Properties> && ...);
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+/// class resource_ref
+/// class async_resource_ref
+enum class _AllocType
+{
+  _Default,
+  _Async,
+};
+
+struct _Alloc_vtable
+{
+  using _AllocFn   = void* (*)(void*, size_t, size_t);
+  using _DeallocFn = void (*)(void*, void*, size_t, size_t);
+  using _EqualFn   = bool (*)(void*, void*);
+
+  _AllocFn __alloc_fn;
+  _DeallocFn __dealloc_fn;
+  _EqualFn __equal_fn;
+
+  constexpr _Alloc_vtable(_AllocFn __alloc_fn_, _DeallocFn __dealloc_fn_, _EqualFn __equal_fn_) noexcept
+      : __alloc_fn(__alloc_fn_)
+      , __dealloc_fn(__dealloc_fn_)
+      , __equal_fn(__equal_fn_)
+  {}
+};
+
+struct _Async_alloc_vtable : public _Alloc_vtable
+{
+  using _AsyncAllocFn   = void* (*)(void*, size_t, size_t, cuda::stream_ref);
+  using _AsyncDeallocFn = void (*)(void*, void*, size_t, size_t, cuda::stream_ref);
+
+  _AsyncAllocFn __async_alloc_fn;
+  _AsyncDeallocFn __async_dealloc_fn;
+
+  constexpr _Async_alloc_vtable(_Alloc_vtable::_AllocFn __alloc_fn_,
+                                _Alloc_vtable::_DeallocFn __dealloc_fn_,
+                                _Alloc_vtable::_EqualFn __equal_fn_,
+                                _AsyncAllocFn __async_alloc_fn_,
+                                _AsyncDeallocFn __async_dealloc_fn_) noexcept
+      : _Alloc_vtable(__alloc_fn_, __dealloc_fn_, __equal_fn_)
+      , __async_alloc_fn(__async_alloc_fn_)
+      , __async_dealloc_fn(__async_dealloc_fn_)
+  {}
+};
+// clang-format off
+struct _Resource_vtable_builder
+{
+  template <class _Resource, class _Property>
+  static __property_value_t<_Property> _Get_property(void* __res) noexcept {
+    return get_property(*static_cast<const _Resource*>(__res), _Property{});
+  }
+
+  template <class _Resource>
+  static void* _Alloc(void* __object, size_t __bytes, size_t __alignment) {
+    return static_cast<_Resource *>(__object)->allocate(__bytes, __alignment);
+  }
+
+  template <class _Resource>
+  static void _Dealloc(void* __object, void* __ptr, size_t __bytes, size_t __alignment) {
+    return static_cast<_Resource *>(__object)->deallocate(__ptr, __bytes, __alignment);
+  }
+
+  template <class _Resource>
+  static void* _Alloc_async(void* __object, size_t __bytes, size_t __alignment, cuda::stream_ref __stream) {
+    return static_cast<_Resource *>(__object)->allocate_async(__bytes, __alignment, __stream);
+  }
+
+  template <class _Resource>
+  static void _Dealloc_async(void* __object, void* __ptr, size_t __bytes, size_t __alignment, cuda::stream_ref __stream) {
+    return static_cast<_Resource *>(__object)->deallocate_async(__ptr, __bytes, __alignment, __stream);
+  }
+
+  template <class _Resource>
+  static bool _Equal(void* __left, void* __right) {
+    return *static_cast<_Resource *>(__left) == *static_cast<_Resource *>(__right);
+  }
+
+  _LIBCUDACXX_TEMPLATE(class _Resource, _AllocType _Alloc_type)
+  _LIBCUDACXX_REQUIRES((_Alloc_type == _AllocType::_Default)) //
+  static constexpr _Alloc_vtable _Create() noexcept
+  {
+    return {&_Resource_vtable_builder::_Alloc<_Resource>,
+            &_Resource_vtable_builder::_Dealloc<_Resource>,
+            &_Resource_vtable_builder::_Equal<_Resource>};
+  }
+
+  _LIBCUDACXX_TEMPLATE(class _Resource, _AllocType _Alloc_type)
+  _LIBCUDACXX_REQUIRES((_Alloc_type == _AllocType::_Async)) //
+  static constexpr _Async_alloc_vtable _Create() noexcept
+  {
+    return {&_Resource_vtable_builder::_Alloc<_Resource>,
+            &_Resource_vtable_builder::_Dealloc<_Resource>,
+            &_Resource_vtable_builder::_Equal<_Resource>,
+            &_Resource_vtable_builder::_Alloc_async<_Resource>,
+            &_Resource_vtable_builder::_Dealloc_async<_Resource>};
+  }
+};
+// clang-format on
+
+template <class _Property>
+struct _Property_vtable
+{
+  using _PropertyFn = __property_value_t<_Property> (*)(void*);
+  _PropertyFn __property_fn = nullptr;
+
+  constexpr _Property_vtable(_PropertyFn __property_fn_) noexcept
+      : __property_fn(__property_fn_)
+  {}
+};
+
+template <_AllocType _Alloc_type, class... _Properties> //
+class basic_resource_ref;
+template <class... _Properties>
+struct _Resource_vtable : public _Property_vtable<_Properties>...
+{
+  template <class... _PropertyFns>
+  constexpr _Resource_vtable(_PropertyFns... __property_fn_) noexcept
+      : _Property_vtable<_Properties>(__property_fn_)...
+  {}
+
+  template <_AllocType _Alloc_type, class... _OtherProperties>
+  constexpr _Resource_vtable(basic_resource_ref<_Alloc_type, _OtherProperties...>& __ref) noexcept
+      : _Property_vtable<_Properties>(__ref._Property_vtable<_Properties>::__property_fn)...
+  {}
+
+  template <class _Resource>
+  static constexpr _Resource_vtable _Create() noexcept
+  {
+    return {&_Resource_vtable_builder::_Get_property<_Resource, _Properties>...};
+  }
+};
+
+template <class... _Properties>
+struct _Filtered;
+
+template <bool _IsUniqueProperty>
+struct _Property_filter
+{
+  template <class _Property, class... _Properties>
+  using _Filtered_properties =
+    typename _Filtered<_Properties...>::_Filtered_vtable::template _Append_property<_Property>;
+};
+template <>
+struct _Property_filter<false>
+{
+  template <class _Property, class... _Properties>
+  using _Filtered_properties = typename _Filtered<_Properties...>::_Filtered_vtable;
+};
+
+template <class _Property, class... _Properties>
+struct _Filtered<_Property, _Properties...>
+{
+  using _Filtered_vtable =
+    typename _Property_filter<property_with_value<_Property> && !_CUDA_VSTD::_One_of<_Property, _Properties...>>::
+      template _Filtered_properties<_Property, _Properties...>;
+
+  template <class _OtherProperty>
+  using _Append_property = _Filtered<_OtherProperty, _Property, _Properties...>;
+
+  using _Vtable = _Resource_vtable<_Property, _Properties...>;
+};
+
+template <>
+struct _Filtered<>
+{
+  using _Filtered_vtable = _Filtered<>;
+
+  template <class _OtherProperty>
+  using _Append_property = _Filtered<_OtherProperty>;
+
+  using _Vtable = _Resource_vtable<>;
+};
+
+template <class... _Properties>
+using _Filtered_vtable = typename _Filtered<_Properties...>::_Filtered_vtable::_Vtable;
+
+template <class _Vtable>
+struct _Alloc_base
+{
+  static_assert(_CUDA_VSTD::is_base_of_v<_Alloc_vtable, _Vtable>, "");
+
+  _Alloc_base(void* __object_, const _Vtable* __static_vtabl_) noexcept
+      : __object(__object_)
+      , __static_vtable(__static_vtabl_)
+  {}
+
+  void* allocate(size_t __bytes, size_t __alignment = alignof(max_align_t))
+  {
+    return __static_vtable->__alloc_fn(__object, __bytes, __alignment);
+  }
+
+  void deallocate(void* _Ptr, size_t __bytes, size_t __alignment = alignof(max_align_t))
+  {
+    __static_vtable->__dealloc_fn(__object, _Ptr, __bytes, __alignment);
+  }
+
+protected:
+  void* __object = nullptr;
+  const _Vtable* __static_vtable = nullptr;
+};
+
+template <class _Vtable>
+struct _Async_alloc_base : public _Alloc_base<_Vtable>
+{
+  static_assert(_CUDA_VSTD::is_base_of_v<_Async_alloc_vtable, _Vtable>, "");
+
+  _Async_alloc_base(void* __object_, const _Vtable* __static_vtabl_) noexcept
+      : _Alloc_base<_Vtable>(__object_, __static_vtabl_)
+  {}
+
+  void* allocate_async(size_t __bytes, size_t __alignment, cuda::stream_ref __stream)
+  {
+    return this->__static_vtable->__async_alloc_fn(this->__object, __bytes, __alignment, __stream);
+  }
+
+  void* allocate_async(size_t __bytes, cuda::stream_ref __stream)
+  {
+    return this->__static_vtable->__async_alloc_fn(this->__object, __bytes, alignof(max_align_t), __stream);
+  }
+
+  void deallocate_async(void* _Ptr, size_t __bytes, cuda::stream_ref __stream)
+  {
+    this->__static_vtable->__async_dealloc_fn(this->__object, _Ptr, __bytes, alignof(max_align_t), __stream);
+  }
+
+  void deallocate_async(void* _Ptr, size_t __bytes, size_t __alignment, cuda::stream_ref __stream)
+  {
+    this->__static_vtable->__async_dealloc_fn(this->__object, _Ptr, __bytes, __alignment, __stream);
+  }
+};
+
+template <_AllocType _Alloc_type>
+using _Resource_ref_base = _CUDA_VSTD::
+  _If<_Alloc_type == _AllocType::_Default, _Alloc_base<_Alloc_vtable>, _Async_alloc_base<_Async_alloc_vtable>>;
+
+template <_AllocType _Alloc_type>
+using _Vtable_store = _CUDA_VSTD::_If<_Alloc_type == _AllocType::_Default, _Alloc_vtable, _Async_alloc_vtable>;
+
+template <_AllocType _Alloc_type, class _Resource>
+_LIBCUDACXX_INLINE_VAR constexpr _Vtable_store<_Alloc_type>
+  __alloc_vtable = _Resource_vtable_builder::template _Create<_Resource, _Alloc_type>();
+
+template <class _Resource>
+_LIBCUDACXX_INLINE_VAR constexpr bool _Is_basic_resource_ref =
+    false;
+
+template <_AllocType _Alloc_type, class... _Properties> //
+class basic_resource_ref
+    : public _Resource_ref_base<_Alloc_type>
+    , private _Filtered_vtable<_Properties...>
+{
+private:
+  template <_AllocType, class...>
+  friend class basic_resource_ref;
+
+  template <class...>
+  friend struct _Resource_vtable;
+
+public:
+  // clang-format off
+  _LIBCUDACXX_TEMPLATE(class _Resource)
+  _LIBCUDACXX_REQUIRES( (!_Is_basic_resource_ref<_Resource>
+    && (((_Alloc_type == _AllocType::_Default) && resource_with<_Resource, _Properties...>) //
+      ||((_Alloc_type == _AllocType::_Async) && async_resource_with<_Resource, _Properties...>)))) //
+  basic_resource_ref(_Resource& __res) noexcept
+      : _Resource_ref_base<_Alloc_type>(_CUDA_VSTD::addressof(__res), &__alloc_vtable<_Alloc_type, _Resource>)
+      , _Filtered_vtable<_Properties...>(_Filtered_vtable<_Properties...>::template _Create<_Resource>())
+  {}
+
+  _LIBCUDACXX_TEMPLATE(class _Resource)
+  _LIBCUDACXX_REQUIRES( (!_Is_basic_resource_ref<_Resource>
+    && (((_Alloc_type == _AllocType::_Default) && resource_with<_Resource, _Properties...>) //
+      ||((_Alloc_type == _AllocType::_Async) && async_resource_with<_Resource, _Properties...>)))) //
+  basic_resource_ref(_Resource* __res) noexcept
+      : _Resource_ref_base<_Alloc_type>(__res, &__alloc_vtable<_Alloc_type, _Resource>)
+      , _Filtered_vtable<_Properties...>(_Filtered_vtable<_Properties...>::template _Create<_Resource>())
+  {}
+
+  #if _LIBCUDACXX_STD_VER > 14
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
+  #else
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
+    _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
+  #endif
+  basic_resource_ref(
+    basic_resource_ref<_Alloc_type, _OtherProperties...> __ref) noexcept
+      : _Resource_ref_base<_Alloc_type>(__ref.__object, __ref.__static_vtable)
+      , _Filtered_vtable<_Properties...>(__ref)
+  {}
+
+  #if _LIBCUDACXX_STD_VER > 14
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( (_Alloc_type == _AllocType::_Default)
+    && (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
+  #else
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( (_Alloc_type == _AllocType::_Default)
+    && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
+        _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
+  #endif
+  basic_resource_ref(
+    basic_resource_ref<_AllocType::_Async, _OtherProperties...> __ref) noexcept
+      : _Resource_ref_base<_Alloc_type>(__ref.__object, __ref.__static_vtable)
+      , _Filtered_vtable<_Properties...>(__ref)
+  {}
+
+  #if _LIBCUDACXX_STD_VER > 14
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES((sizeof...(_Properties) == sizeof...(_OtherProperties))
+    && (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
+  #else
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( (sizeof...(_Properties) == sizeof...(_OtherProperties))
+    && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
+        _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
+  #endif
+  bool operator==(
+    const basic_resource_ref<_Alloc_type, _OtherProperties...> &__right) const {
+    return (this->__static_vtable->__equal_fn == __right.__static_vtable->__equal_fn) //
+        && this->__static_vtable->__equal_fn(this->__object, __right.__object);
+  }
+  #if _LIBCUDACXX_STD_VER > 14
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( (sizeof...(_Properties) == sizeof...(_OtherProperties))
+    && (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
+  #else
+  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
+  _LIBCUDACXX_REQUIRES( (sizeof...(_Properties) == sizeof...(_OtherProperties))
+    && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
+        _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
+  #endif
+  bool operator!=(
+    const basic_resource_ref<_Alloc_type, _OtherProperties...> &__right) const {
+    return !(*this == __right);
+  }
+
+  _LIBCUDACXX_TEMPLATE(class _Property)
+  _LIBCUDACXX_REQUIRES( (!property_with_value<_Property>) _LIBCUDACXX_AND _CUDA_VSTD::_One_of<_Property, _Properties...>) //
+  friend void get_property(const basic_resource_ref &, _Property) noexcept {}
+
+  _LIBCUDACXX_TEMPLATE(class _Property)
+  _LIBCUDACXX_REQUIRES( property_with_value<_Property> _LIBCUDACXX_AND _CUDA_VSTD::_One_of<_Property, _Properties...>) //
+  friend __property_value_t<_Property> get_property(
+    const basic_resource_ref &__res, _Property) noexcept {
+    return __res._Property_vtable<_Property>::__property_fn(__res.__object);
+  }
+  // clang-format on
+};
+
+template <_AllocType _Alloc_type, class... _Properties>
+_LIBCUDACXX_INLINE_VAR constexpr bool _Is_basic_resource_ref<basic_resource_ref<_Alloc_type, _Properties...>> = true;
+
+template <class... _Properties> //
+using resource_ref = basic_resource_ref<_AllocType::_Default, _Properties...>;
+
+template <class... _Properties> //
+using async_resource_ref = basic_resource_ref<_AllocType::_Async, _Properties...>;
+
+/// \struct device_accessible
+/// \brief The \c device_accessible property signals that the allocated memory is device accessible
+struct device_accessible{};
+
+/// \struct host_accessible
+/// \brief The \c host_accessible property signals that the allocated memory is host accessible
+struct host_accessible{};
+
+} // namespace mr
+_LIBCUDACXX_END_NAMESPACE_CUDA
+#endif // _LIBCUDACXX_STD_VER > 11
+
+#include
+
+#endif // LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE
+
+#endif //_CUDA_MEMORY_RESOURCE
diff --git a/miniCUDA124/include/cuda/pipeline b/miniCUDA124/include/cuda/pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..b00b65e72d30b7bf4c8cb44e8f73bef18fef8dc8
--- /dev/null
+++ b/miniCUDA124/include/cuda/pipeline
@@ -0,0 +1,585 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA SOFTWARE LICENSE
+ *
+ * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”).
+ *
+ * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users.
+ *
+ * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions.
+ *
+ * 1. LICENSE.
Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + * + * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + * + * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: + * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. 
You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. 
Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + * + * (v. 
August 20, 2021) + */ +#ifndef _CUDA_PIPELINE +#define _CUDA_PIPELINE + +#include "barrier" +#include "atomic" +#include "std/chrono" + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + + // Forward declaration in barrier of pipeline + enum class pipeline_role { + producer, + consumer + }; + + template + struct __pipeline_stage { + barrier<_Scope> __produced; + barrier<_Scope> __consumed; + }; + + template + class pipeline_shared_state { + public: + pipeline_shared_state() = default; + pipeline_shared_state(const pipeline_shared_state &) = delete; + pipeline_shared_state(pipeline_shared_state &&) = delete; + pipeline_shared_state & operator=(pipeline_shared_state &&) = delete; + pipeline_shared_state & operator=(const pipeline_shared_state &) = delete; + + private: + __pipeline_stage<_Scope> __stages[_Stages_count]; + atomic __refcount; + + template + friend class pipeline; + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role); + }; + + struct __pipeline_asm_helper { + _LIBCUDACXX_DEVICE + static inline uint32_t __lane_id() + { + NV_IF_ELSE_TARGET( + NV_IS_DEVICE, + ( + uint32_t __lane_id; + asm volatile ("mov.u32 %0, %%laneid;" : "=r"(__lane_id)); + return __lane_id; + ), + ( + return 0; + ) + ) + } + }; + + template + class pipeline { + public: + pipeline(pipeline &&) = default; + pipeline(const pipeline &) = delete; + pipeline & operator=(pipeline &&) = delete; + pipeline & operator=(const pipeline &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + ~pipeline() + { + if (__active) { + (void)quit(); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool quit() + { + bool __elected; + uint32_t __sub_count; +NV_IF_TARGET(NV_IS_DEVICE, + const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast(__shared_state_get_refcount())); + const uint32_t __elected_id = __ffs(__match_mask) - 1; + __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); + __sub_count = __popc(__match_mask); +, + __elected = true; + __sub_count = 1; +) + bool __released = false; + if (__elected) { + const uint32_t __old = __shared_state_get_refcount()->fetch_sub(__sub_count); + const bool __last = (__old == __sub_count); + if (__last) { + for (uint8_t __stage = 0; __stage < __stages_count; ++__stage) { + __shared_state_get_stage(__stage)->__produced.~barrier(); + __shared_state_get_stage(__stage)->__consumed.~barrier(); + } + __released = true; + } + } + __active = false; + return __released; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_acquire() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__consumed; + __stage_barrier.wait_parity(__consumed_phase_parity); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_commit() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__produced; + (void)__memcpy_completion_impl::__defer(__completion_mechanism::__async_group, __single_thread_group{}, 0, __stage_barrier); + (void)__stage_barrier.arrive(); + if (++__head == 
__stages_count) { + __head = 0; + __consumed_phase_parity = !__consumed_phase_parity; + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_wait() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced; + __stage_barrier.wait_parity(__produced_phase_parity); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_release() + { + (void)__shared_state_get_stage(__tail)->__consumed.arrive(); + if (++__tail == __stages_count) { + __tail = 0; + __produced_phase_parity = !__produced_phase_parity; + } + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration) + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced; + return _CUDA_VSTD::__libcpp_thread_poll_with_backoff( + _CUDA_VSTD::__barrier_poll_tester_parity>( + &__stage_barrier, + __produced_phase_parity), + _CUDA_VSTD::chrono::duration_cast<_CUDA_VSTD::chrono::nanoseconds>(__duration) + ); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point) + { + return consumer_wait_for(__time_point - _Clock::now()); + } + + private: + uint8_t __head : 8; + uint8_t __tail : 8; + const uint8_t __stages_count : 8; + bool __consumed_phase_parity : 1; + bool __produced_phase_parity : 1; + bool __active : 1; + // TODO: Remove partitioned on next ABI break + const bool __partitioned : 1; + char * const __shared_state; + + + _LIBCUDACXX_INLINE_VISIBILITY + pipeline(char * __shared_state, uint8_t __stages_count, bool __partitioned) + : __head(0) + , __tail(0) + , __stages_count(__stages_count) + , __consumed_phase_parity(true) + , __produced_phase_parity(false) + , __active(true) + , __partitioned(__partitioned) + , __shared_state(__shared_state) + {} + + _LIBCUDACXX_INLINE_VISIBILITY + __pipeline_stage<_Scope> * __shared_state_get_stage(uint8_t __stage) + { + ptrdiff_t __stage_offset = __stage * sizeof(__pipeline_stage<_Scope>); + return reinterpret_cast<__pipeline_stage<_Scope>*>(__shared_state + __stage_offset); + } + + _LIBCUDACXX_INLINE_VISIBILITY + atomic * __shared_state_get_refcount() + { + ptrdiff_t __refcount_offset = __stages_count * sizeof(__pipeline_stage<_Scope>); + return reinterpret_cast*>(__shared_state + __refcount_offset); + } + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role); + }; + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state) + { + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __group_size); + init(&__shared_state->__stages[__stage].__produced, __group_size); + } + 
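+            // Note: each pipeline stage owns a produced/consumed barrier pair, both
+            // initialized here for the full group. The reference count stored just
+            // below lets quit() detect the last participating thread, so that the
+            // stage barriers are destroyed exactly once.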
__shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, false); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, size_t __producer_count) + { + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + const size_t __consumer_count = __group_size - __producer_count; + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __consumer_count); + init(&__shared_state->__stages[__stage].__produced, __producer_count); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, pipeline_role __role) + { + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + __shared_state->__refcount.store(0, std::memory_order_relaxed); + } + __group.sync(); + + if (__role == pipeline_role::producer) { + bool __elected; + uint32_t __add_count; +NV_IF_TARGET(NV_IS_DEVICE, + const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast(&__shared_state->__refcount)); + const uint32_t __elected_id = __ffs(__match_mask) - 1; + __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); + __add_count = __popc(__match_mask); +, + __elected = true; + __add_count = 1; +) + if (__elected) { + (void)__shared_state->__refcount.fetch_add(__add_count, std::memory_order_relaxed); + } + } + __group.sync(); + + if (__thread_rank == 0) { + const uint32_t __producer_count = __shared_state->__refcount.load(std::memory_order_relaxed); + const uint32_t __consumer_count = __group_size - __producer_count; + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __consumer_count); + init(&__shared_state->__stages[__stage].__produced, __producer_count); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); + } + +_LIBCUDACXX_END_NAMESPACE_CUDA + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE + + template + _LIBCUDACXX_DEVICE + void __pipeline_consumer_wait(pipeline & __pipeline); + + _LIBCUDACXX_DEVICE + inline void __pipeline_consumer_wait(pipeline & __pipeline, uint8_t __prior); + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + + template<> + class pipeline { + public: + pipeline(pipeline &&) = default; + pipeline(const pipeline &) = delete; + pipeline & operator=(pipeline &&) = delete; + pipeline & operator=(const pipeline &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + ~pipeline() {} + + _LIBCUDACXX_INLINE_VISIBILITY + bool quit() + { + return true; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_acquire() {} + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_commit() + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + asm volatile ("cp.async.commit_group;"); + ++__head; +) + } + + 
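+        // Note: on SM 8.0+ this thread-scope specialization maps onto the
+        // hardware async-copy group mechanism: producer_commit() above batches
+        // previously issued cp.async operations into one commit group, and
+        // consumer_wait() below waits on them with cp.async.wait_group, using
+        // __head/__tail to track how many groups may remain outstanding.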
_LIBCUDACXX_INLINE_VISIBILITY + void consumer_wait() + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + if (__head == __tail) { + return; + } + + const uint8_t __prior = __head - __tail - 1; + device::__pipeline_consumer_wait(*this, __prior); + ++__tail; +) + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_release() {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration) + { + (void)__duration; + consumer_wait(); + return true; + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point) + { + (void)__time_point; + consumer_wait(); + return true; + } + + private: + uint8_t __head; + uint8_t __tail; + + _LIBCUDACXX_INLINE_VISIBILITY + pipeline() + : __head(0) + , __tail(0) + {} + + friend _LIBCUDACXX_INLINE_VISIBILITY inline pipeline make_pipeline(); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_consumer_wait_prior(pipeline & __pipeline); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> __make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + }; + +_LIBCUDACXX_END_NAMESPACE_CUDA + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE + + template + _LIBCUDACXX_DEVICE + void __pipeline_consumer_wait(pipeline & __pipeline) + { + (void)__pipeline; +NV_IF_TARGET(NV_PROVIDES_SM_80, + constexpr uint8_t __max_prior = 8; + + asm volatile ("cp.async.wait_group %0;" + : + : "n"(_Prior < __max_prior ? _Prior : __max_prior)); +) + } + + _LIBCUDACXX_DEVICE + inline void __pipeline_consumer_wait(pipeline & __pipeline, uint8_t __prior) + { + switch (__prior) { + case 0: device::__pipeline_consumer_wait<0>(__pipeline); break; + case 1: device::__pipeline_consumer_wait<1>(__pipeline); break; + case 2: device::__pipeline_consumer_wait<2>(__pipeline); break; + case 3: device::__pipeline_consumer_wait<3>(__pipeline); break; + case 4: device::__pipeline_consumer_wait<4>(__pipeline); break; + case 5: device::__pipeline_consumer_wait<5>(__pipeline); break; + case 6: device::__pipeline_consumer_wait<6>(__pipeline); break; + case 7: device::__pipeline_consumer_wait<7>(__pipeline); break; + default: device::__pipeline_consumer_wait<8>(__pipeline); break; + } + } + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + + _LIBCUDACXX_INLINE_VISIBILITY + inline pipeline make_pipeline() + { + return pipeline(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_consumer_wait_prior(pipeline & __pipeline) + { + NV_IF_TARGET(NV_PROVIDES_SM_80, + device::__pipeline_consumer_wait<_Prior>(__pipeline); + __pipeline.__tail = __pipeline.__head - _Prior; + ) + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_producer_commit(pipeline & __pipeline, barrier<_Scope> & __barrier) + { + (void)__pipeline; + NV_IF_TARGET(NV_PROVIDES_SM_80,( + (void)__memcpy_completion_impl::__defer(__completion_mechanism::__async_group, __single_thread_group{}, 0, __barrier); + )); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment __memcpy_async_pipeline(_Group const & __group, _Tp * __destination, _Tp const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + // 1. Set the completion mechanisms that can be used. + // + // Do not (yet) allow async_bulk_group completion. Do not allow + // mbarrier_complete_tx completion, even though it may be possible if + // the pipeline has stage barriers in shared memory. 
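+    // Since only the cp.async group mechanism is allowed, the completion
+    // mechanism returned by the dispatch below is one that the final
+    // __memcpy_completion_impl::__defer call can always handle.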
+ _CUDA_VSTD::uint32_t __allowed_completions = _CUDA_VSTD::uint32_t(__completion_mechanism::__async_group); + + // Alignment: Use the maximum of the alignment of _Tp and that of a possible cuda::aligned_size_t. + constexpr _CUDA_VSTD::size_t __size_align = __get_size_align<_Size>::align; + constexpr _CUDA_VSTD::size_t __align = (alignof(_Tp) < __size_align) ? __size_align : alignof(_Tp); + // Cast to char pointers. We don't need the type for alignment anymore and + // erasing the types reduces the number of instantiations of down-stream + // functions. + char * __dest_char = reinterpret_cast(__destination); + char const * __src_char = reinterpret_cast(__source); + + // 2. Issue actual copy instructions. + auto __cm = __dispatch_memcpy_async<__align>(__group, __dest_char, __src_char, __size, __allowed_completions); + + // 3. No need to synchronize with copy instructions. + return __memcpy_completion_impl::__defer(__cm, __group, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline); + } + + template _Alignment) ? alignof(_Type) : _Alignment> + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Type * __destination, _Type const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__single_thread_group{}, __destination, __source, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, void * __destination, void const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, void * __destination, void const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(void * __destination, void const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__single_thread_group{}, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + +_LIBCUDACXX_END_NAMESPACE_CUDA + +#endif //_CUDA_PIPELINE diff --git a/miniCUDA124/include/cuda/ptx b/miniCUDA124/include/cuda/ptx new file mode 100644 index 0000000000000000000000000000000000000000..ab6ed62d9d288424c22e4b98f241582defc45068 --- /dev/null +++ b/miniCUDA124/include/cuda/ptx @@ -0,0 +1,23 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_PTX +#define _CUDA_PTX + +#include "std/detail/__config" + +#include "std/detail/__pragma_push" + +#include "std/detail/libcxx/include/__cuda/ptx.h" + +#include "std/detail/__pragma_pop" + +#endif // _CUDA_PTX diff --git a/miniCUDA124/include/cuda/semaphore b/miniCUDA124/include/cuda/semaphore new file mode 100644 index 0000000000000000000000000000000000000000..e2d06d15c3198d0f7f63727856bea84ad463c9f4 --- /dev/null +++ b/miniCUDA124/include/cuda/semaphore @@ -0,0 +1,16 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_SEMAPHORE +#define _CUDA_SEMAPHORE + +#include "std/semaphore" + +#endif // _CUDA_SEMAPHORE diff --git a/miniCUDA124/include/cuda/stream_ref b/miniCUDA124/include/cuda/stream_ref new file mode 100644 index 0000000000000000000000000000000000000000..b2a83c463a943701233ab472a78fff7456ff16b3 --- /dev/null +++ b/miniCUDA124/include/cuda/stream_ref @@ -0,0 +1,191 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_STREAM_REF +#define _CUDA_STREAM_REF + +// clang-format off +/* + stream_ref synopsis +namespace cuda { +class stream_ref { + using value_type = cudaStream_t; + + stream_ref() = default; + stream_ref(cudaStream_t stream_) noexcept : stream(stream_) {} + + stream_ref(int) = delete; + stream_ref(nullptr_t) = delete; + + [[nodiscard]] value_type get() const noexcept; + + void wait() const; + + [[nodiscard]] bool ready() const; + + [[nodiscard]] friend bool operator==(stream_ref, stream_ref); + [[nodiscard]] friend bool operator!=(stream_ref, stream_ref); + +private: + cudaStream_t stream = 0; // exposition only +}; +} // cuda +*/ + +#ifdef LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE + +#include // cuda_runtime_api needs to come first +// clang-format on + +#include + +#include + +#include +#include + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +/** + * \brief A non-owning wrapper for a `cudaStream_t`. + * + * `stream_view` is a non-owning "view" type similar to `std::span` or + * `std::string_view`. \see https://en.cppreference.com/w/cpp/container/span and + * \see https://en.cppreference.com/w/cpp/string/basic_string_view + * + */ +class stream_ref +{ +private: + ::cudaStream_t __stream{0}; + +public: + using value_type = ::cudaStream_t; + + /** + * \brief Constructs a `stream_view` of the "default" CUDA stream. 
+   *
+   * For behavior of the default stream,
+   * \see
+   * https://docs.nvidia.com/cuda/cuda-runtime-api/stream-sync-behavior.html
+   *
+   */
+  stream_ref() = default;
+
+  /**
+   * \brief Constructs a `stream_view` from a `cudaStream_t` handle.
+   *
+   * This constructor provides implicit conversion from `cudaStream_t`.
+   *
+   * \note It is the caller's responsibility to ensure the `stream_view` does not
+   * outlive the stream identified by the `cudaStream_t` handle.
+   *
+   */
+  constexpr stream_ref(value_type __stream_) noexcept
+      : __stream{__stream_}
+  {}
+
+  /// Disallow construction from an `int`, e.g., `0`.
+  stream_ref(int) = delete;
+
+  /// Disallow construction from `nullptr`.
+  stream_ref(_CUDA_VSTD::nullptr_t) = delete;
+
+  /**
+   * \brief Compares two `stream_view`s for equality
+   *
+   * \note Allows comparison with `cudaStream_t` due to implicit conversion to
+   * `stream_view`.
+   *
+   * \param lhs The first `stream_view` to compare
+   * \param rhs The second `stream_view` to compare
+   * \return true if equal, false if unequal
+   */
+  _LIBCUDACXX_NODISCARD_FRIEND constexpr bool operator==(const stream_ref& __lhs, const stream_ref& __rhs) noexcept
+  {
+    return __lhs.__stream == __rhs.__stream;
+  }
+
+  /**
+   * \brief Compares two `stream_view`s for inequality
+   *
+   * \note Allows comparison with `cudaStream_t` due to implicit conversion to
+   * `stream_view`.
+   *
+   * \param lhs The first `stream_view` to compare
+   * \param rhs The second `stream_view` to compare
+   * \return true if unequal, false if equal
+   */
+  _LIBCUDACXX_NODISCARD_FRIEND constexpr bool operator!=(const stream_ref& __lhs, const stream_ref& __rhs) noexcept
+  {
+    return __lhs.__stream != __rhs.__stream;
+  }
+
+  /// Returns the wrapped `cudaStream_t` handle.
+  _LIBCUDACXX_NODISCARD_ATTRIBUTE constexpr value_type get() const noexcept { return __stream; }
+
+  /**
+   * \brief Synchronizes the wrapped stream.
+   *
+   * \throws cuda::cuda_error if synchronization fails.
+   *
+   */
+  void wait() const
+  {
+    const auto __result = ::cudaStreamSynchronize(get());
+    switch (__result)
+    {
+      case ::cudaSuccess:
+        return;
+      default:
+        ::cudaGetLastError(); // Clear CUDA error state
+#ifndef _LIBCUDACXX_NO_EXCEPTIONS
+        throw cuda::cuda_error{__result, "Failed to synchronize stream."};
+#else
+        _LIBCUDACXX_UNREACHABLE();
+#endif
+    }
+  }
+
+  /**
+   * \brief Queries if all operations on the wrapped stream have completed.
+   *
+   * \throws cuda::cuda_error if the query fails.
+   *
+   * \return `true` if all operations have completed, or `false` if not.
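+   *
+   * \par Example
+   * An illustrative polling loop (sketch only; `stream` and `do_host_work`
+   * are assumed to be defined by the caller):
+   * \code
+   * cuda::stream_ref ref{stream};
+   * while (!ref.ready()) {
+   *   do_host_work(); // overlap host work with outstanding stream work
+   * }
+   * \endcode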
+ */ + _LIBCUDACXX_NODISCARD_ATTRIBUTE bool ready() const + { + const auto __result = ::cudaStreamQuery(get()); + switch (__result) + { + case ::cudaSuccess: + return true; + case ::cudaErrorNotReady: + return false; + default: + ::cudaGetLastError(); // Clear CUDA error state +#ifndef _LIBCUDACXX_NO_EXCEPTIONS + throw cuda::cuda_error{__result, ""}; +#else + // _LIBCUDACXX_UNREACHABLE(); +#endif + return false; + } + } +}; + +_LIBCUDACXX_END_NAMESPACE_CUDA + +#include + +#endif // LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE + +#endif //_CUDA_STREAM_REF diff --git a/miniCUDA124/include/cuda/version b/miniCUDA124/include/cuda/version new file mode 100644 index 0000000000000000000000000000000000000000..cd5367497804f582d6bca27781ab6d3f3f68eaf2 --- /dev/null +++ b/miniCUDA124/include/cuda/version @@ -0,0 +1,16 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_VERSION +#define _CUDA_VERSION + +#include "std/version" + +#endif // _CUDA_VERSION diff --git a/miniCUDA124/include/nv/target b/miniCUDA124/include/nv/target new file mode 100644 index 0000000000000000000000000000000000000000..f9e7d48c16f7bdbc9423f21961731a4eb43fa415 --- /dev/null +++ b/miniCUDA124/include/nv/target @@ -0,0 +1,203 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// This header contains a preview of a portability system that enables +// CUDA C++ development with NVC++, NVCC, and supported host compilers. +// These interfaces are not guaranteed to be stable. + +#ifndef __NV_TARGET_H +#define __NV_TARGET_H + +#if defined(__NVCC__) || defined(__CUDACC_RTC__) +# define _NV_COMPILER_NVCC +#elif defined(__NVCOMPILER) && __cplusplus >= 201103L +# define _NV_COMPILER_NVCXX +#elif defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__) +// clang compiling CUDA code, device mode. 
+# define _NV_COMPILER_CLANG_CUDA +#endif + +#if (!defined(__ibmxl__)) && \ + ((defined(__cplusplus) && __cplusplus >= 201103L) || \ + (defined(_MSC_VER) && _MSVC_LANG >= 201103L)) +# define _NV_TARGET_CPP11 +#endif + + +// Hide `if target` support from NVRTC +#if defined(_NV_TARGET_CPP11) && !defined(__CUDACC_RTC__) + +#if defined(_NV_COMPILER_NVCXX) +# define _NV_BITSET_ATTRIBUTE [[nv::__target_bitset]] +#else +# define _NV_BITSET_ATTRIBUTE +#endif + +namespace nv { + namespace target { + namespace detail { + + typedef unsigned long long base_int_t; + + // No host specialization + constexpr base_int_t all_hosts = 1; + + // NVIDIA GPUs + constexpr base_int_t sm_35_bit = 1 << 1; + constexpr base_int_t sm_37_bit = 1 << 2; + constexpr base_int_t sm_50_bit = 1 << 3; + constexpr base_int_t sm_52_bit = 1 << 4; + constexpr base_int_t sm_53_bit = 1 << 5; + constexpr base_int_t sm_60_bit = 1 << 6; + constexpr base_int_t sm_61_bit = 1 << 7; + constexpr base_int_t sm_62_bit = 1 << 8; + constexpr base_int_t sm_70_bit = 1 << 9; + constexpr base_int_t sm_72_bit = 1 << 10; + constexpr base_int_t sm_75_bit = 1 << 11; + constexpr base_int_t sm_80_bit = 1 << 12; + constexpr base_int_t sm_86_bit = 1 << 13; + constexpr base_int_t sm_87_bit = 1 << 14; + constexpr base_int_t sm_89_bit = 1 << 15; + constexpr base_int_t sm_90_bit = 1 << 16; + constexpr base_int_t all_devices = + sm_35_bit | sm_37_bit | + sm_50_bit | sm_52_bit | sm_53_bit | + sm_60_bit | sm_61_bit | sm_62_bit | + sm_70_bit | sm_72_bit | sm_75_bit | + sm_80_bit | sm_86_bit | sm_87_bit | + sm_89_bit | sm_90_bit; + + // Store a set of targets as a set of bits + struct _NV_BITSET_ATTRIBUTE target_description { + base_int_t targets; + + constexpr target_description(base_int_t a) : targets(a) { } + }; + + // The type of the user-visible names of the NVIDIA GPU targets + enum class sm_selector : base_int_t { + sm_35 = 35, sm_37 = 37, + sm_50 = 50, sm_52 = 52, sm_53 = 53, + sm_60 = 60, sm_61 = 61, sm_62 = 62, + sm_70 = 70, sm_72 = 72, sm_75 = 75, + sm_80 = 80, sm_86 = 86, sm_87 = 87, + sm_89 = 89, sm_90 = 90, + }; + + constexpr base_int_t toint(sm_selector a) { + return static_cast(a); + } + + constexpr base_int_t bitexact(sm_selector a) { + return toint(a) == 35 ? sm_35_bit : + toint(a) == 37 ? sm_37_bit : + toint(a) == 50 ? sm_50_bit : + toint(a) == 52 ? sm_52_bit : + toint(a) == 53 ? sm_53_bit : + toint(a) == 60 ? sm_60_bit : + toint(a) == 61 ? sm_61_bit : + toint(a) == 62 ? sm_62_bit : + toint(a) == 70 ? sm_70_bit : + toint(a) == 72 ? sm_72_bit : + toint(a) == 75 ? sm_75_bit : + toint(a) == 80 ? sm_80_bit : + toint(a) == 86 ? sm_86_bit : + toint(a) == 87 ? sm_87_bit : + toint(a) == 89 ? sm_89_bit : + toint(a) == 90 ? sm_90_bit : 0; + } + + constexpr base_int_t bitrounddown(sm_selector a) { + return toint(a) >= 90 ? sm_90_bit : + toint(a) >= 89 ? sm_89_bit : + toint(a) >= 87 ? sm_87_bit : + toint(a) >= 86 ? sm_86_bit : + toint(a) >= 80 ? sm_80_bit : + toint(a) >= 75 ? sm_75_bit : + toint(a) >= 72 ? sm_72_bit : + toint(a) >= 70 ? sm_70_bit : + toint(a) >= 62 ? sm_62_bit : + toint(a) >= 61 ? sm_61_bit : + toint(a) >= 60 ? sm_60_bit : + toint(a) >= 53 ? sm_53_bit : + toint(a) >= 52 ? sm_52_bit : + toint(a) >= 50 ? sm_50_bit : + toint(a) >= 37 ? sm_37_bit : + toint(a) >= 35 ? 
sm_35_bit : 0; + } + + // Public API for NVIDIA GPUs + + constexpr target_description is_exactly(sm_selector a) { + return target_description(bitexact(a)); + } + + constexpr target_description provides(sm_selector a) { + return target_description(~(bitrounddown(a) - 1) & all_devices); + } + + // Boolean operations on target sets + + constexpr target_description operator&&(target_description a, + target_description b) { + return target_description(a.targets & b.targets); + } + + constexpr target_description operator||(target_description a, + target_description b) { + return target_description(a.targets | b.targets); + } + + constexpr target_description operator!(target_description a) { + return target_description(~a.targets & (all_devices | all_hosts)); + } + } + + using detail::target_description; + using detail::sm_selector; + + // The predicates for basic host/device selection + constexpr target_description is_host = + target_description(detail::all_hosts); + constexpr target_description is_device = + target_description(detail::all_devices); + constexpr target_description any_target = + target_description(detail::all_hosts | detail::all_devices); + constexpr target_description no_target = + target_description(0); + + // The public names for NVIDIA GPU architectures + constexpr sm_selector sm_35 = sm_selector::sm_35; + constexpr sm_selector sm_37 = sm_selector::sm_37; + constexpr sm_selector sm_50 = sm_selector::sm_50; + constexpr sm_selector sm_52 = sm_selector::sm_52; + constexpr sm_selector sm_53 = sm_selector::sm_53; + constexpr sm_selector sm_60 = sm_selector::sm_60; + constexpr sm_selector sm_61 = sm_selector::sm_61; + constexpr sm_selector sm_62 = sm_selector::sm_62; + constexpr sm_selector sm_70 = sm_selector::sm_70; + constexpr sm_selector sm_72 = sm_selector::sm_72; + constexpr sm_selector sm_75 = sm_selector::sm_75; + constexpr sm_selector sm_80 = sm_selector::sm_80; + constexpr sm_selector sm_86 = sm_selector::sm_86; + constexpr sm_selector sm_87 = sm_selector::sm_87; + constexpr sm_selector sm_89 = sm_selector::sm_89; + constexpr sm_selector sm_90 = sm_selector::sm_90; + + using detail::is_exactly; + using detail::provides; + } +} + +#endif // C++11 && !defined(__CUDACC_RTC__) + +#include "detail/__target_macros" + +#endif // __NV_TARGET_H diff --git a/miniCUDA124/include/nvtx3/nvToolsExt.h b/miniCUDA124/include/nvtx3/nvToolsExt.h new file mode 100644 index 0000000000000000000000000000000000000000..51afce9dbae0d3a0f2236184e02dd4fc2026b6d9 --- /dev/null +++ b/miniCUDA124/include/nvtx3/nvToolsExt.h @@ -0,0 +1,1499 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
+* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
+* OR PERFORMANCE OF THIS SOURCE CODE.
+*
+* U.S. Government End Users. This source code is a "commercial item" as
+* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
+* "commercial computer software" and "commercial computer software
+* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
+* and is provided to the U.S. Government only as a commercial end item.
+* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
+* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
+* source code with only those rights set forth herein.
+*
+* Any use of this source code in individual and commercial software must
+* include, in the user documentation and internal comments to the code,
+* the above Disclaimer and U.S. Government End Users Notice.
+*/
+
+/** \file nvToolsExt.h
+ */
+
+/* ========================================================================= */
+/** \mainpage
+ * \tableofcontents
+ * \section INTRODUCTION Introduction
+ *
+ * The NVIDIA Tools Extension library is a set of functions that a
+ * developer can use to provide additional information to tools.
+ * The additional information is used by the tool to improve
+ * analysis and visualization of data.
+ *
+ * The library introduces close to zero overhead if no tool is
+ * attached to the application. The overhead when a tool is
+ * attached is specific to the tool.
+ *
+ * \section INITIALIZATION_SECTION Initialization
+ *
+ * Typically the tool's library that plugs into NVTX is loaded indirectly
+ * via platform-specific environment properties. For some platforms or
+ * special cases, the user may instead be required to initialize explicitly.
+ * Explicit initialization can also be useful to control when the API loads
+ * a tool's library, rather than having it loaded at what would typically be
+ * the first function call that emits information. For these rare cases, see
+ * \ref INITIALIZATION for additional information.
+ *
+ * \section MARKERS_AND_RANGES Markers and Ranges
+ *
+ * Markers and ranges are used to describe events at a specific time
+ * (markers) or over a time span (ranges), respectively, during the
+ * execution of the application.
+ *
+ * \subsection MARKERS Markers
+ *
+ * Markers denote specific moments in time.
+ *
+ * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
+ * how to specify the domain.
+ *
+ * \subsection THREAD_RANGES Thread Ranges
+ *
+ * Thread ranges denote nested time ranges. Nesting is maintained per thread
+ * per domain and does not require any additional correlation mechanism. The
+ * duration of a thread range is defined by the corresponding pair of
+ * nvtxRangePush* to nvtxRangePop API calls.
+ *
+ * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
+ * how to specify the domain.
+ *
+ * \subsection PROCESS_RANGES Process Ranges
+ *
+ * Process ranges denote a time span that can expose arbitrary concurrency, as
+ * opposed to thread ranges that only support nesting. In addition, the range
+ * start event can happen on a different thread than the end marker.
For the
+ * correlation of a start/end pair, a unique correlation ID is used that is
+ * returned from the start API call and needs to be passed into the end API
+ * call.
+ *
+ * \subsection EVENT_ATTRIBUTES Event Attributes
+ *
+ * \ref MARKERS_AND_RANGES can be annotated with various attributes to provide
+ * additional information for an event or to guide the tool's visualization of
+ * the data. Each of the attributes is optional and if left unused the
+ * attributes fall back to a default value. The attributes include:
+ * - color
+ * - category
+ *
+ * To specify any attribute other than the text message, the \ref
+ * EVENT_ATTRIBUTE_STRUCTURE "Event Attribute Structure" must be used.
+ *
+ * \section DOMAINS Domains
+ *
+ * Domains enable developers to scope annotations. By default all events and
+ * annotations are in the default domain. Additional domains can be registered.
+ * This allows developers to scope markers, ranges, and resource names to
+ * avoid conflicts.
+ *
+ * The function ::nvtxDomainCreateA or ::nvtxDomainCreateW is used to create
+ * a named domain.
+ *
+ * Each domain maintains its own
+ * - categories
+ * - thread range stacks
+ * - registered strings
+ *
+ * The function ::nvtxDomainDestroy marks the end of the domain. Destroying
+ * a domain unregisters and destroys all objects associated with it, such as
+ * registered strings, resource objects, named categories, and started ranges.
+ *
+ * \section RESOURCE_NAMING Resource Naming
+ *
+ * This section covers calls that allow annotating objects with user-provided
+ * names in order to allow for a better analysis of complex trace data. All of
+ * the functions take the handle or the ID of the object to name, and the name.
+ * The functions can be called multiple times during the execution of an
+ * application; however, in that case it is implementation dependent which
+ * name will be reported by the tool.
+ *
+ * \subsection CATEGORY_NAMING Category Naming
+ *
+ * Some functions in this library support associating an integer category
+ * to enable filtering and sorting. The category naming functions allow
+ * the application to associate a user friendly name with the integer
+ * category. Support for domains has been added in NVTX_VERSION_2 to
+ * avoid collisions when domains are developed independently.
+ *
+ * \subsection RESOURCE_OBJECTS Resource Objects
+ *
+ * Resource objects are a generic mechanism for attaching data to an application
+ * resource. The identifier field makes the association to a pointer or handle,
+ * while the type field helps provide deeper understanding of the identifier as
+ * well as enabling differentiation in cases where handles generated by different
+ * APIs may collide. The resource object may also carry a message to further
+ * annotate the application resource and how it is used.
+ *
+ * The resource object was introduced in NVTX_VERSION_2 to supersede existing naming
+ * functions and allow the application resource identified by those functions to be
+ * associated to a domain. The other naming functions are still supported for backward
+ * compatibility but will be associated only to the default domain.
+ *
+ * \subsection RESOURCE_NAMING_OS Resource Naming
+ *
+ * Some operating system resource creation APIs do not support providing a user
+ * friendly name, such as some OS thread creation APIs.
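+ * As an illustrative example, a worker thread created through such an API can
+ * still be labeled for tools (here `threadId` is assumed to be the OS thread
+ * ID of that thread):
+ * \code
+ * nvtxNameOsThreadA(threadId, "Worker Thread");
+ * \endcode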
This API supports resource naming
+ * both through resource objects and through functions following the pattern
+ * nvtxName[RESOURCE_TYPE][A|W](identifier, name). Resource objects, introduced in
+ * NVTX_VERSION 2, supersede the other functions with a more general method of
+ * assigning names to OS resources and also allow associating them with domains.
+ * The older nvtxName* functions are only associated with the default domain.
+ *
+ * \section EXTENSIONS Optional Extensions
+ * Optional extensions will either appear within the existing sections they extend
+ * or appear in the "Related Pages" when they introduce new concepts.
+ */
+
+ /**
+ * Tools Extension API version
+ */
+#if defined(NVTX_VERSION) && NVTX_VERSION < 3
+#error "Trying to #include NVTX version 3 in a source file where an older NVTX version has already been included. If you are not directly using NVTX (the NVIDIA Tools Extension library), you are getting this error because libraries you are using have included different versions of NVTX. Suggested solutions are: (1) reorder #includes so the newest NVTX version is included first, (2) avoid using the conflicting libraries in the same .c/.cpp file, or (3) update the library using the older NVTX version to use the newer version instead."
+#endif
+
+/* Header guard */
+#if !defined(NVTX_VERSION)
+#define NVTX_VERSION 3
+
+#if defined(_MSC_VER)
+#define NVTX_API __stdcall
+#define NVTX_INLINE_STATIC __inline static
+#else /*defined(__GNUC__)*/
+#define NVTX_API
+#define NVTX_INLINE_STATIC inline static
+#endif /* Platform */
+
+#if defined(NVTX_NO_IMPL)
+/* When omitting implementation, avoid declaring functions inline */
+/* without definitions, since this causes compiler warnings. */
+#define NVTX_DECLSPEC
+#elif defined(NVTX_EXPORT_API)
+/* Allow overriding definition of NVTX_DECLSPEC when exporting API. */
+/* Default is empty, meaning non-inline with external linkage. */
+#if !defined(NVTX_DECLSPEC)
+#define NVTX_DECLSPEC
+#endif
+#else
+/* Normal NVTX usage defines the NVTX API inline with static */
+/* (internal) linkage. */
+#define NVTX_DECLSPEC NVTX_INLINE_STATIC
+#endif
+
+#include "nvtxDetail/nvtxLinkOnce.h"
+
+#define NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION) NAME##_v##VERSION
+#define NVTX_VERSIONED_IDENTIFIER_L2(NAME, VERSION) NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION)
+#define NVTX_VERSIONED_IDENTIFIER(NAME) NVTX_VERSIONED_IDENTIFIER_L2(NAME, NVTX_VERSION)
+
+/**
+ * The nvToolsExt library depends on stdint.h. If the build tool chain in use
+ * does not include stdint.h then define NVTX_STDINT_TYPES_ALREADY_DEFINED
+ * and define the following types:
+ * <ul>
+ *   <li>uint8_t
+ *   <li>int8_t
+ *   <li>uint16_t
+ *   <li>int16_t
+ *   <li>uint32_t
+ *   <li>int32_t
+ *   <li>uint64_t
+ *   <li>int64_t
+ *   <li>uintptr_t
+ *   <li>intptr_t
+ * </ul>
+ *
+ * #define NVTX_STDINT_TYPES_ALREADY_DEFINED if you are using your own header file. + */ +#ifndef NVTX_STDINT_TYPES_ALREADY_DEFINED +#include +#endif + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** +* Result Codes +*/ + +#define NVTX_SUCCESS 0 +#define NVTX_FAIL 1 +#define NVTX_ERR_INIT_LOAD_PROPERTY 2 +#define NVTX_ERR_INIT_ACCESS_LIBRARY 3 +#define NVTX_ERR_INIT_LOAD_LIBRARY 4 +#define NVTX_ERR_INIT_MISSING_LIBRARY_ENTRY_POINT 5 +#define NVTX_ERR_INIT_FAILED_LIBRARY_ENTRY_POINT 6 +#define NVTX_ERR_NO_INJECTION_LIBRARY_AVAILABLE 7 + +/** + * Size of the nvtxEventAttributes_t structure. + */ +#define NVTX_EVENT_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxEventAttributes_t) ) ) + +#define NVTX_NO_PUSH_POP_TRACKING ((int)-2) + +typedef uint64_t nvtxRangeId_t; + +/* Forward declaration of opaque domain registration structure */ +struct nvtxDomainRegistration_st; +typedef struct nvtxDomainRegistration_st nvtxDomainRegistration; + +/* \brief Domain Handle Structure. +* \anchor DOMAIN_HANDLE_STRUCTURE +* +* This structure is opaque to the user and is used as a handle to reference +* a domain. This type is returned from tools when using the NVTX API to +* create a domain. +* +*/ +typedef nvtxDomainRegistration* nvtxDomainHandle_t; + +/* Forward declaration of opaque string registration structure */ +struct nvtxStringRegistration_st; +typedef struct nvtxStringRegistration_st nvtxStringRegistration; + +/* \brief Registered String Handle Structure. +* \anchor REGISTERED_STRING_HANDLE_STRUCTURE +* +* This structure is opaque to the user and is used as a handle to reference +* a registered string. This type is returned from tools when using the NVTX +* API to create a registered string. +* +*/ +typedef nvtxStringRegistration* nvtxStringHandle_t; + +/* ========================================================================= */ +/** \defgroup GENERAL General + * @{ + */ + +/** --------------------------------------------------------------------------- + * Color Types + * ------------------------------------------------------------------------- */ +typedef enum nvtxColorType_t +{ + NVTX_COLOR_UNKNOWN = 0, /**< Color attribute is unused. */ + NVTX_COLOR_ARGB = 1 /**< An ARGB color is provided. */ +} nvtxColorType_t; + +/** --------------------------------------------------------------------------- + * Message Types + * ------------------------------------------------------------------------- */ +typedef enum nvtxMessageType_t +{ + NVTX_MESSAGE_UNKNOWN = 0, /**< Message payload is unused. */ + NVTX_MESSAGE_TYPE_ASCII = 1, /**< A character sequence is used as payload. */ + NVTX_MESSAGE_TYPE_UNICODE = 2, /**< A wide character sequence is used as payload. */ + /* NVTX_VERSION_2 */ + NVTX_MESSAGE_TYPE_REGISTERED = 3, /**< A unique string handle that was registered + with \ref nvtxDomainRegisterStringA() or + \ref nvtxDomainRegisterStringW(). */ +} nvtxMessageType_t; + +typedef union nvtxMessageValue_t +{ + const char* ascii; + const wchar_t* unicode; + /* NVTX_VERSION_2 */ + nvtxStringHandle_t registered; +} nvtxMessageValue_t; + + +/** @} */ /*END defgroup*/ +/* ------------------------------------------------------------------------- */ +/** \brief Force initialization (optional) +* +* Force NVTX library to initialize. The first call to any NVTX API function +* will automatically initialize the entire API. This can make the first call +* much slower than subsequent calls. 
In applications where the first call to
+* NVTX may be in a performance-critical section, calling nvtxInitialize before
+* any performance-critical sections will ensure NVTX initialization occurs at
+* an acceptable time. Since nvtxInitialize takes no parameters and has no
+* expected behavior besides initialization, it is convenient to add a call to
+* nvtxInitialize in NVTX-instrumented applications that need to force earlier
+* initialization without changing any other code. For example, if an app's
+* first NVTX call is nvtxDomainCreate, and it is difficult to move that call
+* earlier because the domain handle must be stored in an object only created
+* at that point, adding a call to nvtxInitialize at the top of main() will
+* ensure the later call to nvtxDomainCreate is as fast as possible.
+*
+* \version \NVTX_VERSION_3
+*
+* \param reserved - must be zero or NULL.
+*
+* @{ */
+NVTX_DECLSPEC void NVTX_API nvtxInitialize(const void* reserved);
+/** @} */
+
+
+/** @} */ /*END defgroup*/
+
+/* ========================================================================= */
+/** \defgroup EVENT_ATTRIBUTES Event Attributes
+* @{
+*/
+
+/** ---------------------------------------------------------------------------
+* Payload Types
+* ------------------------------------------------------------------------- */
+typedef enum nvtxPayloadType_t
+{
+    NVTX_PAYLOAD_UNKNOWN = 0,   /**< Payload is unused. */
+    NVTX_PAYLOAD_TYPE_UNSIGNED_INT64 = 1,   /**< A 64 bit unsigned integer value is used as payload. */
+    NVTX_PAYLOAD_TYPE_INT64 = 2,   /**< A 64 bit signed integer value is used as payload. */
+    NVTX_PAYLOAD_TYPE_DOUBLE = 3,   /**< A 64 bit floating point value is used as payload. */
+    /* NVTX_VERSION_2 */
+    NVTX_PAYLOAD_TYPE_UNSIGNED_INT32 = 4,   /**< A 32 bit unsigned integer value is used as payload. */
+    NVTX_PAYLOAD_TYPE_INT32 = 5,   /**< A 32 bit signed integer value is used as payload. */
+    NVTX_PAYLOAD_TYPE_FLOAT = 6    /**< A 32 bit floating point value is used as payload. */
+} nvtxPayloadType_t;
+
+/** \brief Event Attribute Structure.
+ * \anchor EVENT_ATTRIBUTE_STRUCTURE
+ *
+ * This structure is used to describe the attributes of an event. The layout of
+ * the structure is defined by a specific version of the tools extension
+ * library and can change between different versions of the Tools Extension
+ * library.
+ *
+ * \par Initializing the Attributes
+ *
+ * The caller should always perform the following three tasks when using
+ * attributes:
+ * <ul>
+ *    <li>Zero the structure
+ *    <li>Set the version field
+ *    <li>Set the size field
+ * </ul>
+ * + * Zeroing the structure sets all the event attributes types and values + * to the default value. + * + * The version and size field are used by the Tools Extension + * implementation to handle multiple versions of the attributes structure. + * + * It is recommended that the caller use one of the following to methods + * to initialize the event attributes structure: + * + * \par Method 1: Initializing nvtxEventAttributes for future compatibility + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * \endcode + * + * \par Method 2: Initializing nvtxEventAttributes for a specific version + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = 1; + * eventAttrib.size = (uint16_t)(sizeof(nvtxEventAttributes_v1)); + * \endcode + * + * If the caller uses Method 1 it is critical that the entire binary + * layout of the structure be configured to 0 so that all fields + * are initialized to the default value. + * + * The caller should either use both NVTX_VERSION and + * NVTX_EVENT_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values + * and a versioned type (Method 2). Using a mix of the two methods + * will likely cause either source level incompatibility or binary + * incompatibility in the future. + * + * \par Settings Attribute Types and Values + * + * + * \par Example: + * \code + * // Initialize + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * + * // Configure the Attributes + * eventAttrib.colorType = NVTX_COLOR_ARGB; + * eventAttrib.color = 0xFF880000; + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; + * eventAttrib.message.ascii = "Example"; + * \endcode + * + * In the example the caller does not have to set the value of + * \ref ::nvtxEventAttributes_v2::category or + * \ref ::nvtxEventAttributes_v2::payload as these fields were set to + * the default value by {0}. + * \sa + * ::nvtxDomainMarkEx + * ::nvtxDomainRangeStartEx + * ::nvtxDomainRangePushEx + */ +typedef struct nvtxEventAttributes_v2 +{ + /** + * \brief Version flag of the structure. + * + * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs + * supported in this header file. This can optionally be overridden to + * another version of the tools extension library. + */ + uint16_t version; + + /** + * \brief Size of the structure. + * + * Needs to be set to the size in bytes of the event attribute + * structure used to specify the event. + */ + uint16_t size; + + /** + * \brief ID of the category the event is assigned to. + * + * A category is a user-controlled ID that can be used to group + * events. The tool may use category IDs to improve filtering or + * enable grouping of events in the same category. The functions + * \ref ::nvtxNameCategoryA or \ref ::nvtxNameCategoryW can be used + * to name a category. + * + * Default Value is 0 + */ + uint32_t category; + + /** \brief Color type specified in this attribute structure. + * + * Defines the color format of the attribute structure's \ref COLOR_FIELD + * "color" field. + * + * Default Value is NVTX_COLOR_UNKNOWN + */ + int32_t colorType; /* nvtxColorType_t */ + + /** \brief Color assigned to this event. \anchor COLOR_FIELD + * + * The color that the tool should use to visualize the event. + */ + uint32_t color; + + /** + * \brief Payload type specified in this attribute structure. 
+ * + * Defines the payload format of the attribute structure's \ref PAYLOAD_FIELD + * "payload" field. + * + * Default Value is NVTX_PAYLOAD_UNKNOWN + */ + int32_t payloadType; /* nvtxPayloadType_t */ + + int32_t reserved0; + + /** + * \brief Payload assigned to this event. \anchor PAYLOAD_FIELD + * + * A numerical value that can be used to annotate an event. The tool could + * use the payload data to reconstruct graphs and diagrams. + */ + union payload_t + { + uint64_t ullValue; + int64_t llValue; + double dValue; + /* NVTX_VERSION_2 */ + uint32_t uiValue; + int32_t iValue; + float fValue; + } payload; + + /** \brief Message type specified in this attribute structure. + * + * Defines the message format of the attribute structure's \ref MESSAGE_FIELD + * "message" field. + * + * Default Value is NVTX_MESSAGE_UNKNOWN + */ + int32_t messageType; /* nvtxMessageType_t */ + + /** \brief Message assigned to this attribute structure. \anchor MESSAGE_FIELD + * + * The text message that is attached to an event. + */ + nvtxMessageValue_t message; + +} nvtxEventAttributes_v2; + +typedef struct nvtxEventAttributes_v2 nvtxEventAttributes_t; + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \defgroup MARKERS_AND_RANGES Markers and Ranges + * + * See \ref MARKERS_AND_RANGES for more details + * + * @{ + */ + +/** \name Marker */ + +/* ------------------------------------------------------------------------- */ +/** \brief Marks an instantaneous event in the application. +* +* A marker can contain a text message or specify additional information +* using the event attributes structure. These attributes include a text +* message, color, category, and a payload. Each of the attributes is optional +* and can only be sent out using the \ref nvtxDomainMarkEx function. +* +* nvtxDomainMarkEx(NULL, event) is equivalent to calling +* nvtxMarkEx(event). +* +* \param domain - The domain of scoping the category. +* \param eventAttrib - The event attribute structure defining the marker's +* attribute types and attribute values. +* +* \sa +* ::nvtxMarkEx +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Marks an instantaneous event in the application. + * + * A marker can contain a text message or specify additional information + * using the event attributes structure. These attributes include a text + * message, color, category, and a payload. Each of the attributes is optional + * and can only be sent out using the \ref nvtxMarkEx function. + * If \ref nvtxMarkA or \ref nvtxMarkW are used to specify the marker + * or if an attribute is unspecified then a default value will be used. + * + * \param eventAttrib - The event attribute structure defining the marker's + * attribute types and attribute values. + * + * \par Example: + * \code + * // zero the structure + * nvtxEventAttributes_t eventAttrib = {0}; + * // set the version and the size information + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * // configure the attributes. 0 is the default for all attributes. 
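+ * // category and payload keep their zero defaults from the {0} initializer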
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
+ * eventAttrib.color = 0xFF880000;
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
+ * eventAttrib.message.ascii = "Example nvtxMarkEx";
+ * nvtxMarkEx(&eventAttrib);
+ * \endcode
+ *
+ * \sa
+ * ::nvtxDomainMarkEx
+ *
+ * \version \NVTX_VERSION_1
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Marks an instantaneous event in the application.
+ *
+ * A marker created using \ref nvtxMarkA or \ref nvtxMarkW contains only a
+ * text message.
+ *
+ * \param message - The message associated with this marker event.
+ *
+ * \par Example:
+ * \code
+ * nvtxMarkA("Example nvtxMarkA");
+ * nvtxMarkW(L"Example nvtxMarkW");
+ * \endcode
+ *
+ * \sa
+ * ::nvtxDomainMarkEx
+ * ::nvtxMarkEx
+ *
+ * \version \NVTX_VERSION_0
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message);
+NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message);
+/** @} */
+
+
+/** \name Process Ranges */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Starts a process range in a domain.
+*
+* \param domain - The domain of scoping the range.
+* \param eventAttrib - The event attribute structure defining the range's
+* attribute types and attribute values.
+*
+* \return The unique ID used to correlate a pair of Start and End events.
+*
+* \remarks Ranges defined by Start/End can overlap.
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
+* nvtxEventAttributes_t eventAttrib = {0};
+* eventAttrib.version = NVTX_VERSION;
+* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* eventAttrib.message.ascii = "my range";
+* nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(domain, &eventAttrib);
+* // ...
+* nvtxDomainRangeEnd(domain, rangeId);
+* \endcode
+*
+* \sa
+* ::nvtxDomainRangeEnd
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Starts a process range.
+ *
+ * \param eventAttrib - The event attribute structure defining the range's
+ * attribute types and attribute values.
+ *
+ * \return The unique ID used to correlate a pair of Start and End events.
+ *
+ * \remarks Ranges defined by Start/End can overlap.
+ *
+ * \par Example:
+ * \code
+ * nvtxEventAttributes_t eventAttrib = {0};
+ * eventAttrib.version = NVTX_VERSION;
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+ * eventAttrib.category = 3;
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
+ * eventAttrib.color = 0xFF0088FF;
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
+ * eventAttrib.message.ascii = "Example Range";
+ * nvtxRangeId_t rangeId = nvtxRangeStartEx(&eventAttrib);
+ * // ...
+ * nvtxRangeEnd(rangeId);
+ * \endcode
+ *
+ * \sa
+ * ::nvtxRangeEnd
+ * ::nvtxDomainRangeStartEx
+ *
+ * \version \NVTX_VERSION_1
+ * @{ */
+NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Starts a process range.
+ *
+ * \param message - The event message associated with this range event.
+ *
+ * \return The unique ID used to correlate a pair of Start and End events.
+ *
+ * \remarks Ranges defined by Start/End can overlap.
+ *
+ * \par Example:
+ * \code
+ * nvtxRangeId_t r1 = nvtxRangeStartA("Range 1");
+ * nvtxRangeId_t r2 = nvtxRangeStartW(L"Range 2");
+ * nvtxRangeEnd(r1);
+ * nvtxRangeEnd(r2);
+ * \endcode
+ *
+ * \sa
+ * ::nvtxRangeEnd
+ * ::nvtxRangeStartEx
+ * ::nvtxDomainRangeStartEx
+ *
+ * \version \NVTX_VERSION_0
+ * @{ */
+NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message);
+NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Ends a process range.
+*
+* \param domain - The domain.
+* \param id - The correlation ID returned from an ::nvtxDomainRangeStartEx call.
+*
+* \remarks This function is offered for completeness but is an alias for ::nvtxRangeEnd.
+* It does not strictly need a domain param since the domain is associated with the
+* range ID at ::nvtxDomainRangeStartEx.
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
+* nvtxEventAttributes_t eventAttrib = {0};
+* eventAttrib.version = NVTX_VERSION;
+* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* eventAttrib.message.ascii = "my range";
+* nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(domain, &eventAttrib);
+* // ...
+* nvtxDomainRangeEnd(domain, rangeId);
+* \endcode
+*
+* \sa
+* ::nvtxDomainRangeStartEx
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Ends a process range.
+ *
+ * \param id - The correlation ID returned from an nvtxRangeStart call.
+ *
+ * \sa
+ * ::nvtxDomainRangeStartEx
+ * ::nvtxRangeStartEx
+ * ::nvtxRangeStartA
+ * ::nvtxRangeStartW
+ *
+ * \version \NVTX_VERSION_0
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id);
+/** @} */
+
+/** \name Thread Ranges */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Starts a nested thread range.
+*
+* \param domain - The domain of scoping.
+* \param eventAttrib - The event attribute structure defining the range's
+* attribute types and attribute values.
+*
+* \return The 0-based level of the range being started. This value is scoped to the domain.
+* If an error occurs, a negative value is returned.
+* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain"); +* nvtxEventAttributes_t eventAttrib = {0}; +* eventAttrib.version = NVTX_VERSION; +* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib.colorType = NVTX_COLOR_ARGB; +* eventAttrib.color = 0xFFFF0000; +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; +* eventAttrib.message.ascii = "Level 0"; +* nvtxDomainRangePushEx(domain, &eventAttrib); +* +* // Re-use eventAttrib +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE; +* eventAttrib.message.unicode = L"Level 1"; +* nvtxDomainRangePushEx(domain, &eventAttrib); +* +* nvtxDomainRangePop(domain); //level 1 +* nvtxDomainRangePop(domain); //level 0 +* \endcode +* +* \sa +* ::nvtxDomainRangePop +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a nested thread range. + * + * \param eventAttrib - The event attribute structure defining the range's + * attribute types and attribute values. + * + * \return The 0 based level of range being started. This level is per domain. + * If an error occurs a negative value is returned. + * + * \par Example: + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * eventAttrib.colorType = NVTX_COLOR_ARGB; + * eventAttrib.color = 0xFFFF0000; + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; + * eventAttrib.message.ascii = "Level 0"; + * nvtxRangePushEx(&eventAttrib); + * + * // Re-use eventAttrib + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE; + * eventAttrib.message.unicode = L"Level 1"; + * nvtxRangePushEx(&eventAttrib); + * + * nvtxRangePop(); + * nvtxRangePop(); + * \endcode + * + * \sa + * ::nvtxDomainRangePushEx + * ::nvtxRangePop + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a nested thread range. + * + * \param message - The event message associated to this range event. + * + * \return The 0 based level of range being started. If an error occurs a + * negative value is returned. + * + * \par Example: + * \code + * nvtxRangePushA("Level 0"); + * nvtxRangePushW(L"Level 1"); + * nvtxRangePop(); + * nvtxRangePop(); + * \endcode + * + * \sa + * ::nvtxDomainRangePushEx + * ::nvtxRangePop + * + * \version \NVTX_VERSION_0 + * @{ */ +NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message); +NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message); +/** @} */ + + +/* ------------------------------------------------------------------------- */ +/** \brief Ends a nested thread range. +* +* \return The level of the range being ended. If an error occurs a negative +* value is returned on the current thread. 
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("example library");
+* nvtxEventAttributes_t eventAttrib = {0};
+* eventAttrib.version = NVTX_VERSION;
+* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* eventAttrib.message.ascii = "Level 0";
+* nvtxDomainRangePushEx(domain, &eventAttrib);
+* eventAttrib.message.ascii = "Level 1";
+* nvtxDomainRangePushEx(domain, &eventAttrib);
+* nvtxDomainRangePop(domain); //level 1
+* nvtxDomainRangePop(domain); //level 0
+* \endcode
+*
+* \sa
+* ::nvtxDomainRangePushEx
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Ends a nested thread range.
+ *
+ * \return The level of the range being ended. If an error occurs a negative
+ * value is returned on the current thread.
+ *
+ * \par Example:
+ * \code
+ * nvtxRangePushA("Level 0");
+ * nvtxRangePushW(L"Level 1");
+ * nvtxRangePop();
+ * nvtxRangePop();
+ * \endcode
+ *
+ * \sa
+ * ::nvtxRangePushEx
+ * ::nvtxRangePushA
+ * ::nvtxRangePushW
+ *
+ * \version \NVTX_VERSION_0
+ * @{ */
+NVTX_DECLSPEC int NVTX_API nvtxRangePop(void);
+/** @} */
+
+
+/** @} */ /*END defgroup*/
+/* ========================================================================= */
+/** \defgroup RESOURCE_NAMING Resource Naming
+ *
+ * See \ref RESOURCE_NAMING for more details
+ *
+ * @{
+ */
+
+
+/* ------------------------------------------------------------------------- */
+/** \name Functions for Generic Resource Naming*/
+/* ------------------------------------------------------------------------- */
+
+/* ------------------------------------------------------------------------- */
+/** \cond SHOW_HIDDEN
+* \brief Resource typing helpers.
+*
+* Classes are used to make it easy to create a series of resource types
+* per API without collisions.
+*/
+#define NVTX_RESOURCE_MAKE_TYPE(CLASS, INDEX) ((((uint32_t)(NVTX_RESOURCE_CLASS_ ## CLASS))<<16)|((uint32_t)(INDEX)))
+#define NVTX_RESOURCE_CLASS_GENERIC 1
+/** \endcond */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Generic resource type for when a resource class is not available.
+*
+* \sa
+* ::nvtxDomainResourceCreate
+*
+* \version \NVTX_VERSION_2
+*/
+typedef enum nvtxResourceGenericType_t
+{
+    NVTX_RESOURCE_TYPE_UNKNOWN = 0,
+    NVTX_RESOURCE_TYPE_GENERIC_POINTER = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 1), /**< Generic pointer assumed to have no collisions with other pointers. */
+    NVTX_RESOURCE_TYPE_GENERIC_HANDLE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 2), /**< Generic handle assumed to have no collisions with other handles. */
+    NVTX_RESOURCE_TYPE_GENERIC_THREAD_NATIVE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 3), /**< OS native thread identifier. */
+    NVTX_RESOURCE_TYPE_GENERIC_THREAD_POSIX = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 4) /**< POSIX pthread identifier. */
+} nvtxResourceGenericType_t;
+
+
+
+/** \brief Resource Attribute Structure.
+* \anchor RESOURCE_ATTRIBUTE_STRUCTURE
+*
+* This structure is used to describe the attributes of a resource. The layout of
+* the structure is defined by a specific version of the tools extension
+* library and can change between different versions of the Tools Extension
+* library.
+*
+* \par Initializing the Attributes
+*
+* The caller should always perform the following three tasks when using
+* attributes:
+* - Zero the structure
+* - Set the version field
+* - Set the size field
+*
+* Zeroing the structure sets all the resource attribute types and values
+* to the default value.
+*
+* The version and size fields are used by the Tools Extension
+* implementation to handle multiple versions of the attributes structure.
+*
+* It is recommended that the caller use one of the following two methods
+* to initialize the resource attributes structure:
+*
+* \par Method 1: Initializing nvtxResourceAttributes for future compatibility
+* \code
+* nvtxResourceAttributes_t attribs = {0};
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
+* \endcode
+*
+* \par Method 2: Initializing nvtxResourceAttributes for a specific version
+* \code
+* nvtxResourceAttributes_v0 attribs = {0};
+* attribs.version = 2;
+* attribs.size = (uint16_t)(sizeof(nvtxResourceAttributes_v0));
+* \endcode
+*
+* If the caller uses Method 1 it is critical that the entire binary
+* layout of the structure be configured to 0 so that all fields
+* are initialized to the default value.
+*
+* The caller should either use both NVTX_VERSION and
+* NVTX_RESOURCE_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
+* and a versioned type (Method 2). Using a mix of the two methods
+* will likely cause either source level incompatibility or binary
+* incompatibility in the future.
+*
+* \par Setting Attribute Types and Values
+*
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
+*
+* // Initialize
+* nvtxResourceAttributes_t attribs = {0};
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
+*
+* // Configure the Attributes
+* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
+* attribs.identifier.pValue = (const void*)pMutex;
+* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* attribs.message.ascii = "Single thread access to database.";
+*
+* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
+* \endcode
+*
+* \sa
+* ::nvtxDomainResourceCreate
+*/
+typedef struct nvtxResourceAttributes_v0
+{
+    /**
+    * \brief Version flag of the structure.
+    *
+    * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
+    * supported in this header file. This can optionally be overridden to
+    * another version of the tools extension library.
+    */
+    uint16_t version;
+
+    /**
+    * \brief Size of the structure.
+    *
+    * Needs to be set to the size in bytes of this attribute
+    * structure.
+    */
+    uint16_t size;
+
+    /**
+    * \brief Identifier type specifies how to interpret the identifier field.
+    *
+    * Defines the identifier format of the attribute structure's \ref RESOURCE_IDENTIFIER_FIELD
+    * "identifier" field.
+    *
+    * Default Value is NVTX_RESOURCE_TYPE_UNKNOWN
+    */
+    int32_t identifierType;            /* values from enums following the pattern nvtxResource[name]Type_t */
+
+    /**
+    * \brief Identifier for the resource.
+    * \anchor RESOURCE_IDENTIFIER_FIELD
+    *
+    * An identifier may be a pointer or a handle to an OS or middleware API object.
+    * The resource type helps avoid collisions where handle values may collide.
+    */
+    union identifier_t
+    {
+        const void* pValue;
+        uint64_t ullValue;
+    } identifier;
+
+    /** \brief Message type specified in this attribute structure.
+    *
+    * Defines the message format of the attribute structure's \ref RESOURCE_MESSAGE_FIELD
+    * "message" field.
+    *
+    * Default Value is NVTX_MESSAGE_UNKNOWN
+    */
+    int32_t messageType;            /* nvtxMessageType_t */
+
+    /** \brief Message assigned to this attribute structure. \anchor RESOURCE_MESSAGE_FIELD
+    *
+    * The text message that is attached to a resource.
+    */
+    nvtxMessageValue_t message;
+
+} nvtxResourceAttributes_v0;
+
+typedef struct nvtxResourceAttributes_v0 nvtxResourceAttributes_t;
+
+/* \cond SHOW_HIDDEN
+* \version \NVTX_VERSION_2
+*/
+#define NVTX_RESOURCE_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxResourceAttributes_v0) ) )
+typedef struct nvtxResourceHandle* nvtxResourceHandle_t;
+/** \endcond */
+
+
+
+/* ------------------------------------------------------------------------- */
+/** \brief Create a resource object to track and associate data with OS and middleware objects.
+*
+* Allows users to associate an API handle or pointer with a user-provided name.
+*
+*
+* \param domain - Domain to own the resource object
+* \param attribs - Attributes to be associated with the resource
+*
+* \return A handle that represents the newly created resource object.
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
+* nvtxResourceAttributes_t attribs = {0};
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
+* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
+* attribs.identifier.pValue = (const void*)pMutex;
+* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* attribs.message.ascii = "Single thread access to database.";
+* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
+* \endcode
+*
+* \sa
+* ::nvtxResourceAttributes_t
+* ::nvtxDomainResourceDestroy
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Destroy a resource object and its association with an OS or middleware object.
+*
+* Releases the handle returned by ::nvtxDomainResourceCreate.
+*
+* \param resource - Handle to the resource object to destroy.
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
+* nvtxResourceAttributes_t attribs = {0};
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
+* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
+* attribs.identifier.pValue = (const void*)pMutex;
+* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* attribs.message.ascii = "Single thread access to database.";
+* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
+* nvtxDomainResourceDestroy(handle);
+* \endcode
+*
+* \sa
+* ::nvtxDomainResourceCreate
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource);
+/** @} */
+
+
+/** \name Functions for NVTX Category Naming*/
+
+/* ------------------------------------------------------------------------- */
+/**
+* \brief Annotate an NVTX category used within a domain.
+*
+* Categories are used to group sets of events. Each category is identified
+* through a unique ID and that ID is passed into any of the marker/range
+* events to assign that event to a specific category. The nvtxDomainNameCategory
+* function calls allow the user to assign a name to a category ID that is
+* specific to the domain.
+*
+* nvtxDomainNameCategory(NULL, category, name) is equivalent to calling
+* nvtxNameCategory(category, name).
+*
+* \param domain - The domain of scoping the category.
+* \param category - The category ID to name. +* \param name - The name of the category. +* +* \remarks The category names are tracked per domain. +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example"); +* nvtxDomainNameCategoryA(domain, 1, "Memory Allocation"); +* nvtxDomainNameCategoryW(domain, 2, L"Memory Transfer"); +* \endcode +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name); +/** @} */ + +/** \brief Annotate an NVTX category. + * + * Categories are used to group sets of events. Each category is identified + * through a unique ID and that ID is passed into any of the marker/range + * events to assign that event to a specific category. The nvtxNameCategory + * function calls allow the user to assign a name to a category ID. + * + * \param category - The category ID to name. + * \param name - The name of the category. + * + * \remarks The category names are tracked per process. + * + * \par Example: + * \code + * nvtxNameCategory(1, "Memory Allocation"); + * nvtxNameCategory(2, "Memory Transfer"); + * nvtxNameCategory(3, "Memory Object Lifetime"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name); +/** @} */ + +/** \name Functions for OS Threads Naming*/ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotate an OS thread. + * + * Allows the user to name an active thread of the current process. If an + * invalid thread ID is provided or a thread ID from a different process is + * used the behavior of the tool is implementation dependent. + * + * Tools expect thread ID to be a number that uniquely identifies the thread + * at the time of the call. Note that a thread's ID can be reused after + * it is destroyed. Tools may choose how to handle aliasing of thread IDs. + * + * POSIX pthread_t type returned by pthread_self() may not comply with these + * expectations. Please use OS-specific thread ID instead of pthread_t. + * + * The thread name is associated to the default domain. To support domains + * use resource objects via ::nvtxDomainResourceCreate. + * + * \param threadId - The ID of the thread to name. + * \param name - The name of the thread. 
+ *
+ * \par Examples:
+ * MS Windows:
+ * \code
+ * #include <windows.h>
+ * nvtxNameOsThread(GetCurrentThreadId(), "Current thread");
+ * nvtxNameOsThread(GetThreadId(SomeThreadHandle), "Other thread");
+ * \endcode
+ *
+ * Android:
+ * \code
+ * #include <unistd.h>
+ * nvtxNameOsThreadA(gettid(), "Current thread");
+ * nvtxNameOsThreadA(getpid(), "Main thread");
+ * \endcode
+ *
+ * Linux:
+ * \code
+ * #include <sys/syscall.h>
+ * nvtxNameOsThreadA(syscall(SYS_gettid), "Current thread");
+ * \endcode
+ * \code
+ * #include <unistd.h>
+ * nvtxNameOsThreadA(getpid(), "Main thread");
+ * \endcode
+ *
+ * OS X:
+ * \code
+ * #include <sys/syscall.h>
+ * nvtxNameOsThreadA(syscall(SYS_thread_selfid), "Current thread");
+ * \endcode
+ * \code
+ * #include <pthread.h>
+ * __uint64_t id;
+ * pthread_threadid_np(pthread_self(), &id);
+ * nvtxNameOsThreadA(id, "Current thread");
+ * pthread_threadid_np(somePThreadId, &id);
+ * nvtxNameOsThreadA(id, "Other thread");
+ * \endcode
+ *
+ * \version \NVTX_VERSION_1
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name);
+NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name);
+/** @} */
+
+
+/** @} */ /*END defgroup*/
+/* ========================================================================= */
+/** \defgroup STRING_REGISTRATION String Registration
+*
+* Registered strings are intended to increase performance by lowering instrumentation
+* overhead. A string may be registered once and its handle passed in place of
+* the string wherever the APIs allow it.
+*
+* See \ref STRING_REGISTRATION for more details
+*
+* @{
+*/
+
+/* ------------------------------------------------------------------------- */
+/** \brief Register a string.
+*
+* Registers an immutable string with NVTX. Once registered the pointer used
+* to register the string can be used in nvtxEventAttributes_t
+* \ref MESSAGE_FIELD. This allows the NVTX implementation to skip copying the
+* contents of the message on each event invocation.
+*
+* String registration is an optimization. It is recommended to use string
+* registration if the string will be passed to an event many times.
+*
+* Strings are not unregistered, except by unregistering the entire domain.
+*
+* \param domain - Domain handle. If NULL then the global domain is used.
+* \param string - A unique pointer to a sequence of characters.
+*
+* \return A handle representing the registered string.
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
+* nvtxStringHandle_t message = nvtxDomainRegisterStringA(domain, "registered string");
+* nvtxEventAttributes_t eventAttrib = {0};
+* eventAttrib.version = NVTX_VERSION;
+* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+* eventAttrib.messageType = NVTX_MESSAGE_TYPE_REGISTERED;
+* eventAttrib.message.registered = message;
+* \endcode
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string);
+NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string);
+/** @} */
+
+/** @} */ /*END defgroup*/
+/* ========================================================================= */
+/** \defgroup DOMAINS Domains
+*
+* Domains are used to group events to a developer defined scope. Middleware
+* vendors may also scope their own events to avoid collisions with the
+* application developer's events, so that the application developer may
+* inspect both parts and easily differentiate or filter them. By default
+* all events are scoped to a global domain, which is used when NULL is
+* provided or when using APIs provided by versions of NVTX below v2.
+*
+* Domains are intended to be typically long lived objects with the purpose
+* of logically separating events of large modules, such as middleware
+* libraries, from each other and from the main application.
+*
+* See \ref DOMAINS for more details
+*
+* @{
+*/
+
+/* ------------------------------------------------------------------------- */
+/** \brief Register an NVTX domain.
+*
+* Domains are used to scope annotations. All NVTX_VERSION_0 and NVTX_VERSION_1
+* annotations are scoped to the global domain. The function nvtxDomainCreate
+* creates a new named domain.
+*
+* Each domain maintains its own nvtxRangePush and nvtxRangePop stack.
+*
+* \param name - A unique string representing the domain.
+*
+* \return A handle representing the domain.
+*
+* \par Example:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
+*
+* nvtxMarkA("nvtxMarkA to global domain");
+*
+* nvtxEventAttributes_t eventAttrib1 = {0};
+* eventAttrib1.version = NVTX_VERSION;
+* eventAttrib1.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+* eventAttrib1.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* eventAttrib1.message.ascii = "nvtxDomainMarkEx to global domain";
+* nvtxDomainMarkEx(NULL, &eventAttrib1);
+*
+* nvtxEventAttributes_t eventAttrib2 = {0};
+* eventAttrib2.version = NVTX_VERSION;
+* eventAttrib2.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+* eventAttrib2.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* eventAttrib2.message.ascii = "nvtxDomainMarkEx to com.nvidia.nvtx.example";
+* nvtxDomainMarkEx(domain, &eventAttrib2);
+* nvtxDomainDestroy(domain);
+* \endcode
+*
+* \sa
+* ::nvtxDomainDestroy
+*
+* \version \NVTX_VERSION_2
+* @{ */
+NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* name);
+NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* name);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Unregister an NVTX domain.
+*
+* Unregisters the domain handle and frees all domain specific resources.
+* +* \param domain - the domain handle +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example"); +* nvtxDomainDestroy(domain); +* \endcode +* +* \sa +* ::nvtxDomainCreateA +* ::nvtxDomainCreateW +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain); +/** @} */ + + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \cond SHOW_HIDDEN */ + +#ifdef UNICODE + #define nvtxMark nvtxMarkW + #define nvtxRangeStart nvtxRangeStartW + #define nvtxRangePush nvtxRangePushW + #define nvtxNameCategory nvtxNameCategoryW + #define nvtxNameOsThread nvtxNameOsThreadW + /* NVTX_VERSION_2 */ + #define nvtxDomainCreate nvtxDomainCreateW + #define nvtxDomainRegisterString nvtxDomainRegisterStringW + #define nvtxDomainNameCategory nvtxDomainNameCategoryW +#else + #define nvtxMark nvtxMarkA + #define nvtxRangeStart nvtxRangeStartA + #define nvtxRangePush nvtxRangePushA + #define nvtxNameCategory nvtxNameCategoryA + #define nvtxNameOsThread nvtxNameOsThreadA + /* NVTX_VERSION_2 */ + #define nvtxDomainCreate nvtxDomainCreateA + #define nvtxDomainRegisterString nvtxDomainRegisterStringA + #define nvtxDomainNameCategory nvtxDomainNameCategoryA +#endif + +/** \endcond */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#define NVTX_IMPL_GUARD /* Ensure other headers cannot included directly */ + +#include "nvtxDetail/nvtxTypes.h" + +#ifndef NVTX_NO_IMPL +#include "nvtxDetail/nvtxImpl.h" +#endif /*NVTX_NO_IMPL*/ + +#undef NVTX_IMPL_GUARD + +#endif /* !defined(NVTX_VERSION) */ diff --git a/miniCUDA124/include/nvtx3/nvToolsExtCuda.h b/miniCUDA124/include/nvtx3/nvToolsExtCuda.h new file mode 100644 index 0000000000000000000000000000000000000000..b1922634312986f7ffd2f23887c564dfd12914ea --- /dev/null +++ b/miniCUDA124/include/nvtx3/nvToolsExtCuda.h @@ -0,0 +1,170 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. 
Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#include "nvToolsExt.h" + +#include "cuda.h" + +#ifndef NVTOOLSEXT_CUDA_V3 +#define NVTOOLSEXT_CUDA_V3 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* ========================================================================= */ +/** \name Functions for CUDA Resource Naming +*/ +/** \addtogroup RESOURCE_NAMING + * \section RESOURCE_NAMING_CUDA CUDA Resource Naming + * + * This section covers the API functions that allow to annotate CUDA resources + * with user-provided names. + * + * @{ + */ + +/* ------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_CUDA 4 +/** \endcond */ + +/* ------------------------------------------------------------------------- */ +/** \brief Resource types for CUDA +*/ +typedef enum nvtxResourceCUDAType_t +{ + NVTX_RESOURCE_TYPE_CUDA_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDA, 1), /* CUdevice */ + NVTX_RESOURCE_TYPE_CUDA_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 2), /* CUcontext */ + NVTX_RESOURCE_TYPE_CUDA_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDA, 3), /* CUstream */ + NVTX_RESOURCE_TYPE_CUDA_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 4), /* CUevent */ +} nvtxResourceCUDAType_t; + + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA device. + * + * Allows the user to associate a CUDA device with a user-provided name. + * + * \param device - The handle of the CUDA device to name. + * \param name - The name of the CUDA device. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA context. + * + * Allows the user to associate a CUDA context with a user-provided name. + * + * \param context - The handle of the CUDA context to name. + * \param name - The name of the CUDA context. + * + * \par Example: + * \code + * CUresult status = cuCtxCreate( &cuContext, 0, cuDevice ); + * if ( CUDA_SUCCESS != status ) + * goto Error; + * nvtxNameCuContext(cuContext, "CTX_NAME"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA stream. + * + * Allows the user to associate a CUDA stream with a user-provided name. + * + * \param stream - The handle of the CUDA stream to name. + * \param name - The name of the CUDA stream. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA event. 
+ * + * Allows the user to associate a CUDA event with a user-provided name. + * + * \param event - The handle of the CUDA event to name. + * \param name - The name of the CUDA event. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name); +/** @} */ + +/** @} */ /* END RESOURCE_NAMING */ + +/* ========================================================================= */ +#ifdef UNICODE + #define nvtxNameCuDevice nvtxNameCuDeviceW + #define nvtxNameCuContext nvtxNameCuContextW + #define nvtxNameCuStream nvtxNameCuStreamW + #define nvtxNameCuEvent nvtxNameCuEventW +#else + #define nvtxNameCuDevice nvtxNameCuDeviceA + #define nvtxNameCuContext nvtxNameCuContextA + #define nvtxNameCuStream nvtxNameCuStreamA + #define nvtxNameCuEvent nvtxNameCuEventA +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_CUDA /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplCuda_v3.h" +#undef NVTX_IMPL_GUARD_CUDA +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_CUDA_V3 */ diff --git a/miniCUDA124/include/nvtx3/nvToolsExtCudaRt.h b/miniCUDA124/include/nvtx3/nvToolsExtCudaRt.h new file mode 100644 index 0000000000000000000000000000000000000000..ca2e3de731fbf5ddfadb1c978bf12c6e78b5fbb6 --- /dev/null +++ b/miniCUDA124/include/nvtx3/nvToolsExtCudaRt.h @@ -0,0 +1,146 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. 
+*/ + +#include "nvToolsExt.h" + +#include "cuda.h" +#include "driver_types.h" + +#ifndef NVTOOLSEXT_CUDART_V3 +#define NVTOOLSEXT_CUDART_V3 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* ========================================================================= */ +/** \name Functions for CUDA Resource Naming +*/ +/** \addtogroup RESOURCE_NAMING + * \section RESOURCE_NAMING_CUDART CUDA Runtime Resource Naming + * + * This section covers the API functions that allow to annotate CUDA resources + * with user-provided names. + * + * @{ + */ + +/* ------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_CUDART 5 +/** \endcond */ + +/* ------------------------------------------------------------------------- */ +/** \brief Resource types for CUDART +*/ +typedef enum nvtxResourceCUDARTType_t +{ + NVTX_RESOURCE_TYPE_CUDART_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDART, 0), /* int device */ + NVTX_RESOURCE_TYPE_CUDART_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDART, 1), /* cudaStream_t */ + NVTX_RESOURCE_TYPE_CUDART_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDART, 2), /* cudaEvent_t */ +} nvtxResourceCUDARTType_t; + + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA device. + * + * Allows the user to associate a CUDA device with a user-provided name. + * + * \param device - The id of the CUDA device to name. + * \param name - The name of the CUDA device. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceA(int device, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceW(int device, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA stream. + * + * Allows the user to associate a CUDA stream with a user-provided name. + * + * \param stream - The handle of the CUDA stream to name. + * \param name - The name of the CUDA stream. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamA(cudaStream_t stream, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamW(cudaStream_t stream, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA event. + * + * Allows the user to associate a CUDA event with a user-provided name. + * + * \param event - The handle of the CUDA event to name. + * \param name - The name of the CUDA event. 
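 *
 * \par Example:
 * A minimal sketch (illustrative addition, not part of the original header),
 * assuming the runtime event is created successfully:
 * \code
 * cudaEvent_t ev;
 * cudaEventCreate(&ev);
 * nvtxNameCudaEventA(ev, "frame complete");
 * \endcode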
+ * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventA(cudaEvent_t event, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventW(cudaEvent_t event, const wchar_t* name); +/** @} */ + +/** @} */ /* END RESOURCE_NAMING */ + +/* ========================================================================= */ +#ifdef UNICODE + #define nvtxNameCudaDevice nvtxNameCudaDeviceW + #define nvtxNameCudaStream nvtxNameCudaStreamW + #define nvtxNameCudaEvent nvtxNameCudaEventW +#else + #define nvtxNameCudaDevice nvtxNameCudaDeviceA + #define nvtxNameCudaStream nvtxNameCudaStreamA + #define nvtxNameCudaEvent nvtxNameCudaEventA +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_CUDART /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplCudaRt_v3.h" +#undef NVTX_IMPL_GUARD_CUDART +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_CUDART_V3 */ diff --git a/miniCUDA124/include/nvtx3/nvToolsExtOpenCL.h b/miniCUDA124/include/nvtx3/nvToolsExtOpenCL.h new file mode 100644 index 0000000000000000000000000000000000000000..917024ab796a7148ac51ffb4ddb262284be35e44 --- /dev/null +++ b/miniCUDA124/include/nvtx3/nvToolsExtOpenCL.h @@ -0,0 +1,220 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. 
+*/
+
+#include "nvToolsExt.h"
+
+#include <CL/cl.h>
+
+#ifndef NVTOOLSEXT_OPENCL_V3
+#define NVTOOLSEXT_OPENCL_V3
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* ========================================================================= */
+/** \name Functions for OpenCL Resource Naming
+ */
+/** \addtogroup RESOURCE_NAMING
+ * \section RESOURCE_NAMING_OPENCL OpenCL Resource Naming
+ *
+ * This section covers the API functions that allow annotating OpenCL resources
+ * with user-provided names.
+ *
+ * @{
+ */
+
+/* ------------------------------------------------------------------------- */
+/* \cond SHOW_HIDDEN
+* \brief Used to build non-colliding values for resource types, separated by class
+* \version \NVTX_VERSION_2
+*/
+#define NVTX_RESOURCE_CLASS_OPENCL 6
+/** \endcond */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Resource types for OpenCL
+*/
+typedef enum nvtxResourceOpenCLType_t
+{
+    NVTX_RESOURCE_TYPE_OPENCL_DEVICE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 1),
+    NVTX_RESOURCE_TYPE_OPENCL_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 2),
+    NVTX_RESOURCE_TYPE_OPENCL_COMMANDQUEUE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 3),
+    NVTX_RESOURCE_TYPE_OPENCL_MEMOBJECT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 4),
+    NVTX_RESOURCE_TYPE_OPENCL_SAMPLER = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 5),
+    NVTX_RESOURCE_TYPE_OPENCL_PROGRAM = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 6),
+    NVTX_RESOURCE_TYPE_OPENCL_EVENT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 7),
+} nvtxResourceOpenCLType_t;
+
+
+/* ------------------------------------------------------------------------- */
+/** \brief Annotates an OpenCL device.
+ *
+ * Allows the user to associate an OpenCL device with a user-provided name.
+ *
+ * \param device - The handle of the OpenCL device to name.
+ * \param name - The name of the OpenCL device.
+ *
+ * \version \NVTX_VERSION_1
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceA(cl_device_id device, const char* name);
+NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceW(cl_device_id device, const wchar_t* name);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Annotates an OpenCL context.
+ *
+ * Allows the user to associate an OpenCL context with a user-provided name.
+ *
+ * \param context - The handle of the OpenCL context to name.
+ * \param name - The name of the OpenCL context.
+ *
+ * \version \NVTX_VERSION_1
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxNameClContextA(cl_context context, const char* name);
+NVTX_DECLSPEC void NVTX_API nvtxNameClContextW(cl_context context, const wchar_t* name);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Annotates an OpenCL command queue.
+ *
+ * Allows the user to associate an OpenCL command queue with a user-provided name.
+ *
+ * \param command_queue - The handle of the OpenCL command queue to name.
+ * \param name - The name of the OpenCL command queue.
+ *
+ * \version \NVTX_VERSION_1
+ * @{ */
+NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueA(cl_command_queue command_queue, const char* name);
+NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueW(cl_command_queue command_queue, const wchar_t* name);
+/** @} */
+
+/* ------------------------------------------------------------------------- */
+/** \brief Annotates an OpenCL memory object.
+ *
+ * Allows the user to associate an OpenCL memory object with a user-provided name.
+ *
+ * \param memobj - The handle of the OpenCL memory object to name.
+ * \param name - The name of the OpenCL memory object.
+ * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectA(cl_mem memobj, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectW(cl_mem memobj, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL sampler. + * + * Allows to associate an OpenCL sampler with a user-provided name. + * + * \param sampler - The handle of the OpenCL sampler to name. + * \param name - The name of the OpenCL sampler. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerA(cl_sampler sampler, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerW(cl_sampler sampler, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL program. + * + * Allows to associate an OpenCL program with a user-provided name. + * + * \param program - The handle of the OpenCL program to name. + * \param name - The name of the OpenCL program. + * + * \code + * cpProgram = clCreateProgramWithSource(cxGPUContext, 1, + * (const char **) &cSourceCL, &program_length, &ciErrNum); + * shrCheckErrorEX(ciErrNum, CL_SUCCESS, pCleanup); + * nvtxNameClProgram(cpProgram, L"PROGRAM_NAME"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClProgramA(cl_program program, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClProgramW(cl_program program, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL event. + * + * Allows to associate an OpenCL event with a user-provided name. + * + * \param evnt - The handle of the OpenCL event to name. + * \param name - The name of the OpenCL event. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClEventA(cl_event evnt, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClEventW(cl_event evnt, const wchar_t* name); +/** @} */ + +/** @} */ /* END RESOURCE_NAMING */ + +/* ========================================================================= */ +#ifdef UNICODE + #define nvtxNameClDevice nvtxNameClDeviceW + #define nvtxNameClContext nvtxNameClContextW + #define nvtxNameClCommandQueue nvtxNameClCommandQueueW + #define nvtxNameClMemObject nvtxNameClMemObjectW + #define nvtxNameClSampler nvtxNameClSamplerW + #define nvtxNameClProgram nvtxNameClProgramW + #define nvtxNameClEvent nvtxNameClEventW +#else + #define nvtxNameClDevice nvtxNameClDeviceA + #define nvtxNameClContext nvtxNameClContextA + #define nvtxNameClCommandQueue nvtxNameClCommandQueueA + #define nvtxNameClMemObject nvtxNameClMemObjectA + #define nvtxNameClSampler nvtxNameClSamplerA + #define nvtxNameClProgram nvtxNameClProgramA + #define nvtxNameClEvent nvtxNameClEventA +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_OPENCL /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplOpenCL_v3.h" +#undef NVTX_IMPL_GUARD_OPENCL +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_OPENCL_V3 */ diff --git a/miniCUDA124/include/nvtx3/nvToolsExtSync.h b/miniCUDA124/include/nvtx3/nvToolsExtSync.h new file mode 100644 index 0000000000000000000000000000000000000000..a39e45aad14954cf261176286f9f48c3843e59e0 --- /dev/null +++ b/miniCUDA124/include/nvtx3/nvToolsExtSync.h @@ -0,0 +1,411 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. 
All rights reserved.
+*
+* NOTICE TO USER:
+*
+* This source code is subject to NVIDIA ownership rights under U.S. and
+* international Copyright laws.
+*
+* This software and the information contained herein is PROPRIETARY and
+* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
+* of a form of NVIDIA software license agreement.
+*
+* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
+* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
+* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
+* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
+* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
+* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
+* OR PERFORMANCE OF THIS SOURCE CODE.
+*
+* U.S. Government End Users. This source code is a "commercial item" as
+* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
+* "commercial computer software" and "commercial computer software
+* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
+* and is provided to the U.S. Government only as a commercial end item.
+* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
+* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
+* source code with only those rights set forth herein.
+*
+* Any use of this source code in individual and commercial software must
+* include, in the user documentation and internal comments to the code,
+* the above Disclaimer and U.S. Government End Users Notice.
+*/
+
+#include "nvToolsExt.h"
+
+#ifndef NVTOOLSEXT_SYNC_V3
+#define NVTOOLSEXT_SYNC_V3
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* \cond SHOW_HIDDEN
+* \version \NVTX_VERSION_2
+*/
+#define NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxSyncUserAttributes_v0) ) )
+/** \endcond */
+
+
+/**
+* \page PAGE_SYNCHRONIZATION Synchronization
+*
+* This section covers a subset of the API that allows users to track additional
+* synchronization details of their application. Naming OS synchronization primitives
+* may allow users to better understand the data collected by traced synchronization
+* APIs. Additionally, a user defined synchronization object can allow users to
+* tell the tools when they are building their own synchronization systems
+* that do not rely on the OS to provide behaviors, and instead use techniques like
+* atomic operations and spinlocks.
+*
+* See module \ref SYNCHRONIZATION for details.
+* +* \par Example: +* \code +* class MyMutex +* { +* volatile long bLocked; +* nvtxSyncUser_t hSync; +* public: +* MyMutex(const char* name, nvtxDomainHandle_t d){ +* bLocked = 0; +* +* nvtxSyncUserAttributes_t attribs = { 0 }; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE; +* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII; +* attribs.message.ascii = name; +* hSync = nvtxDomainSyncUserCreate(d, &attribs); +* } +* +* ~MyMutex() { +* nvtxDomainSyncUserDestroy(hSync); +* } +* +* bool Lock() { +* nvtxDomainSyncUserAcquireStart(hSync); +* bool acquired = __sync_bool_compare_and_swap(&bLocked, 0, 1);//atomic compiler intrinsic + +* if (acquired) { +* nvtxDomainSyncUserAcquireSuccess(hSync); +* } +* else { +* nvtxDomainSyncUserAcquireFailed(hSync); +* } +* return acquired; +* } + +* void Unlock() { +* nvtxDomainSyncUserReleasing(hSync); +* bLocked = false; +* } +* }; +* \endcode +* +* \version \NVTX_VERSION_2 +*/ + +/* ------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_SYNC_OS 2 /**< Synchronization objects that are OS specific. */ +#define NVTX_RESOURCE_CLASS_SYNC_PTHREAD 3 /**< Synchronization objects that are from the POSIX Threads API (pthread)*/ +/** \endcond */ + + +/* ------------------------------------------------------------------------- */ +/** \defgroup SYNCHRONIZATION Synchronization +* See page \ref PAGE_SYNCHRONIZATION. +* @{ +*/ + +/** \brief Resource type values for OSs with POSIX Thread API support + */ +typedef enum nvtxResourceSyncPosixThreadType_t +{ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 1), /* pthread_mutex_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_CONDITION = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 2), /* pthread_cond_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_RWLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 3), /* pthread_rwlock_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_BARRIER = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 4), /* pthread_barrier_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_SPINLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 5), /* pthread_spinlock_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_ONCE = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 6) /* pthread_once_t */ +} nvtxResourceSyncPosixThreadType_t; + +/** \brief Resource type values for Windows OSs +*/ +typedef enum nvtxResourceSyncWindowsType_t +{ + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 1), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_SEMAPHORE = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 2), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_EVENT = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 3), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_CRITICAL_SECTION = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 4), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_SRWLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 5) +} nvtxResourceSyncWindowsType_t; + +/** \brief Resource type values for Linux and Linux derived OSs such as Android +* \sa +* ::nvtxResourceSyncPosixThreadType_t +*/ +typedef enum nvtxResourceSyncLinuxType_t +{ + NVTX_RESOURCE_TYPE_SYNC_LINUX_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 1), + NVTX_RESOURCE_TYPE_SYNC_LINUX_FUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 2), + NVTX_RESOURCE_TYPE_SYNC_LINUX_SEMAPHORE = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 3), + NVTX_RESOURCE_TYPE_SYNC_LINUX_COMPLETION = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 4), + NVTX_RESOURCE_TYPE_SYNC_LINUX_SPINLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 5), + 
+    NVTX_RESOURCE_TYPE_SYNC_LINUX_SEQLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 6),
+    NVTX_RESOURCE_TYPE_SYNC_LINUX_RCU = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 7)
+} nvtxResourceSyncLinuxType_t;
+
+/** \brief Resource type values for Android come from Linux.
+* \sa
+* ::nvtxResourceSyncLinuxType_t
+* ::nvtxResourceSyncPosixThreadType_t
+*/
+typedef enum nvtxResourceSyncLinuxType_t nvtxResourceSyncAndroidType_t;
+
+/** \brief User Defined Synchronization Object Handle.
+* \anchor SYNCUSER_HANDLE_STRUCTURE
+*
+* This structure is opaque to the user and is used as a handle to reference
+* a user defined synchronization object. The tools will return a pointer through the API
+* for the application to hold on its behalf, to reference the object in the future.
+*
+*/
+typedef struct nvtxSyncUser* nvtxSyncUser_t;
+
+/** \brief User Defined Synchronization Object Attributes Structure.
+* \anchor USERDEF_SYNC_ATTRIBUTES_STRUCTURE
+*
+* This structure is used to describe the attributes of a user defined synchronization
+* object. The layout of the structure is defined by a specific version of the Tools
+* Extension library and can change between different versions of the library.
+*
+* \par Initializing the Attributes
+*
+* The caller should always perform the following three tasks when using
+* attributes:
+*
+*   \li Zero the structure
+*   \li Set the version field
+*   \li Set the size field
+*
+* Zeroing the structure sets all the event attribute types and values
+* to the default value.
+*
+* The version and size fields are used by the Tools Extension
+* implementation to handle multiple versions of the attributes structure.
+*
+* It is recommended that the caller use one of the following two methods
+* to initialize the event attributes structure:
+*
+* \par Method 1: Initializing nvtxSyncUserAttributes_t for future compatibility
+* \code
+* nvtxSyncUserAttributes_t attribs = {0};
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE;
+* \endcode
+*
+* \par Method 2: Initializing nvtxSyncUserAttributes_t for a specific version
+* \code
+* nvtxSyncUserAttributes_t attribs = {0};
+* attribs.version = 1;
+* attribs.size = (uint16_t)(sizeof(nvtxSyncUserAttributes_t));
+* \endcode
+*
+* If the caller uses Method 1, it is critical that the entire binary
+* layout of the structure be configured to 0 so that all fields
+* are initialized to the default value.
+*
+* The caller should either use both NVTX_VERSION and
+* NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
+* and a versioned type (Method 2). Using a mix of the two methods
+* will likely cause either source level incompatibility or binary
+* incompatibility in the future.
+*
+* \par Setting Attribute Types and Values
+*
+*
+* \par Example:
+* \code
+* // Initialize
+* nvtxSyncUserAttributes_t attribs = {0};
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE;
+*
+* // Configure the Attributes
+* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* attribs.message.ascii = "Example";
+* \endcode
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+*/
+typedef struct nvtxSyncUserAttributes_v0
+{
+    /**
+    * \brief Version flag of the structure.
+    *
+    * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
+    * supported in this header file. This can optionally be overridden to
+    * another version of the tools extension library.
+    */
+    uint16_t version;
+
+    /**
+    * \brief Size of the structure.
+    *
+    * Needs to be set to the size in bytes of the event attribute
+    * structure used to specify the event.
+    */
+    uint16_t size;
+
+    /** \brief Message type specified in this attribute structure.
+    *
+    * Defines the message format of the attribute structure's \ref nvtxSyncUserAttributes_v0::message
+    * "message" field.
+    *
+    * Default value is NVTX_MESSAGE_UNKNOWN.
+    */
+    int32_t messageType;            /* nvtxMessageType_t */
+
+    /** \brief Message assigned to this attribute structure.
+    *
+    * The text message that is attached to an event.
+    */
+    nvtxMessageValue_t message;
+
+} nvtxSyncUserAttributes_v0;
+
+typedef struct nvtxSyncUserAttributes_v0 nvtxSyncUserAttributes_t;
+
+/* ------------------------------------------------------------------------- */
+/** \brief Create a user defined synchronization object.
+* This is used to track non-OS synchronization built on primitives such as spinlocks and atomics.
+*
+* \param domain - Domain to own the resource.
+* \param attribs - A structure to assign multiple attributes to the object.
+*
+* \return A handle that represents the newly created user defined synchronization object.
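+*
+* \par Example
+* A minimal creation sketch; the domain name "com.example" and the message
+* string below are illustrative assumptions, not values required by the API:
+* \code
+* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.example");
+*
+* nvtxSyncUserAttributes_t attribs = { 0 };
+* attribs.version = NVTX_VERSION;
+* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE;
+* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
+* attribs.message.ascii = "MySpinlock"; // illustrative name
+*
+* nvtxSyncUser_t hSync = nvtxDomainSyncUserCreate(domain, &attribs);
+* \endcode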
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+* ::nvtxDomainSyncUserDestroy
+* ::nvtxDomainSyncUserAcquireStart
+* ::nvtxDomainSyncUserAcquireFailed
+* ::nvtxDomainSyncUserAcquireSuccess
+* ::nvtxDomainSyncUserReleasing
+*
+* \version \NVTX_VERSION_2
+*/
+NVTX_DECLSPEC nvtxSyncUser_t NVTX_API nvtxDomainSyncUserCreate(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs);
+
+/* ------------------------------------------------------------------------- */
+/** \brief Destroy a user defined synchronization object.
+* This is used to track non-OS synchronization built on primitives such as spinlocks and atomics.
+*
+* \param handle - A handle to the object to operate on.
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+* ::nvtxDomainSyncUserDestroy
+* ::nvtxDomainSyncUserAcquireStart
+* ::nvtxDomainSyncUserAcquireFailed
+* ::nvtxDomainSyncUserAcquireSuccess
+* ::nvtxDomainSyncUserReleasing
+*
+* \version \NVTX_VERSION_2
+*/
+NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserDestroy(nvtxSyncUser_t handle);
+
+/* ------------------------------------------------------------------------- */
+/** \brief Signal to tools that an attempt to acquire a user defined synchronization object has started.
+*
+* \param handle - A handle to the object to operate on.
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+* ::nvtxDomainSyncUserDestroy
+* ::nvtxDomainSyncUserAcquireStart
+* ::nvtxDomainSyncUserAcquireFailed
+* ::nvtxDomainSyncUserAcquireSuccess
+* ::nvtxDomainSyncUserReleasing
+*
+* \version \NVTX_VERSION_2
+*/
+NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireStart(nvtxSyncUser_t handle);
+
+/* ------------------------------------------------------------------------- */
+/** \brief Signal to tools of failure in acquiring a user defined synchronization object.
+* This should be called after \ref nvtxDomainSyncUserAcquireStart.
+*
+* \param handle - A handle to the object to operate on.
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+* ::nvtxDomainSyncUserDestroy
+* ::nvtxDomainSyncUserAcquireStart
+* ::nvtxDomainSyncUserAcquireFailed
+* ::nvtxDomainSyncUserAcquireSuccess
+* ::nvtxDomainSyncUserReleasing
+*
+* \version \NVTX_VERSION_2
+*/
+NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireFailed(nvtxSyncUser_t handle);
+
+/* ------------------------------------------------------------------------- */
+/** \brief Signal to tools of success in acquiring a user defined synchronization object.
+* This should be called after \ref nvtxDomainSyncUserAcquireStart.
+*
+* \param handle - A handle to the object to operate on.
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+* ::nvtxDomainSyncUserDestroy
+* ::nvtxDomainSyncUserAcquireStart
+* ::nvtxDomainSyncUserAcquireFailed
+* ::nvtxDomainSyncUserAcquireSuccess
+* ::nvtxDomainSyncUserReleasing
+*
+* \version \NVTX_VERSION_2
+*/
+NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireSuccess(nvtxSyncUser_t handle);
+
+/* ------------------------------------------------------------------------- */
+/** \brief Signal to tools of releasing a reservation on a user defined synchronization object.
+* This should be called after \ref nvtxDomainSyncUserAcquireSuccess.
+*
+* \param handle - A handle to the object to operate on.
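+*
+* \par Example
+* A sketch of the expected call order when releasing; \c hSync is assumed to
+* come from ::nvtxDomainSyncUserCreate, and \c my_release_spinlock is an
+* illustrative user primitive, not part of this API:
+* \code
+* nvtxDomainSyncUserReleasing(hSync); // announce the release to the tools
+* my_release_spinlock();              // then perform the actual release
+* \endcode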
+*
+* \sa
+* ::nvtxDomainSyncUserCreate
+* ::nvtxDomainSyncUserDestroy
+* ::nvtxDomainSyncUserAcquireStart
+* ::nvtxDomainSyncUserAcquireFailed
+* ::nvtxDomainSyncUserAcquireSuccess
+* ::nvtxDomainSyncUserReleasing
+*
+* \version \NVTX_VERSION_2
+*/
+NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserReleasing(nvtxSyncUser_t handle);
+
+
+/** @} */ /*END defgroup*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#ifndef NVTX_NO_IMPL
+#define NVTX_IMPL_GUARD_SYNC /* Ensure other headers cannot be included directly */
+#include "nvtxDetail/nvtxImplSync_v3.h"
+#undef NVTX_IMPL_GUARD_SYNC
+#endif /*NVTX_NO_IMPL*/
+
+#endif /* NVTOOLSEXT_SYNC_V3 */ diff --git a/miniCUDA124/include/thrust/addressof.h b/miniCUDA124/include/thrust/addressof.h new file mode 100644 index 0000000000000000000000000000000000000000..a5901546a0c871df3784eff3cba41363b8bb22bf --- /dev/null +++ b/miniCUDA124/include/thrust/addressof.h @@ -0,0 +1,39 @@
+// Copyright (c) 2018 NVIDIA Corporation
+// Author: Bryce Adelstein Lelbach
+//
+// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+
+#if THRUST_CPP_DIALECT >= 2011
+#  include
+#endif
+
+THRUST_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*! Obtains the actual address of the object or function arg, even in the presence of an overloaded operator&.
+ */
+template <typename T>
+__host__ __device__
+T* addressof(T& arg)
+{
+  return reinterpret_cast<T*>(
+    &const_cast<char&>(reinterpret_cast<const volatile char&>(arg))
+  );
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+THRUST_NAMESPACE_END diff --git a/miniCUDA124/include/thrust/adjacent_difference.h b/miniCUDA124/include/thrust/adjacent_difference.h new file mode 100644 index 0000000000000000000000000000000000000000..68bca18d9a73b252a1e0af8c9d1a1e7d8e0016c9 --- /dev/null +++ b/miniCUDA124/include/thrust/adjacent_difference.h @@ -0,0 +1,252 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file adjacent_difference.h
+ *  \brief Compute difference between consecutive elements of a range
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+#include <thrust/detail/execution_policy.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup transformations Transformations
+ *  \{
+ */
+
+
+/*! \p adjacent_difference calculates the differences of adjacent elements in the
+ *  range [first, last).
+ *  That is, \*first is assigned to
+ *  \*result, and, for each iterator \p i in the range
+ *  [first + 1, last), the difference of \*i and \*(i - 1)
+ *  is assigned to \*(result + (i - first)).
+ *
+ *  This version of \p adjacent_difference uses operator- to calculate
+ *  differences.
+ *
+ *  The algorithm's execution is parallelized as determined by \p exec.
+ *
+ *  \param exec The execution policy to use for parallelization.
+ *  \param first The beginning of the input range.
+ *  \param last The end of the input range.
+ *  \param result The beginning of the output range.
+ *  \return The iterator result + (last - first)
+ *
+ *  \tparam DerivedPolicy The name of the derived execution policy.
+ *  \tparam InputIterator is a model of Input Iterator;
+ *          if \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c y is defined,
+ *          and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
+ *          and the return type of x - y is convertible to a type in \p OutputIterator's set of \c value_types.
+ *  \tparam OutputIterator is a model of Output Iterator.
+ *
+ *  \remark Note that \p result is permitted to be the same iterator as \p first. This is
+ *          useful for computing differences "in place".
+ *
+ *  The following code snippet demonstrates how to use \p adjacent_difference to compute
+ *  the difference between adjacent elements of a range using the \p thrust::device execution policy:
+ *
+ *  \code
+ *  #include <thrust/adjacent_difference.h>
+ *  #include <thrust/device_vector.h>
+ *  #include <thrust/execution_policy.h>
+ *  ...
+ *  int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
+ *  thrust::device_vector<int> d_data(h_data, h_data + 8);
+ *  thrust::device_vector<int> d_result(8);
+ *
+ *  thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin());
+ *
+ *  // d_result is now [1, 1, -1, 1, -1, 1, -1, 1]
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
+ *  \see inclusive_scan
+ */
+template <typename DerivedPolicy, typename InputIterator, typename OutputIterator>
+__host__ __device__
+OutputIterator adjacent_difference(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                                   InputIterator first, InputIterator last,
+                                   OutputIterator result);
+
+/*! \p adjacent_difference calculates the differences of adjacent elements in the
+ *  range [first, last). That is, \*first is assigned to
+ *  \*result, and, for each iterator \p i in the range
+ *  [first + 1, last), binary_op(\*i, \*(i - 1)) is assigned to
+ *  \*(result + (i - first)).
+ *
+ *  This version of \p adjacent_difference uses the binary function \p binary_op to
+ *  calculate differences.
+ *
+ *  The algorithm's execution is parallelized as determined by \p exec.
+ *
+ *  \param exec The execution policy to use for parallelization.
+ *  \param first The beginning of the input range.
+ *  \param last The end of the input range.
+ *  \param result The beginning of the output range.
+ *  \param binary_op The binary function used to compute differences.
+ *  \return The iterator result + (last - first)
+ *
+ *  \tparam DerivedPolicy The name of the derived execution policy.
+ *  \tparam InputIterator is a model of Input Iterator,
+ *          and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type,
+ *          and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ *  \tparam OutputIterator is a model of Output Iterator.
+ *  \tparam BinaryFunction is a model of Binary Function, and \p BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
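+ *
+ *  Any callable with a compatible signature can be used as \p binary_op. As an
+ *  illustrative sketch (assuming C++11 and that nvcc's extended device lambda
+ *  support is enabled), a lambda computing the absolute difference of
+ *  neighboring elements:
+ *
+ *  \code
+ *  thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin(),
+ *                              [] __device__ (int a, int b) { return a > b ? a - b : b - a; });
+ *  \endcode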
+ *
+ *  \remark Note that \p result is permitted to be the same iterator as \p first. This is
+ *          useful for computing differences "in place".
+ *
+ *  The following code snippet demonstrates how to use \p adjacent_difference to compute
+ *  the sum of adjacent elements of a range using the \p thrust::device execution policy:
+ *
+ *  \code
+ *  #include <thrust/adjacent_difference.h>
+ *  #include <thrust/device_vector.h>
+ *  #include <thrust/execution_policy.h>
+ *  #include <thrust/functional.h>
+ *  ...
+ *  int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
+ *  thrust::device_vector<int> d_data(h_data, h_data + 8);
+ *  thrust::device_vector<int> d_result(8);
+ *
+ *  thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin(), thrust::plus<int>());
+ *
+ *  // d_result is now [1, 3, 3, 3, 3, 3, 3, 3]
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
+ *  \see inclusive_scan
+ */
+template <typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename BinaryFunction>
+__host__ __device__
+OutputIterator adjacent_difference(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                                   InputIterator first, InputIterator last,
+                                   OutputIterator result,
+                                   BinaryFunction binary_op);
+
+/*! \p adjacent_difference calculates the differences of adjacent elements in the
+ *  range [first, last). That is, \*first is assigned to
+ *  \*result, and, for each iterator \p i in the range
+ *  [first + 1, last), the difference of \*i and \*(i - 1)
+ *  is assigned to \*(result + (i - first)).
+ *
+ *  This version of \p adjacent_difference uses operator- to calculate
+ *  differences.
+ *
+ *  \param first The beginning of the input range.
+ *  \param last The end of the input range.
+ *  \param result The beginning of the output range.
+ *  \return The iterator result + (last - first)
+ *
+ *  \tparam InputIterator is a model of Input Iterator;
+ *          if \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c y is defined,
+ *          and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
+ *          and the return type of x - y is convertible to a type in \p OutputIterator's set of \c value_types.
+ *  \tparam OutputIterator is a model of Output Iterator.
+ *
+ *  \remark Note that \p result is permitted to be the same iterator as \p first. This is
+ *          useful for computing differences "in place".
+ *
+ *  The following code snippet demonstrates how to use \p adjacent_difference to compute
+ *  the difference between adjacent elements of a range.
+ *
+ *  \code
+ *  #include <thrust/adjacent_difference.h>
+ *  #include <thrust/device_vector.h>
+ *  ...
+ *  int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
+ *  thrust::device_vector<int> d_data(h_data, h_data + 8);
+ *  thrust::device_vector<int> d_result(8);
+ *
+ *  thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin());
+ *
+ *  // d_result is now [1, 1, -1, 1, -1, 1, -1, 1]
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
+ *  \see inclusive_scan
+ */
+template <typename InputIterator, typename OutputIterator>
+OutputIterator adjacent_difference(InputIterator first, InputIterator last,
+                                   OutputIterator result);
+
+/*! \p adjacent_difference calculates the differences of adjacent elements in the
+ *  range [first, last). That is, \*first is assigned to
+ *  \*result, and, for each iterator \p i in the range
+ *  [first + 1, last), binary_op(\*i, \*(i - 1)) is assigned to
+ *  \*(result + (i - first)).
+ *
+ *  This version of \p adjacent_difference uses the binary function \p binary_op to
+ *  calculate differences.
+ *
+ *  \param first The beginning of the input range.
+ *  \param last The end of the input range.
+ *  \param result The beginning of the output range.
+ *  \param binary_op The binary function used to compute differences.
+ *  \return The iterator result + (last - first)
+ *
+ *  \tparam InputIterator is a model of Input Iterator,
+ *          and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type,
+ *          and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ *  \tparam OutputIterator is a model of Output Iterator.
+ *  \tparam BinaryFunction is a model of Binary Function, and \p BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ *
+ *  \remark Note that \p result is permitted to be the same iterator as \p first. This is
+ *          useful for computing differences "in place".
+ *
+ *  The following code snippet demonstrates how to use \p adjacent_difference to compute
+ *  the sum of adjacent elements of a range.
+ *
+ *  \code
+ *  #include <thrust/adjacent_difference.h>
+ *  #include <thrust/device_vector.h>
+ *  #include <thrust/functional.h>
+ *  ...
+ *  int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
+ *  thrust::device_vector<int> d_data(h_data, h_data + 8);
+ *  thrust::device_vector<int> d_result(8);
+ *
+ *  thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin(), thrust::plus<int>());
+ *
+ *  // d_result is now [1, 3, 3, 3, 3, 3, 3, 3]
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
+ *  \see inclusive_scan
+ */
+template <typename InputIterator, typename OutputIterator, typename BinaryFunction>
+OutputIterator adjacent_difference(InputIterator first, InputIterator last,
+                                   OutputIterator result,
+                                   BinaryFunction binary_op);
+
+/*! \}
+ */
+
+THRUST_NAMESPACE_END
+
+#include
+ diff --git a/miniCUDA124/include/thrust/advance.h b/miniCUDA124/include/thrust/advance.h new file mode 100644 index 0000000000000000000000000000000000000000..f7e88f3b6a5a7bd56c2ac34aa54650ddcd82a449 --- /dev/null +++ b/miniCUDA124/include/thrust/advance.h @@ -0,0 +1,148 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file advance.h
+ *  \brief Advance an iterator by a given distance.
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup iterators
+ *  \{
+ */
+
+/*! \p advance(i, n) increments the iterator \p i by the distance \p n.
+ *  If n > 0 it is equivalent to executing ++i \p n
+ *  times, and if n < 0 it is equivalent to executing --i
+ *  \p n times. If n == 0, the call has no effect.
+ *
+ *  \param i The iterator to be advanced.
+ *  \param n The distance by which to advance the iterator.
+ *
+ *  \tparam InputIterator is a model of Input Iterator.
+ *  \tparam Distance is an integral type that is convertible to \p InputIterator's distance type.
+ *
+ *  \pre \p n shall be negative only for bidirectional and random access iterators.
+ *
+ *  The following code snippet demonstrates how to use \p advance to increment
+ *  an iterator a given number of times.
+ *
+ *  \code
+ *  #include <thrust/advance.h>
+ *  #include <thrust/device_vector.h>
+ *  ...
+ *  thrust::device_vector<int> vec(13);
+ *  thrust::device_vector<int>::iterator iter = vec.begin();
+ *
+ *  thrust::advance(iter, 7);
+ *
+ *  // iter - vec.begin() == 7
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/iterator/advance
+ */
+template <typename InputIterator, typename Distance>
+__host__ __device__
+void advance(InputIterator& i, Distance n);

+/*! \p next(i, n) returns the \p n th successor of the iterator \p i.
+ *
+ *  \param i An iterator.
+ *  \param n The number of elements to advance.
+ *
+ *  \tparam InputIterator must meet the requirements of InputIterator.
+ *
+ *  \pre \p n shall be negative only for bidirectional and random access iterators.
+ *
+ *  The following code snippet demonstrates how to use \p next.
+ *
+ *  \code
+ *  #include <thrust/advance.h>
+ *  #include <thrust/device_vector.h>
+ *  ...
+ *  thrust::device_vector<int> vec(13);
+ *  thrust::device_vector<int>::iterator i0 = vec.begin();
+ *
+ *  auto i1 = thrust::next(i0);
+ *
+ *  // i0 - vec.begin() == 0
+ *  // i1 - vec.begin() == 1
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/iterator/next
+ */
+#if 0 // Doxygen only
+template <typename InputIterator>
+__host__ __device__
+InputIterator next(
+  InputIterator i
+, typename iterator_traits<InputIterator>::difference_type n = 1
+);
+#endif
+
+/*! \p prev(i, n) returns the \p n th predecessor of the iterator \p i.
+ *
+ *  \param i An iterator.
+ *  \param n The number of elements to descend.
+ *
+ *  \tparam BidirectionalIterator must meet the requirements of BidirectionalIterator.
+ *
+ *  The following code snippet demonstrates how to use \p prev.
+ *
+ *  \code
+ *  #include <thrust/advance.h>
+ *  #include <thrust/device_vector.h>
+ *  ...
+ *  thrust::device_vector<int> vec(13);
+ *  thrust::device_vector<int>::iterator i0 = vec.end();
+ *
+ *  auto i1 = thrust::prev(i0);
+ *
+ *  // vec.end() - i0 == 0
+ *  // vec.end() - i1 == 1
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/iterator/prev
+ */
+#if 0 // Doxygen only
+template <typename BidirectionalIterator>
+__host__ __device__
+BidirectionalIterator prev(
+  BidirectionalIterator i
+, typename iterator_traits<BidirectionalIterator>::difference_type n = 1
+);
+#endif
+
+/*!
\} // end iterators + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/miniCUDA124/include/thrust/allocate_unique.h b/miniCUDA124/include/thrust/allocate_unique.h new file mode 100644 index 0000000000000000000000000000000000000000..a0b08d2be9e6a884790a6a3fe51cbaa346d480c4 --- /dev/null +++ b/miniCUDA124/include/thrust/allocate_unique.h @@ -0,0 +1,451 @@ +// Copyright (c) 2018 NVIDIA Corporation +// Author: Bryce Adelstein Lelbach +// +// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt) + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include +#include +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +// wg21.link/p0316r0 + +/////////////////////////////////////////////////////////////////////////////// + +namespace detail +{ + +template +void allocator_delete_impl( + Allocator const& alloc, Pointer p, std::false_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + traits::destroy(alloc_T, thrust::raw_pointer_cast(p)); + traits::deallocate(alloc_T, p, 1); + } +} + +template +void allocator_delete_impl( + Allocator const& alloc, Pointer p, std::true_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + traits::deallocate(alloc_T, p, 1); + } +} + +} // namespace detail + +template +struct allocator_delete final +{ + using allocator_type + = typename std::remove_cv< + typename std::remove_reference::type + >::type::template rebind::other; + using pointer = typename detail::allocator_traits::pointer; + + template + allocator_delete(UAllocator&& other) noexcept + : alloc_(THRUST_FWD(other)) + {} + + template + allocator_delete( + allocator_delete const& other + ) noexcept + : alloc_(other.get_allocator()) + {} + template + allocator_delete( + allocator_delete&& other + ) noexcept + : alloc_(std::move(other.get_allocator())) + {} + + template + allocator_delete& operator=( + allocator_delete const& other + ) noexcept + { + alloc_ = other.get_allocator(); + return *this; + } + template + allocator_delete& operator=( + allocator_delete&& other + ) noexcept + { + alloc_ = std::move(other.get_allocator()); + return *this; + } + + void operator()(pointer p) + { + std::integral_constant ic; + + detail::allocator_delete_impl(get_allocator(), p, ic); + } + + allocator_type& get_allocator() noexcept { return alloc_; } + allocator_type const& get_allocator() const noexcept { return alloc_; } + + void swap(allocator_delete& other) noexcept + { + using std::swap; + swap(alloc_, other.alloc_); + } + +private: + allocator_type alloc_; +}; + +template +using uninitialized_allocator_delete = allocator_delete; + +namespace detail { + +template +void array_allocator_delete_impl( + Allocator const& alloc, Pointer p, Size count, std::false_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename 
traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + destroy_n(alloc_T, p, count); + traits::deallocate(alloc_T, p, count); + } +} + +template +void array_allocator_delete_impl( + Allocator const& alloc, Pointer p, Size count, std::true_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + traits::deallocate(alloc_T, p, count); + } +} + +} // namespace detail + +template +struct array_allocator_delete final +{ + using allocator_type + = typename std::remove_cv< + typename std::remove_reference::type + >::type::template rebind::other; + using pointer = typename detail::allocator_traits::pointer; + + template + array_allocator_delete(UAllocator&& other, std::size_t n) noexcept + : alloc_(THRUST_FWD(other)), count_(n) + {} + + template + array_allocator_delete( + array_allocator_delete const& other + ) noexcept + : alloc_(other.get_allocator()), count_(other.count_) + {} + template + array_allocator_delete( + array_allocator_delete&& other + ) noexcept + : alloc_(std::move(other.get_allocator())), count_(other.count_) + {} + + template + array_allocator_delete& operator=( + array_allocator_delete const& other + ) noexcept + { + alloc_ = other.get_allocator(); + count_ = other.count_; + return *this; + } + template + array_allocator_delete& operator=( + array_allocator_delete&& other + ) noexcept + { + alloc_ = std::move(other.get_allocator()); + count_ = other.count_; + return *this; + } + + void operator()(pointer p) + { + std::integral_constant ic; + + detail::array_allocator_delete_impl(get_allocator(), p, count_, ic); + } + + allocator_type& get_allocator() noexcept { return alloc_; } + allocator_type const& get_allocator() const noexcept { return alloc_; } + + void swap(array_allocator_delete& other) noexcept + { + using std::swap; + swap(alloc_, other.alloc_); + swap(count_, other.count_); + } + +private: + allocator_type alloc_; + std::size_t count_; +}; + +template +using uninitialized_array_allocator_delete + = array_allocator_delete; + +/////////////////////////////////////////////////////////////////////////////// + +template +struct tagged_deleter : Lambda +{ + __host__ __device__ + tagged_deleter(Lambda&& l) : Lambda(THRUST_FWD(l)) {} + + using pointer = Pointer; +}; + +template +__host__ __device__ +tagged_deleter +make_tagged_deleter(Lambda&& l) +{ + return tagged_deleter(THRUST_FWD(l)); +} + +/////////////////////////////////////////////////////////////////////////////// + +template +__host__ +std::unique_ptr< + T, + allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +allocate_unique( + Allocator const& alloc, Args&&... args +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [&alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, 1); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter); + + traits::construct( + alloc_T, thrust::raw_pointer_cast(hold.get()), THRUST_FWD(args)... 
+ ); + auto deleter = allocator_delete(alloc); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +template +__host__ +std::unique_ptr< + T, + uninitialized_allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +uninitialized_allocate_unique( + Allocator const& alloc +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [&alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, 1); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter); + + auto deleter = uninitialized_allocator_delete< + T, typename traits::allocator_type + >(alloc_T); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +template +__host__ +std::unique_ptr< + T[], + array_allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +allocate_unique_n( + Allocator const& alloc, Size n, Args&&... args +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [n, &alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, n); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter); + + uninitialized_construct_n_with_allocator( + alloc_T, hold.get(), n, THRUST_FWD(args)... 
+ ); + auto deleter = array_allocator_delete< + T, typename traits::allocator_type + >(alloc_T, n); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +template +__host__ +std::unique_ptr< + T[], + uninitialized_array_allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +uninitialized_allocate_unique_n( + Allocator const& alloc, Size n +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [n, &alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, n); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter); + + auto deleter = uninitialized_array_allocator_delete< + T, typename traits::allocator_type + >(alloc_T, n); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +/////////////////////////////////////////////////////////////////////////////// + +THRUST_NAMESPACE_END + +#endif // THRUST_CPP_DIALECT >= 2011 + diff --git a/miniCUDA124/include/thrust/binary_search.h b/miniCUDA124/include/thrust/binary_search.h new file mode 100644 index 0000000000000000000000000000000000000000..d0d0d5631a87fa1ce51e2146678519352cc941ae --- /dev/null +++ b/miniCUDA124/include/thrust/binary_search.h @@ -0,0 +1,1907 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file binary_search.h + * \brief Search for values in sorted ranges. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + + +/*! \addtogroup searching + * \ingroup algorithms + * \{ + */ + + +/*! \addtogroup binary_search Binary Search + * \ingroup searching + * \{ + */ + + +////////////////////// +// Scalar Functions // +////////////////////// + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), *j < value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. 
+ * \param value The value to be searched. + * \return The furthermost iterator \c i, such that *i < value. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 0); // returns input.begin() + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 2); // returns input.begin() + 1 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 8); // returns input.begin() + 4 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator lower_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const LessThanComparable &value); + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), *j < value. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that *i < value. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(input.begin(), input.end(), 0); // returns input.begin() + * thrust::lower_bound(input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 2); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::lower_bound(input.begin(), input.end(), 8); // returns input.begin() + 4 + * thrust::lower_bound(input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator lower_bound(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! 
\p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses function object \c comp for comparison + * and returns the furthermost iterator \c i in [first, last) + * such that, for every iterator \c j in [first, i), + * comp(*j, value) is \c true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(*i, value) is \c true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + * thrust::lower_bound(input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::lower_bound(input.begin(), input.end(), 8, thrust::less()); // returns input.begin() + 4 + * thrust::lower_bound(input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator lower_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T &value, + StrictWeakOrdering comp); + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses function object \c comp for comparison + * and returns the furthermost iterator \c i in [first, last) + * such that, for every iterator \c j in [first, i), + * comp(*j, value) is \c true. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(*i, value) is \c true. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. 
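+ *
+ *  The comparator must agree with the range's ordering. For a range sorted in
+ *  descending order, for example, \c thrust::greater can serve as \p comp. An
+ *  illustrative sketch (the descending \c input vector here is an assumption):
+ *
+ *  \code
+ *  // input is sorted descending: {8, 7, 5, 2, 0}
+ *  thrust::lower_bound(input.begin(), input.end(), 5, thrust::greater<int>()); // returns input.begin() + 2
+ *  \endcode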
+ * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + * thrust::lower_bound(input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::lower_bound(input.begin(), input.end(), 8, thrust::less()); // returns input.begin() + 4 + * thrust::lower_bound(input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator lower_bound(ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), value < *j + * is \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that value < *i is \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelism: + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 0); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 2); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 8); // returns input.end() + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator upper_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const LessThanComparable &value); + + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), value < *j + * is \c false. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that value < *i is \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(input.begin(), input.end(), 0); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 2); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 8); // returns input.end() + * thrust::upper_bound(input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator upper_bound(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses function object \c comp for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), comp(value, *j) + * is \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. 
+ * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(value, *i) is \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 8, thrust::less()); // returns input.end() + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator upper_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T &value, + StrictWeakOrdering comp); + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses function object \c comp for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), comp(value, *j) + * is \c false. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(value, *i) is \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 8, thrust::less()); // returns input.end() + * thrust::upper_bound(input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator upper_bound(ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! \p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * *i < value and value < *i are both \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::binary_search(thrust::device, input.begin(), input.end(), 0); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 1); // returns false + * thrust::binary_search(thrust::device, input.begin(), input.end(), 2); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 3); // returns false + * thrust::binary_search(thrust::device, input.begin(), input.end(), 8); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 9); // returns false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +__host__ __device__ +bool binary_search(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). 
+ * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * *i < value and value < *i are both \c false. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::binary_search(input.begin(), input.end(), 0); // returns true + * thrust::binary_search(input.begin(), input.end(), 1); // returns false + * thrust::binary_search(input.begin(), input.end(), 2); // returns true + * thrust::binary_search(input.begin(), input.end(), 3); // returns false + * thrust::binary_search(input.begin(), input.end(), 8); // returns true + * thrust::binary_search(input.begin(), input.end(), 9); // returns false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +bool binary_search(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * comp(*i, value) and comp(value, *i) are both \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... 
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::binary_search(thrust::device, input.begin(), input.end(), 0, thrust::less<int>()); // returns true
+ * thrust::binary_search(thrust::device, input.begin(), input.end(), 1, thrust::less<int>()); // returns false
+ * thrust::binary_search(thrust::device, input.begin(), input.end(), 2, thrust::less<int>()); // returns true
+ * thrust::binary_search(thrust::device, input.begin(), input.end(), 3, thrust::less<int>()); // returns false
+ * thrust::binary_search(thrust::device, input.begin(), input.end(), 8, thrust::less<int>()); // returns true
+ * thrust::binary_search(thrust::device, input.begin(), input.end(), 9, thrust::less<int>()); // returns false
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/binary_search
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
+__host__ __device__
+bool binary_search(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                   ForwardIterator first,
+                   ForwardIterator last,
+                   const T& value,
+                   StrictWeakOrdering comp);
+
+
+/*! \p binary_search is a version of binary search: it attempts to find
+ * the element value in an ordered range [first, last).
+ * It returns \c true if an element that is equivalent to \c value
+ * is present in [first, last) and \c false if no such element
+ * exists. Specifically, this version returns \c true if and only if
+ * there exists an iterator \c i in [first, last) such that
+ * comp(*i, value) and comp(value, *i) are both \c false.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param value The value to be searched.
+ * \param comp The comparison operator.
+ * \return \c true if an equivalent element exists in [first, last), otherwise \c false.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam T is comparable to \p ForwardIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * The following code snippet demonstrates how to use \p binary_search
+ * to search for values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::binary_search(input.begin(), input.end(), 0, thrust::less<int>()); // returns true
+ * thrust::binary_search(input.begin(), input.end(), 1, thrust::less<int>()); // returns false
+ * thrust::binary_search(input.begin(), input.end(), 2, thrust::less<int>()); // returns true
+ * thrust::binary_search(input.begin(), input.end(), 3, thrust::less<int>()); // returns false
+ * thrust::binary_search(input.begin(), input.end(), 8, thrust::less<int>()); // returns true
+ * thrust::binary_search(input.begin(), input.end(), 9, thrust::less<int>()); // returns false
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/binary_search
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ */
+template<typename ForwardIterator, typename T, typename StrictWeakOrdering>
+bool binary_search(ForwardIterator first,
+                   ForwardIterator last,
+                   const T& value,
+                   StrictWeakOrdering comp);
+
+
+/*! \p equal_range is a version of binary search: it attempts to find
+ * the element value in an ordered range [first, last). The
+ * value returned by \p equal_range is essentially a combination of
+ * the values returned by \p lower_bound and \p upper_bound: it returns
+ * a \p pair of iterators \c i and \c j such that \c i is the first
+ * position where value could be inserted without violating the
+ * ordering and \c j is the last position where value could be inserted
+ * without violating the ordering. It follows that every element in the
+ * range [i, j) is equivalent to value, and that
+ * [i, j) is the largest subrange of [first, last) that
+ * has this property.
+ *
+ * This version of \p equal_range returns a \p pair of iterators
+ * [i, j), where \c i is the furthermost iterator in
+ * [first, last) such that, for every iterator \c k in
+ * [first, i), *k < value. \c j is the furthermost
+ * iterator in [first, last) such that, for every iterator
+ * \c k in [first, j), value < *k is \c false.
+ * For every iterator \c k in [i, j), neither
+ * value < *k nor *k < value is \c true.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param value The value to be searched.
+ * \return A \p pair of iterators [i, j) that define the range of equivalent elements.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam LessThanComparable is a model of LessThanComparable.
+ *
+ * The following code snippet demonstrates how to use \p equal_range
+ * to search for values in an ordered range using the \p thrust::device execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 0); // returns [input.begin(), input.begin() + 1)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 1); // returns [input.begin() + 1, input.begin() + 1)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 2); // returns [input.begin() + 1, input.begin() + 2)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 3); // returns [input.begin() + 2, input.begin() + 2)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 8); // returns [input.begin() + 4, input.end())
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 9); // returns [input.end(), input.end())
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/equal_range
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p binary_search
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
+__host__ __device__
+thrust::pair<ForwardIterator, ForwardIterator>
+equal_range(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+            ForwardIterator first,
+            ForwardIterator last,
+            const LessThanComparable& value);
+
+
+/*! \p equal_range is a version of binary search: it attempts to find
+ * the element value in an ordered range [first, last). The
+ * value returned by \p equal_range is essentially a combination of
+ * the values returned by \p lower_bound and \p upper_bound: it returns
+ * a \p pair of iterators \c i and \c j such that \c i is the first
+ * position where value could be inserted without violating the
+ * ordering and \c j is the last position where value could be inserted
+ * without violating the ordering.
+ * It follows that every element in the range [i, j) is equivalent to
+ * value, and that [i, j) is the largest subrange of [first, last)
+ * that has this property.
+ *
+ * This version of \p equal_range returns a \p pair of iterators
+ * [i, j), where \c i is the furthermost iterator in
+ * [first, last) such that, for every iterator \c k in
+ * [first, i), *k < value. \c j is the furthermost
+ * iterator in [first, last) such that, for every iterator
+ * \c k in [first, j), value < *k is \c false.
+ * For every iterator \c k in [i, j), neither
+ * value < *k nor *k < value is \c true.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param value The value to be searched.
+ * \return A \p pair of iterators [i, j) that define the range of equivalent elements.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam LessThanComparable is a model of LessThanComparable.
+ *
+ * The following code snippet demonstrates how to use \p equal_range
+ * to search for values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::equal_range(input.begin(), input.end(), 0); // returns [input.begin(), input.begin() + 1)
+ * thrust::equal_range(input.begin(), input.end(), 1); // returns [input.begin() + 1, input.begin() + 1)
+ * thrust::equal_range(input.begin(), input.end(), 2); // returns [input.begin() + 1, input.begin() + 2)
+ * thrust::equal_range(input.begin(), input.end(), 3); // returns [input.begin() + 2, input.begin() + 2)
+ * thrust::equal_range(input.begin(), input.end(), 8); // returns [input.begin() + 4, input.end())
+ * thrust::equal_range(input.begin(), input.end(), 9); // returns [input.end(), input.end())
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/equal_range
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p binary_search
+ */
+template<typename ForwardIterator, typename LessThanComparable>
+thrust::pair<ForwardIterator, ForwardIterator>
+equal_range(ForwardIterator first,
+            ForwardIterator last,
+            const LessThanComparable& value);
+
+
+/*! \p equal_range is a version of binary search: it attempts to find
+ * the element value in an ordered range [first, last). The
+ * value returned by \p equal_range is essentially a combination of
+ * the values returned by \p lower_bound and \p upper_bound: it returns
+ * a \p pair of iterators \c i and \c j such that \c i is the first
+ * position where value could be inserted without violating the
+ * ordering and \c j is the last position where value could be inserted
+ * without violating the ordering. It follows that every element in the
+ * range [i, j) is equivalent to value, and that [i, j) is the largest
+ * subrange of [first, last) that has this property.
+ *
+ * This version of \p equal_range returns a \p pair of iterators
+ * [i, j). \c i is the furthermost iterator in
+ * [first, last) such that, for every iterator \c k in
+ * [first, i), comp(*k, value) is \c true.
+ * \c j is the furthermost iterator in [first, last) such
+ * that, for every iterator \c k in [first, j),
+ * comp(value, *k) is \c false. For every iterator \c k
+ * in [i, j), neither comp(value, *k) nor
+ * comp(*k, value) is \c true.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param value The value to be searched.
+ * \param comp The comparison operator.
+ * \return A \p pair of iterators [i, j) that define the range of equivalent elements.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam T is comparable to \p ForwardIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * The following code snippet demonstrates how to use \p equal_range
+ * to search for values in an ordered range using the \p thrust::device execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 0, thrust::less<int>()); // returns [input.begin(), input.begin() + 1)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 1, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 1)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 2, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 2)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 3, thrust::less<int>()); // returns [input.begin() + 2, input.begin() + 2)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 8, thrust::less<int>()); // returns [input.begin() + 4, input.end())
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 9, thrust::less<int>()); // returns [input.end(), input.end())
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/equal_range
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p binary_search
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
+__host__ __device__
+thrust::pair<ForwardIterator, ForwardIterator>
+equal_range(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+            ForwardIterator first,
+            ForwardIterator last,
+            const T& value,
+            StrictWeakOrdering comp);
+
+
+/*! \p equal_range is a version of binary search: it attempts to find
+ * the element value in an ordered range [first, last). The
+ * value returned by \p equal_range is essentially a combination of
+ * the values returned by \p lower_bound and \p upper_bound: it returns
+ * a \p pair of iterators \c i and \c j such that \c i is the first
+ * position where value could be inserted without violating the
+ * ordering and \c j is the last position where value could be inserted
+ * without violating the ordering. It follows that every element in the
+ * range [i, j) is equivalent to value, and that [i, j) is the largest
+ * subrange of [first, last) that has this property.
+ *
+ * This version of \p equal_range returns a \p pair of iterators
+ * [i, j). \c i is the furthermost iterator in
+ * [first, last) such that, for every iterator \c k in
+ * [first, i), comp(*k, value) is \c true.
+ * \c j is the furthermost iterator in [first, last) such
+ * that, for every iterator \c k in [first, j),
+ * comp(value, *k) is \c false. For every iterator \c k
+ * in [i, j), neither comp(value, *k) nor
+ * comp(*k, value) is \c true.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param value The value to be searched.
+ * \param comp The comparison operator.
+ * \return A \p pair of iterators [i, j) that define the range of equivalent elements.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam T is comparable to \p ForwardIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * The following code snippet demonstrates how to use \p equal_range
+ * to search for values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::equal_range(input.begin(), input.end(), 0, thrust::less<int>()); // returns [input.begin(), input.begin() + 1)
+ * thrust::equal_range(input.begin(), input.end(), 1, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 1)
+ * thrust::equal_range(input.begin(), input.end(), 2, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 2)
+ * thrust::equal_range(input.begin(), input.end(), 3, thrust::less<int>()); // returns [input.begin() + 2, input.begin() + 2)
+ * thrust::equal_range(input.begin(), input.end(), 8, thrust::less<int>()); // returns [input.begin() + 4, input.end())
+ * thrust::equal_range(input.begin(), input.end(), 9, thrust::less<int>()); // returns [input.end(), input.end())
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/equal_range
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p binary_search
+ */
+template<typename ForwardIterator, typename T, typename StrictWeakOrdering>
+thrust::pair<ForwardIterator, ForwardIterator>
+equal_range(ForwardIterator first,
+            ForwardIterator last,
+            const T& value,
+            StrictWeakOrdering comp);
+
+
+/*! \addtogroup vectorized_binary_search Vectorized Searches
+ *  \ingroup binary_search
+ *  \{
+ */
+
+
+//////////////////////
+// Vector Functions //
+//////////////////////
+
+
+/*! \p lower_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the first position where value could
+ * be inserted without violating the ordering.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is LessThanComparable.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p lower_bound
+ * to search for multiple values in an ordered range using the \p thrust::device execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::lower_bound(thrust::device,
+ *                     input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin());
+ *
+ * // output is now [0, 1, 1, 2, 4, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
+__host__ __device__
+OutputIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                           ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result);
+
+
+/*! \p lower_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the first position where value could
+ * be inserted without violating the ordering.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is LessThanComparable.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p lower_bound
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::lower_bound(input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin());
+ *
+ * // output is now [0, 1, 1, 2, 4, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename ForwardIterator, typename InputIterator, typename OutputIterator>
+OutputIterator lower_bound(ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result);
+
+
+/*! \p lower_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the first position where value could
+ * be inserted without violating the ordering. This version of
+ * \p lower_bound uses function object \c comp for comparison.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ * \param comp The comparison operator.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p lower_bound
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::lower_bound(input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin(),
+ *                     thrust::less<int>());
+ *
+ * // output is now [0, 1, 1, 2, 4, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
+__host__ __device__
+OutputIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                           ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result,
+                           StrictWeakOrdering comp);
+
+
+/*! \p lower_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the first position where value could
+ * be inserted without violating the ordering. This version of
+ * \p lower_bound uses function object \c comp for comparison.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ * \param comp The comparison operator.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p lower_bound
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::lower_bound(input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin(),
+ *                     thrust::less<int>());
+ *
+ * // output is now [0, 1, 1, 2, 4, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
+OutputIterator lower_bound(ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result,
+                           StrictWeakOrdering comp);
+
+
+/*! \p upper_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the last position where value could
+ * be inserted without violating the ordering.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is LessThanComparable.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p upper_bound
+ * to search for multiple values in an ordered range using the \p thrust::device execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::upper_bound(thrust::device,
+ *                     input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin());
+ *
+ * // output is now [1, 1, 2, 2, 5, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
+ * \see \p lower_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
+__host__ __device__
+OutputIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                           ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result);
+
+
+/*! \p upper_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the last position where value could
+ * be inserted without violating the ordering.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is LessThanComparable.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p upper_bound
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::upper_bound(input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin());
+ *
+ * // output is now [1, 1, 2, 2, 5, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
+ * \see \p lower_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename ForwardIterator, typename InputIterator, typename OutputIterator>
+OutputIterator upper_bound(ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result);
+
+
+/*! \p upper_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the last position where value could
+ * be inserted without violating the ordering. This version of
+ * \p upper_bound uses function object \c comp for comparison.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ * \param comp The comparison operator.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
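+ *
+ * As a sanity check on the contract (an added sketch: the serial loop below
+ * is an equivalent formulation, not the parallel implementation), each
+ * output element is an insertion index, not an iterator:
+ * \code
+ * for (; values_first != values_last; ++values_first, ++result)
+ *   *result = thrust::upper_bound(first, last, *values_first, comp) - first;
+ * \endcode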
+ *
+ * The following code snippet demonstrates how to use \p upper_bound
+ * to search for multiple values in an ordered range using the \p thrust::device execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::upper_bound(thrust::device,
+ *                     input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin(),
+ *                     thrust::less<int>());
+ *
+ * // output is now [1, 1, 2, 2, 5, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
+ * \see \p lower_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
+__host__ __device__
+OutputIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                           ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result,
+                           StrictWeakOrdering comp);
+
+
+/*! \p upper_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of the last position where value could
+ * be inserted without violating the ordering. This version of
+ * \p upper_bound uses function object \c comp for comparison.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ * \param comp The comparison operator.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p upper_bound
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<unsigned int> output(6);
+ *
+ * thrust::upper_bound(input.begin(), input.end(),
+ *                     values.begin(), values.end(),
+ *                     output.begin(),
+ *                     thrust::less<int>());
+ *
+ * // output is now [1, 1, 2, 2, 5, 5]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
+ * \see \p lower_bound
+ * \see \p equal_range
+ * \see \p binary_search
+ */
+template<typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
+OutputIterator upper_bound(ForwardIterator first,
+                           ForwardIterator last,
+                           InputIterator values_first,
+                           InputIterator values_last,
+                           OutputIterator result,
+                           StrictWeakOrdering comp);
+
+
+/*! \p binary_search is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * It returns \c true if an element that is equivalent to \c value
+ * is present in [first, last) and \c false if no such element
+ * exists.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is LessThanComparable.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and bool is convertible to \c OutputIterator's \c value_type.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p binary_search
+ * to search for multiple values in an ordered range using the \p thrust::device execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<bool> output(6);
+ *
+ * thrust::binary_search(thrust::device,
+ *                       input.begin(), input.end(),
+ *                       values.begin(), values.end(),
+ *                       output.begin());
+ *
+ * // output is now [true, false, true, false, true, false]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/binary_search
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
+__host__ __device__
+OutputIterator binary_search(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                             ForwardIterator first,
+                             ForwardIterator last,
+                             InputIterator values_first,
+                             InputIterator values_last,
+                             OutputIterator result);
+
+
+/*! \p binary_search is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * It returns \c true if an element that is equivalent to \c value
+ * is present in [first, last) and \c false if no such element
+ * exists.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is LessThanComparable.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and bool is convertible to \c OutputIterator's \c value_type.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
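+ *
+ * Conceptually (an added sketch: the serial loop below is an equivalent
+ * formulation, not the parallel implementation):
+ * \code
+ * for (; values_first != values_last; ++values_first, ++result)
+ *   *result = thrust::binary_search(first, last, *values_first);
+ * \endcode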
+ *
+ * The following code snippet demonstrates how to use \p binary_search
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<bool> output(6);
+ *
+ * thrust::binary_search(input.begin(), input.end(),
+ *                       values.begin(), values.end(),
+ *                       output.begin());
+ *
+ * // output is now [true, false, true, false, true, false]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/binary_search
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ */
+template<typename ForwardIterator, typename InputIterator, typename OutputIterator>
+OutputIterator binary_search(ForwardIterator first,
+                             ForwardIterator last,
+                             InputIterator values_first,
+                             InputIterator values_last,
+                             OutputIterator result);
+
+
+/*! \p binary_search is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * It returns \c true if an element that is equivalent to \c value
+ * is present in [first, last) and \c false if no such element
+ * exists. This version of \p binary_search uses function object
+ * \c comp for comparison.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ * \param comp The comparison operator.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and bool is convertible to \c OutputIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p binary_search
+ * to search for multiple values in an ordered range using the \p thrust::device execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<bool> output(6);
+ *
+ * thrust::binary_search(thrust::device,
+ *                       input.begin(), input.end(),
+ *                       values.begin(), values.end(),
+ *                       output.begin(),
+ *                       thrust::less<int>());
+ *
+ * // output is now [true, false, true, false, true, false]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/binary_search
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
+__host__ __device__
+OutputIterator binary_search(const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
+                             ForwardIterator first,
+                             ForwardIterator last,
+                             InputIterator values_first,
+                             InputIterator values_last,
+                             OutputIterator result,
+                             StrictWeakOrdering comp);
+
+
+/*! \p binary_search is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * It returns \c true if an element that is equivalent to \c value
+ * is present in [first, last) and \c false if no such element
+ * exists. This version of \p binary_search uses function object
+ * \c comp for comparison.
+ *
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence.
+ * \param result The beginning of the output sequence.
+ * \param comp The comparison operator.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator.
+ * \tparam InputIterator is a model of Input Iterator,
+ *         and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
+ * \tparam OutputIterator is a model of Output Iterator,
+ *         and bool is convertible to \c OutputIterator's \c value_type.
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap.
+ *
+ * The following code snippet demonstrates how to use \p binary_search
+ * to search for multiple values in an ordered range.
+ *
+ * \code
+ * #include <thrust/binary_search.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/functional.h>
+ * ...
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::device_vector<int> values(6);
+ * values[0] = 0;
+ * values[1] = 1;
+ * values[2] = 2;
+ * values[3] = 3;
+ * values[4] = 8;
+ * values[5] = 9;
+ *
+ * thrust::device_vector<bool> output(6);
+ *
+ * thrust::binary_search(input.begin(), input.end(),
+ *                       values.begin(), values.end(),
+ *                       output.begin(),
+ *                       thrust::less<int>());
+ *
+ * // output is now [true, false, true, false, true, false]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/binary_search
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p equal_range
+ */
+template<typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
+OutputIterator binary_search(ForwardIterator first,
+                             ForwardIterator last,
+                             InputIterator values_first,
+                             InputIterator values_last,
+                             OutputIterator result,
+                             StrictWeakOrdering comp);
+
+
+/*! \} // end vectorized_binary_search
+ */
+
+
+/*! \} // end binary_search
+ */
+
+
+/*! \} // end searching
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/binary_search.inl>
+
diff --git a/miniCUDA124/include/thrust/complex.h b/miniCUDA124/include/thrust/complex.h
new file mode 100644
index 0000000000000000000000000000000000000000..f72661abd8342f39f09badec0d5890800214ef55
--- /dev/null
+++ b/miniCUDA124/include/thrust/complex.h
@@ -0,0 +1,1055 @@
+/*
+ * Copyright 2008-2019 NVIDIA Corporation
+ * Copyright 2013 Filipe RNC Maia
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*! \file complex.h
+ * \brief Complex numbers
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+#include <cmath>
+#include <complex>
+#include <sstream>
+#include <thrust/detail/type_traits.h>
+
+#if THRUST_CPP_DIALECT >= 2011
+# define THRUST_STD_COMPLEX_REAL(z) \
+    reinterpret_cast< \
+      const typename thrust::detail::remove_reference<decltype(z)>::type::value_type (&)[2] \
+    >(z)[0]
+# define THRUST_STD_COMPLEX_IMAG(z) \
+    reinterpret_cast< \
+      const typename thrust::detail::remove_reference<decltype(z)>::type::value_type (&)[2] \
+    >(z)[1]
+# define THRUST_STD_COMPLEX_DEVICE __device__
+#else
+# define THRUST_STD_COMPLEX_REAL(z) (z).real()
+# define THRUST_STD_COMPLEX_IMAG(z) (z).imag()
+# define THRUST_STD_COMPLEX_DEVICE
+#endif
+
+THRUST_NAMESPACE_BEGIN
+
+/*
+ * Calls to the standard math library from inside the thrust namespace
+ * with real arguments require explicit scope otherwise they will fail
+ * to resolve as it will find the equivalent complex function but then
+ * fail to match the template, and give up looking for other scopes.
+ */
+
+
+/*! \addtogroup numerics
+ * \{
+ */
+
+/*! \addtogroup complex_numbers Complex Numbers
+ * \{
+ */
+
+/*! \cond
+ */
+
+namespace detail
+{
+
+template <typename T, std::size_t Align>
+struct complex_storage;
+
+#if THRUST_CPP_DIALECT >= 2011                                                \
+  && (   (THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_GCC)                   \
+      || (THRUST_GCC_VERSION >= 40800))
+  // C++11 implementation, excluding GCC 4.7, which doesn't have `alignas`.
+  template <typename T, std::size_t Align>
+  struct complex_storage
+  {
+    struct alignas(Align) type { T x; T y; };
+  };
+#elif (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC)                     \
+   || (   (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC)                  \
+       && (THRUST_GCC_VERSION < 40600))
+  // C++03 implementation for MSVC and GCC <= 4.5.
+  //
+  // We have to implement `aligned_type` with specializations for MSVC
+  // and GCC 4.2 and older because they require literals as arguments to
+  // their alignment attribute.
+
+  #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC)
+    // MSVC implementation.
+    #define THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(X)                   \
+      template <typename T>                                                   \
+      struct complex_storage<T, X>                                            \
+      {                                                                       \
+        __declspec(align(X)) struct type { T x; T y; };                       \
+      };                                                                      \
+      /**/
+  #else
+    // GCC <= 4.2 implementation.
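+    // (Added illustration: for X == 8 the macro below expands, roughly, to
+    //
+    //   template <typename T>
+    //   struct complex_storage<T, 8>
+    //   {
+    //     struct type { T x; T y; } __attribute__((aligned(8)));
+    //   };
+    //
+    // giving complex<float>'s storage the 8-byte alignment of a float2.)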
+    #define THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(X)                   \
+      template <typename T>                                                   \
+      struct complex_storage<T, X>                                            \
+      {                                                                       \
+        struct type { T x; T y; } __attribute__((aligned(X)));                \
+      };                                                                      \
+      /**/
+  #endif
+
+  // The primary template is a fallback, which doesn't specify any alignment.
+  // It's only used when T is very large and we're using an older compiler
+  // for which we have to fully specialize each alignment case.
+  template <typename T, std::size_t Align>
+  struct complex_storage
+  {
+    struct type { T x; T y; };
+  };
+
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(1);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(2);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(4);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(8);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(16);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(32);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(64);
+  THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(128);
+
+  #undef THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION
+#else
+  // C++03 implementation for GCC > 4.5, Clang, PGI, ICPC, and xlC.
+  template <typename T, std::size_t Align>
+  struct complex_storage
+  {
+    struct type { T x; T y; } __attribute__((aligned(Align)));
+  };
+#endif
+
+} // end namespace detail
+
+/*! \endcond
+ */
+
+/*! \p complex is the Thrust equivalent to std::complex. It is
+ * functionally identical to it, but can also be used in device code which
+ * std::complex currently cannot.
+ *
+ * \tparam T The type used to hold the real and imaginary parts. Should be
+ * float or double. Other types are not supported.
+ *
+ */
+template <typename T>
+struct complex
+{
+public:
+
+  /*! \p value_type is the type of \p complex's real and imaginary parts.
+   */
+  typedef T value_type;
+
+
+
+  /* --- Constructors --- */
+
+  /*! Construct a complex number with an imaginary part of 0.
+   *
+   * \param re The real part of the number.
+   */
+  __host__ __device__
+  complex(const T& re);
+
+  /*! Construct a complex number from its real and imaginary parts.
+   *
+   * \param re The real part of the number.
+   * \param im The imaginary part of the number.
+   */
+  __host__ __device__
+  complex(const T& re, const T& im);
+
+#if THRUST_CPP_DIALECT >= 2011
+  /*! Default construct a complex number.
+   */
+  complex() = default;
+
+  /*! This copy constructor copies from a \p complex with a type that is
+   * convertible to this \p complex's \c value_type.
+   *
+   * \param z The \p complex to copy from.
+   */
+  complex(const complex& z) = default;
+#else
+  /*! Default construct a complex number.
+   */
+  __host__ __device__
+  complex();
+
+  /*! This copy constructor copies from a \p complex with a type that is
+   * convertible to this \p complex's \c value_type.
+   *
+   * \param z The \p complex to copy from.
+   */
+  __host__ __device__
+  complex(const complex& z);
+#endif
+
+  /*! This converting copy constructor copies from a \p complex with a type
+   * that is convertible to this \p complex's \c value_type.
+   *
+   * \param z The \p complex to copy from.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex(const complex<U>& z);
+
+  /*! This converting copy constructor copies from a std::complex with
+   * a type that is convertible to this \p complex's \c value_type.
+   *
+   * \param z The \p complex to copy from.
+   */
+  __host__ THRUST_STD_COMPLEX_DEVICE
+  complex(const std::complex<T>& z);
+
+  /*! This converting copy constructor copies from a std::complex with
+   * a type that is convertible to this \p complex's \c value_type.
+   *
+   * \param z The \p complex to copy from.
+   *
+   * \tparam U is convertible to \c value_type.
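+   *
+   * For example (an added sketch; assumes <complex> is included):
+   * \code
+   * std::complex<float> sz(1.0f, 2.0f);
+   * thrust::complex<double> tz(sz); // widens float to double
+   * \endcode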
+   */
+  template <typename U>
+  __host__ THRUST_STD_COMPLEX_DEVICE
+  complex(const std::complex<U>& z);
+
+
+
+  /* --- Assignment Operators --- */
+
+  /*! Assign `re` to the real part of this \p complex and set the imaginary part
+   * to 0.
+   *
+   * \param re The real part of the number.
+   */
+  __host__ __device__
+  complex& operator=(const T& re);
+
+#if THRUST_CPP_DIALECT >= 2011
+  /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
+   * \p complex respectively.
+   *
+   * \param z The \p complex to copy from.
+   */
+  complex& operator=(const complex& z) = default;
+#else
+  /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
+   * \p complex respectively.
+   *
+   * \param z The \p complex to copy from.
+   */
+  __host__ __device__
+  complex& operator=(const complex& z);
+#endif
+
+  /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
+   * \p complex respectively.
+   *
+   * \param z The \p complex to copy from.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex& operator=(const complex<U>& z);
+
+  /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
+   * \p complex respectively.
+   *
+   * \param z The \p complex to copy from.
+   */
+  __host__ THRUST_STD_COMPLEX_DEVICE
+  complex& operator=(const std::complex<T>& z);
+
+  /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
+   * \p complex respectively.
+   *
+   * \param z The \p complex to copy from.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ THRUST_STD_COMPLEX_DEVICE
+  complex& operator=(const std::complex<U>& z);
+
+
+  /* --- Compound Assignment Operators --- */
+
+  /*! Adds a \p complex to this \p complex and assigns the result to this
+   * \p complex.
+   *
+   * \param z The \p complex to be added.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator+=(const complex<U>& z);
+
+  /*! Subtracts a \p complex from this \p complex and assigns the result to
+   * this \p complex.
+   *
+   * \param z The \p complex to be subtracted.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator-=(const complex<U>& z);
+
+  /*! Multiplies this \p complex by another \p complex and assigns the result
+   * to this \p complex.
+   *
+   * \param z The \p complex to be multiplied.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator*=(const complex<U>& z);
+
+  /*! Divides this \p complex by another \p complex and assigns the result to
+   * this \p complex.
+   *
+   * \param z The \p complex to be divided.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator/=(const complex<U>& z);
+
+  /*! Adds a scalar to this \p complex and assigns the result to this
+   * \p complex.
+   *
+   * \param z The scalar to be added.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator+=(const U& z);
+
+  /*! Subtracts a scalar from this \p complex and assigns the result to
+   * this \p complex.
+   *
+   * \param z The scalar to be subtracted.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator-=(const U& z);
+
+  /*! Multiplies this \p complex by a scalar and assigns the result
+   * to this \p complex.
+   *
+   * \param z The scalar to be multiplied.
+   *
+   * \tparam U is convertible to \c value_type.
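+   *
+   * For example (an added sketch):
+   * \code
+   * thrust::complex<double> z(1.0, 2.0);
+   * z *= 3.0; // z is now (3.0, 6.0)
+   * \endcode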
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator*=(const U& z);
+
+  /*! Divides this \p complex by a scalar and assigns the result to
+   * this \p complex.
+   *
+   * \param z The scalar to be divided.
+   *
+   * \tparam U is convertible to \c value_type.
+   */
+  template <typename U>
+  __host__ __device__
+  complex<T>& operator/=(const U& z);
+
+
+
+  /* --- Getter functions ---
+   * The volatile ones are there to help for example
+   * with certain reductions optimizations
+   */
+
+  /*! Returns the real part of this \p complex.
+   */
+  __host__ __device__
+  T real() const volatile { return data.x; }
+
+  /*! Returns the imaginary part of this \p complex.
+   */
+  __host__ __device__
+  T imag() const volatile { return data.y; }
+
+  /*! Returns the real part of this \p complex.
+   */
+  __host__ __device__
+  T real() const { return data.x; }
+
+  /*! Returns the imaginary part of this \p complex.
+   */
+  __host__ __device__
+  T imag() const { return data.y; }
+
+
+
+  /* --- Setter functions ---
+   * The volatile ones are there to help for example
+   * with certain reductions optimizations
+   */
+
+  /*! Sets the real part of this \p complex.
+   *
+   * \param re The new real part of this \p complex.
+   */
+  __host__ __device__
+  void real(T re) volatile { data.x = re; }
+
+  /*! Sets the imaginary part of this \p complex.
+   *
+   * \param im The new imaginary part of this \p complex.
+   */
+  __host__ __device__
+  void imag(T im) volatile { data.y = im; }
+
+  /*! Sets the real part of this \p complex.
+   *
+   * \param re The new real part of this \p complex.
+   */
+  __host__ __device__
+  void real(T re) { data.x = re; }
+
+  /*! Sets the imaginary part of this \p complex.
+   *
+   * \param im The new imaginary part of this \p complex.
+   */
+  __host__ __device__
+  void imag(T im) { data.y = im; }
+
+
+
+  /* --- Casting functions --- */
+
+  /*! Casts this \p complex to a std::complex of the same type.
+   */
+  __host__
+  operator std::complex<T>() const { return std::complex<T>(real(), imag()); }
+
+private:
+  typename detail::complex_storage<T, sizeof(T) * 2>::type data;
+};
+
+
+/* --- General Functions --- */
+
+/*! Returns the magnitude (also known as absolute value) of a \p complex.
+ *
+ * \param z The \p complex from which to calculate the absolute value.
+ */
+template <typename T>
+__host__ __device__
+T abs(const complex<T>& z);
+
+/*! Returns the phase angle (also known as argument) in radians of a \p complex.
+ *
+ * \param z The \p complex from which to calculate the phase angle.
+ */
+template <typename T>
+__host__ __device__
+T arg(const complex<T>& z);
+
+/*! Returns the square of the magnitude of a \p complex.
+ *
+ * \param z The \p complex from which to calculate the norm.
+ */
+template <typename T>
+__host__ __device__
+T norm(const complex<T>& z);
+
+/*! Returns the complex conjugate of a \p complex.
+ *
+ * \param z The \p complex from which to calculate the complex conjugate.
+ */
+template <typename T>
+__host__ __device__
+complex<T> conj(const complex<T>& z);
+
+/*! Returns a \p complex with the specified magnitude and phase.
+ *
+ * \param m The magnitude of the returned \p complex.
+ * \param theta The phase of the returned \p complex in radians.
+ */
+template <typename T0, typename T1>
+__host__ __device__
+complex<typename detail::promoted_numerical_type<T0, T1>::type>
+polar(const T0& m, const T1& theta = T1());
+
+/*! Returns the projection of a \p complex on the Riemann sphere.
+ * For all finite \p complex it returns the argument. For \p complexs
+ * with a non finite part returns (INFINITY,+/-0) where the sign of
+ * the zero matches the sign of the imaginary part of the argument.
+ *
+ * \param z The \p complex argument.
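+ *
+ * For example (an added sketch of the semantics):
+ * \code
+ * // proj of a finite value is unchanged:        (1.0, 2.0)  -> (1.0, 2.0)
+ * // proj of an infinity collapses to the pole:  (inf, -2.0) -> (inf, -0.0)
+ * \endcode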
+ */ +template +__host__ __device__ +complex proj(const T& z); + + + +/* --- Binary Arithmetic operators --- */ + +/*! Adds two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +complex::type> +operator+(const complex& x, const complex& y); + +/*! Adds a scalar to a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +complex::type> +operator+(const complex& x, const T1& y); + +/*! Adds a \p complex number to a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar. + * \param y The \p complex. + */ +template +__host__ __device__ +complex::type> +operator+(const T0& x, const complex& y); + +/*! Subtracts two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The first \p complex (minuend). + * \param y The second \p complex (subtrahend). + */ +template +__host__ __device__ +complex::type> +operator-(const complex& x, const complex& y); + +/*! Subtracts a scalar from a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The \p complex (minuend). + * \param y The scalar (subtrahend). + */ +template +__host__ __device__ +complex::type> +operator-(const complex& x, const T1& y); + +/*! Subtracts a \p complex number from a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar (minuend). + * \param y The \p complex (subtrahend). + */ +template +__host__ __device__ +complex::type> +operator-(const T0& x, const complex& y); + +/*! Multiplies two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +complex::type> +operator*(const complex& x, const complex& y); + +/*! Multiplies a \p complex number by a scalar. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +complex::type> +operator*(const complex& x, const T1& y); + +/*! Multiplies a scalar by a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar. + * \param y The \p complex. + */ +template +__host__ __device__ +complex::type> +operator*(const T0& x, const complex& y); + +/*! Divides two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. 
+ * + * \param x The numerator (dividend). + * \param y The denominator (divisor). + */ +template +__host__ __device__ +complex::type> +operator/(const complex& x, const complex& y); + +/*! Divides a \p complex number by a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The complex numerator (dividend). + * \param y The scalar denominator (divisor). + */ +template +__host__ __device__ +complex::type> +operator/(const complex& x, const T1& y); + +/*! Divides a scalar by a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar numerator (dividend). + * \param y The complex denominator (divisor). + */ +template +__host__ __device__ +complex::type> +operator/(const T0& x, const complex& y); + + + +/* --- Unary Arithmetic operators --- */ + +/*! Unary plus, returns its \p complex argument. + * + * \param y The \p complex argument. + */ +template +__host__ __device__ +complex +operator+(const complex& y); + +/*! Unary minus, returns the additive inverse (negation) of its \p complex + * argument. + * + * \param y The \p complex argument. + */ +template +__host__ __device__ +complex +operator-(const complex& y); + + + +/* --- Exponential Functions --- */ + +/*! Returns the complex exponential of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex exp(const complex& z); + +/*! Returns the complex natural logarithm of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex log(const complex& z); + +/*! Returns the complex base 10 logarithm of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex log10(const complex& z); + + + +/* --- Power Functions --- */ + +/*! Returns a \p complex number raised to another. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ +complex::type> +pow(const complex& x, const complex& y); + +/*! Returns a \p complex number raised to a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ +complex::type> +pow(const complex& x, const T1& y); + +/*! Returns a scalar raised to a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ +complex::type> +pow(const T0& x, const complex& y); + +/*! Returns the complex square root of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex sqrt(const complex& z); + + +/* --- Trigonometric Functions --- */ + +/*! Returns the complex cosine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex cos(const complex& z); + +/*!
Returns the complex sine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex sin(const complex& z); + +/*! Returns the complex tangent of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex tan(const complex& z); + + + +/* --- Hyperbolic Functions --- */ + +/*! Returns the complex hyperbolic cosine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex cosh(const complex& z); + +/*! Returns the complex hyperbolic sine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex sinh(const complex& z); + +/*! Returns the complex hyperbolic tangent of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex tanh(const complex& z); + + + +/* --- Inverse Trigonometric Functions --- */ + +/*! Returns the complex arc cosine of a \p complex number. + * + * The range of the real part of the result is [0, Pi] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex acos(const complex& z); + +/*! Returns the complex arc sine of a \p complex number. + * + * The range of the real part of the result is [-Pi/2, Pi/2] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex asin(const complex& z); + +/*! Returns the complex arc tangent of a \p complex number. + * + * The range of the real part of the result is [-Pi/2, Pi/2] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex atan(const complex& z); + + + +/* --- Inverse Hyperbolic Functions --- */ + +/*! Returns the complex inverse hyperbolic cosine of a \p complex number. + * + * The range of the real part of the result is [0, +inf] and + * the range of the imaginary part is [-Pi, Pi] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex acosh(const complex& z); + +/*! Returns the complex inverse hyperbolic sine of a \p complex number. + * + * The range of the real part of the result is [-inf, +inf] and + * the range of the imaginary part is [-Pi/2, Pi/2] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex asinh(const complex& z); + +/*! Returns the complex inverse hyperbolic tangent of a \p complex number. + * + * The range of the real part of the result is [-inf, +inf] and + * the range of the imaginary part is [-Pi/2, Pi/2] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex atanh(const complex& z); + + + +/* --- Stream Operators --- */ + +/*! Writes to an output stream a \p complex number in the form (real, imaginary). + * + * \param os The output stream. + * \param z The \p complex number to output. + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const complex& z); + +/*! Reads a \p complex number from an input stream. + * + * The recognized formats are: + * - real + * - (real) + * - (real, imaginary) + * + * The values read must be convertible to the \p complex's \c value_type + * + * \param is The input stream. + * \param z The \p complex number to set. + */ +template +__host__ +std::basic_istream& +operator>>(std::basic_istream& is, complex& z); + + + +/* --- Equality Operators --- */ + +/*! 
Returns true if two \p complex numbers are equal and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +bool operator==(const complex& x, const complex& y); + +/*! Returns true if two \p complex numbers are equal and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator==(const complex& x, const std::complex& y); + +/*! Returns true if two \p complex numbers are equal and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator==(const std::complex& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is zero and + * the real part is equal to the scalar. Returns false otherwise. + * + * \param x The scalar. + * \param y The \p complex. + */ +template +__host__ __device__ +bool operator==(const T0& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is zero and + * the real part is equal to the scalar. Returns false otherwise. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +bool operator==(const complex& x, const T1& y); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +bool operator!=(const complex& x, const complex& y); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator!=(const complex& x, const std::complex& y); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator!=(const std::complex& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is not zero or + * the real part is different from the scalar. Returns false otherwise. + * + * \param x The scalar. + * \param y The \p complex. + */ +template +__host__ __device__ +bool operator!=(const T0& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is not zero or + * the real part is different from the scalar. Returns false otherwise. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +bool operator!=(const complex& x, const T1& y); + +THRUST_NAMESPACE_END + +#include + +#undef THRUST_STD_COMPLEX_REAL +#undef THRUST_STD_COMPLEX_IMAG +#undef THRUST_STD_COMPLEX_DEVICE + +/*! \} // complex_numbers + */ + +/*! \} // numerics + */ + diff --git a/miniCUDA124/include/thrust/copy.h b/miniCUDA124/include/thrust/copy.h new file mode 100644 index 0000000000000000000000000000000000000000..40006160a117ccb75ad129a7a2a26a7d1d662134 --- /dev/null +++ b/miniCUDA124/include/thrust/copy.h @@ -0,0 +1,520 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file thrust/copy.h + * \brief Copies elements from one range to another + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + +/*! \addtogroup copying + * \ingroup algorithms + * \{ + */ + + +/*! \p copy copies elements from the range [\p first, \p last) to the range + * [\p result, \p result + (\p last - \p first)). That is, it performs + * the assignments *\p result = *\p first, *(\p result + \c 1) = *(\p first + \c 1), + * and so on. Generally, for every integer \c n from \c 0 to \p last - \p first, \p copy + * performs the assignment *(\p result + \c n) = *(\p first + \c n). Unlike + * \c std::copy, \p copy offers no guarantee on order of operation. As a result, + * calling \p copy with overlapping source and destination ranges has undefined + * behavior. + * + * The return value is \p result + (\p last - \p first). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to copy. + * \param last The end of the sequence to copy. + * \param result The destination sequence. + * \return The end of the destination sequence. + * \see https://en.cppreference.com/w/cpp/algorithm/copy + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator must be a model of Output Iterator. + * + * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, last) otherwise. + * + * The following code snippet demonstrates how to use \p copy + * to copy from one range to another using the \p thrust::device parallelization policy: + * + * \code + * #include + * #include + * #include + * ... + * + * thrust::device_vector vec0(100); + * thrust::device_vector vec1(100); + * ... + * + * thrust::copy(thrust::device, vec0.begin(), vec0.end(), vec1.begin()); + * + * // vec1 is now a copy of vec0 + * \endcode + */ +template +__host__ __device__ + OutputIterator copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p copy_n copies elements from the range [first, first + n) to the range + * [result, result + n). That is, it performs the assignments *result = *first, *(result + 1) = *(first + 1), + * and so on. Generally, for every integer \c i from \c 0 to \c n, \p copy + * performs the assignment *(\p result + \c i) = *(\p first + \c i). Unlike + * \c std::copy_n, \p copy_n offers no guarantee on order of operation. As a result, + * calling \p copy_n with overlapping source and destination ranges has undefined + * behavior. 
+ * + * The return value is \p result + \p n. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the range to copy. + * \param n The number of elements to copy. + * \param result The beginning of the destination range. + * \return The end of the destination range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. + * \tparam Size is an integral type. + * \tparam OutputIterator must be a model of Output Iterator. + * + * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, first + n) otherwise. + * + * The following code snippet demonstrates how to use \p copy + * to copy from one range to another using the \p thrust::device parallelization policy: + * + * \code + * #include + * #include + * #include + * ... + * size_t n = 100; + * thrust::device_vector vec0(n); + * thrust::device_vector vec1(n); + * ... + * thrust::copy_n(thrust::device, vec0.begin(), n, vec1.begin()); + * + * // vec1 is now a copy of vec0 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/copy_n + * \see thrust::copy + */ +template +__host__ __device__ + OutputIterator copy_n(const thrust::detail::execution_policy_base &exec, + InputIterator first, + Size n, + OutputIterator result); + + + +/*! \p copy copies elements from the range [\p first, \p last) to the range + * [\p result, \p result + (\p last - \p first)). That is, it performs + * the assignments *\p result = *\p first, *(\p result + \c 1) = *(\p first + \c 1), + * and so on. Generally, for every integer \c n from \c 0 to \p last - \p first, \p copy + * performs the assignment *(\p result + \c n) = *(\p first + \c n). Unlike + * \c std::copy, \p copy offers no guarantee on order of operation. As a result, + * calling \p copy with overlapping source and destination ranges has undefined + * behavior. + * + * The return value is \p result + (\p last - \p first). + * + * \param first The beginning of the sequence to copy. + * \param last The end of the sequence to copy. + * \param result The destination sequence. + * \return The end of the destination sequence. + * \see https://en.cppreference.com/w/cpp/algorithm/copy + * + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator must be a model of Output Iterator. + * + * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, last) otherwise. + * + * The following code snippet demonstrates how to use \p copy + * to copy from one range to another. + * + * \code + * #include + * #include + * ... + * + * thrust::device_vector vec0(100); + * thrust::device_vector vec1(100); + * ... + * + * thrust::copy(vec0.begin(), vec0.end(), + * vec1.begin()); + * + * // vec1 is now a copy of vec0 + * \endcode + */ +template + OutputIterator copy(InputIterator first, + InputIterator last, + OutputIterator result); + +/*! \p copy_n copies elements from the range [first, first + n) to the range + * [result, result + n). That is, it performs the assignments *result = *first, *(result + 1) = *(first + 1), + * and so on.
Generally, for every integer \c i from \c 0 to \c n, \p copy + * performs the assignment *(\p result + \c i) = *(\p first + \c i). Unlike + * \c std::copy_n, \p copy_n offers no guarantee on order of operation. As a result, + * calling \p copy_n with overlapping source and destination ranges has undefined + * behavior. + * + * The return value is \p result + \p n. + * + * \param first The beginning of the range to copy. + * \param n The number of elements to copy. + * \param result The beginning of the destination range. + * \return The end of the destination range. + * + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. + * \tparam Size is an integral type. + * \tparam OutputIterator must be a model of Output Iterator. + * + * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, first + n) otherwise. + * + * The following code snippet demonstrates how to use \p copy + * to copy from one range to another. + * + * \code + * #include + * #include + * ... + * size_t n = 100; + * thrust::device_vector vec0(n); + * thrust::device_vector vec1(n); + * ... + * thrust::copy_n(vec0.begin(), n, vec1.begin()); + * + * // vec1 is now a copy of vec0 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/copy_n + * \see thrust::copy + */ +template + OutputIterator copy_n(InputIterator first, + Size n, + OutputIterator result); + +/*! \} // end copying + */ + +/*! \addtogroup stream_compaction + * \{ + */ + + +/*! This version of \p copy_if copies elements from the range [first,last) + * to a range beginning at \p result, except that any element which causes \p pred + * to be \c false is not copied. \p copy_if is stable, meaning that the relative + * order of elements that are copied is unchanged. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p copy_if performs the assignment *result = *(first+n) and \p result + * is advanced one position if pred(*(first+n)). Otherwise, no assignment + * occurs and \p result is not advanced. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence from which to copy. + * \param last The end of the sequence from which to copy. + * \param result The beginning of the sequence into which to copy. + * \param pred The predicate to test on every value of the range [first, last). + * \return result + n, where \c n is equal to the number of times \p pred + * evaluated to \c true in the range [first, last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p copy_if to perform stream compaction + * to copy even numbers to an output range using the \p thrust::host parallelization policy: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int x) + * { + * return (x % 2) == 0; + * } + * }; + * ...
+ * const int N = 6; + * int V[N] = {-2, 0, -1, 0, 1, 2}; + * int result[4]; + * + * thrust::copy_if(thrust::host, V, V + N, result, is_even()); + * + * // V remains {-2, 0, -1, 0, 1, 2} + * // result is now {-2, 0, 0, 2} + * \endcode + * + * \see \c remove_copy_if + */ +template +__host__ __device__ + OutputIterator copy_if(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + Predicate pred); + + + +/*! This version of \p copy_if copies elements from the range [first,last) + * to a range beginning at \p result, except that any element which causes \p pred + * to be \c false is not copied. \p copy_if is stable, meaning that the relative + * order of elements that are copied is unchanged. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p copy_if performs the assignment *result = *(first+n) and \p result + * is advanced one position if pred(*(first+n)). Otherwise, no assignment + * occurs and \p result is not advanced. + * + * \param first The beginning of the sequence from which to copy. + * \param last The end of the sequence from which to copy. + * \param result The beginning of the sequence into which to copy. + * \param pred The predicate to test on every value of the range [first, last). + * \return result + n, where \c n is equal to the number of times \p pred + * evaluated to \c true in the range [first, last). + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p copy_if to perform stream compaction + * to copy even numbers to an output range. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * const int N = 6; + * int V[N] = {-2, 0, -1, 0, 1, 2}; + * int result[4]; + * + * thrust::copy_if(V, V + N, result, is_even()); + * + * // V remains {-2, 0, -1, 0, 1, 2} + * // result is now {-2, 0, 0, 2} + * \endcode + * + * \see \c remove_copy_if + */ +template + OutputIterator copy_if(InputIterator first, + InputIterator last, + OutputIterator result, + Predicate pred); + + +/*! This version of \p copy_if copies elements from the range [first,last) + * to a range beginning at \p result, except that any element whose corresponding stencil + * element causes \p pred to be \c false is not copied. \p copy_if is stable, meaning + * that the relative order of elements that are copied is unchanged. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p copy_if performs the assignment *result = *(first+n) and \p result + * is advanced one position if pred(*(stencil+n)). Otherwise, no assignment + * occurs and \p result is not advanced. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence from which to copy. + * \param last The end of the sequence from which to copy. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the sequence into which to copy.
+ * \param pred The predicate to test on every value of the range [stencil, stencil + (last-first)). + * \return result + n, where \c n is equal to the number of times \p pred + * evaluated to \c true in the range [stencil, stencil + (last-first)). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. + * \pre The ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p copy_if to perform stream compaction + * to copy numbers to an output range when corresponding stencil elements are even using the \p thrust::host execution policy: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int N = 6; + * int data[N] = { 0, 1, 2, 3, 4, 5}; + * int stencil[N] = {-2, 0, -1, 0, 1, 2}; + * int result[4]; + * + * thrust::copy_if(thrust::host, data, data + N, stencil, result, is_even()); + * + * // data remains = { 0, 1, 2, 3, 4, 5}; + * // stencil remains = {-2, 0, -1, 0, 1, 2}; + * // result is now { 0, 1, 3, 5} + * \endcode + * + * \see \c remove_copy_if + */ +template +__host__ __device__ + OutputIterator copy_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator result, + Predicate pred); + + +/*! This version of \p copy_if copies elements from the range [first,last) + * to a range beginning at \p result, except that any element whose corresponding stencil + * element causes \p pred to be \c false is not copied. \p copy_if is stable, meaning + * that the relative order of elements that are copied is unchanged. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p copy_if performs the assignment *result = *(first+n) and \p result + * is advanced one position if pred(*(stencil+n)). Otherwise, no assignment + * occurs and \p result is not advanced. + * + * \param first The beginning of the sequence from which to copy. + * \param last The end of the sequence from which to copy. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the sequence into which to copy. + * \param pred The predicate to test on every value of the range [stencil, stencil + (last-first)). + * \return result + n, where \c n is equal to the number of times \p pred + * evaluated to \c true in the range [stencil, stencil + (last-first)). + * + * \tparam InputIterator1 is a model of Input Iterator. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. + * \pre The ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap. 
+ * + * The following code snippet demonstrates how to use \p copy_if to perform stream compaction + * to copy numbers to an output range when corresponding stencil elements are even: + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int N = 6; + * int data[N] = { 0, 1, 2, 3, 4, 5}; + * int stencil[N] = {-2, 0, -1, 0, 1, 2}; + * int result[4]; + * + * thrust::copy_if(data, data + N, stencil, result, is_even()); + * + * // data remains = { 0, 1, 2, 3, 4, 5}; + * // stencil remains = {-2, 0, -1, 0, 1, 2}; + * // result is now { 0, 1, 3, 5} + * \endcode + * + * \see \c remove_copy_if + */ +template + OutputIterator copy_if(InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator result, + Predicate pred); + +/*! \} // end stream_compaction + */ + +THRUST_NAMESPACE_END + +#include +#include + diff --git a/miniCUDA124/include/thrust/count.h b/miniCUDA124/include/thrust/count.h new file mode 100644 index 0000000000000000000000000000000000000000..899f88e5ca27ff2d1620c74b293eac7bbc763bb6 --- /dev/null +++ b/miniCUDA124/include/thrust/count.h @@ -0,0 +1,239 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file count.h + * \brief Counting elements in a range + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + +/*! \addtogroup reductions + * \ingroup algorithms + * \{ + */ + +/*! \addtogroup counting + * \ingroup reductions + * \{ + */ + + +/*! \p count finds the number of elements in [first,last) that are equal + * to \p value. More precisely, \p count returns the number of iterators \c i in + * [first, last) such that *i == value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param value The value to be counted. + * \return The number of elements equal to \p value. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be a model of Equality Comparable. + * \tparam EqualityComparable must be a model of Equality Comparable and can be compared for equality with \c InputIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p count to + * count the number of instances in a range of a value of interest using the \p thrust::device execution policy: + * + * \code + * #include + * #include + * #include + * ...
+ * // put 3 1s in a device_vector + * thrust::device_vector vec(5,0); + * vec[1] = 1; + * vec[3] = 1; + * vec[4] = 1; + * + * // count the 1s + * int result = thrust::count(thrust::device, vec.begin(), vec.end(), 1); + * // result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + count(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last, const EqualityComparable& value); + + + +/*! \p count finds the number of elements in [first,last) that are equal + * to \p value. More precisely, \p count returns the number of iterators \c i in + * [first, last) such that *i == value. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param value The value to be counted. + * \return The number of elements equal to \p value. + * + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be a model of Equality Comparable. + * \tparam EqualityComparable must be a model of Equality Comparable and can be compared for equality with \c InputIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p count to + * count the number of instances in a range of a value of interest. + * \code + * #include + * #include + * ... + * // put 3 1s in a device_vector + * thrust::device_vector vec(5,0); + * vec[1] = 1; + * vec[3] = 1; + * vec[4] = 1; + * + * // count the 1s + * int result = thrust::count(vec.begin(), vec.end(), 1); + * // result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template + typename thrust::iterator_traits::difference_type + count(InputIterator first, InputIterator last, const EqualityComparable& value); + + +/*! \p count_if finds the number of elements in [first,last) for which + * a predicate is \c true. More precisely, \p count_if returns the number of iterators + * \c i in [first, last) such that pred(*i) == true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param pred The predicate. + * \return The number of elements where \p pred is \c true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type. + * \tparam Predicate must be a model of Predicate. + * + * The following code snippet demonstrates how to use \p count_if to + * count the number of odd numbers in a range using the \p thrust::device execution policy: + * + * \code + * #include + * #include + * #include + * ... + * struct is_odd + * { + * __host__ __device__ + * bool operator()(int &x) + * { + * return x & 1; + * } + * }; + * ...
+ * // fill a device_vector with even & odd numbers + * thrust::device_vector vec(5); + * vec[0] = 0; + * vec[1] = 1; + * vec[2] = 2; + * vec[3] = 3; + * vec[4] = 4; + * + * // count the odd elements in vec + * int result = thrust::count_if(thrust::device, vec.begin(), vec.end(), is_odd()); + * // result == 2 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + count_if(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last, Predicate pred); + + +/*! \p count_if finds the number of elements in [first,last) for which + * a predicate is \c true. More precisely, \p count_if returns the number of iterators + * \c i in [first, last) such that pred(*i) == true. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param pred The predicate. + * \return The number of elements where \p pred is \c true. + * + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type. + * \tparam Predicate must be a model of Predicate. + * + * The following code snippet demonstrates how to use \p count_if to + * count the number of odd numbers in a range. + * \code + * #include + * #include + * ... + * struct is_odd + * { + * __host__ __device__ + * bool operator()(int &x) + * { + * return x & 1; + * } + * }; + * ... + * // fill a device_vector with even & odd numbers + * thrust::device_vector vec(5); + * vec[0] = 0; + * vec[1] = 1; + * vec[2] = 2; + * vec[3] = 3; + * vec[4] = 4; + * + * // count the odd elements in vec + * int result = thrust::count_if(vec.begin(), vec.end(), is_odd()); + * // result == 2 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template + typename thrust::iterator_traits::difference_type + count_if(InputIterator first, InputIterator last, Predicate pred); + + +/*! \} // end counting + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include diff --git a/miniCUDA124/include/thrust/device_allocator.h b/miniCUDA124/include/thrust/device_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..14f63baa7868a32c5f8acb67a8212f78bccd9352 --- /dev/null +++ b/miniCUDA124/include/thrust/device_allocator.h @@ -0,0 +1,148 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file + * \brief An allocator which creates new elements in memory accessible by + * devices.
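+ *
+ * A minimal usage sketch (an editorial example; \p device_allocator is the
+ * default allocator of \p thrust::device_vector, so naming it explicitly as
+ * below is usually unnecessary):
+ *
+ * \code
+ * #include <thrust/device_allocator.h>
+ * #include <thrust/device_vector.h>
+ * ...
+ * // a device_vector of 100 ints, each initialized to 7, whose storage
+ * // is obtained through device_allocator<int>
+ * thrust::device_vector<int, thrust::device_allocator<int> > vec(100, 7);
+ * \endcode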
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/** \addtogroup allocators Allocators + * \ingroup memory_management + * \{ + */ + +/*! Memory resource adaptor that turns any memory resource that returns a fancy + * pointer with the same tag as \p device_ptr, and adapts it to a resource that returns + * a \p device_ptr. + */ +template +class device_ptr_memory_resource final + : public thrust::mr::memory_resource< + device_ptr + > +{ + typedef typename Upstream::pointer upstream_ptr; + +public: + /*! Initialize the adaptor with the global instance of the upstream resource. Obtains + * the global instance by calling \p get_global_resource. + */ + __host__ + device_ptr_memory_resource() : m_upstream(mr::get_global_resource()) + { + } + + /*! Initialize the adaptor with an upstream resource. + * + * \param upstream the upstream memory resource to adapt. + */ + __host__ + device_ptr_memory_resource(Upstream * upstream) : m_upstream(upstream) + { + } + + THRUST_NODISCARD __host__ + virtual pointer do_allocate(std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) override + { + return pointer(m_upstream->do_allocate(bytes, alignment).get()); + } + + __host__ + virtual void do_deallocate(pointer p, std::size_t bytes, std::size_t alignment) override + { + m_upstream->do_deallocate(upstream_ptr(p.get()), bytes, alignment); + } + +private: + Upstream * m_upstream; +}; + +/*! \brief An allocator which creates new elements in memory accessible by + * devices. + * + * \see https://en.cppreference.com/w/cpp/named_req/Allocator + */ +template +class device_allocator + : public thrust::mr::stateless_resource_allocator< + T, + device_ptr_memory_resource + > +{ + typedef thrust::mr::stateless_resource_allocator< + T, + device_ptr_memory_resource + > base; + +public: + /*! The \p rebind metafunction provides the type of a \p device_allocator + * instantiated with another type. + * + * \tparam U the other type to use for instantiation. + */ + template + struct rebind + { + /*! The typedef \p other gives the type of the rebound \p device_allocator. + */ + typedef device_allocator other; + }; + + /*! Default constructor has no effect. */ + __host__ __device__ + device_allocator() {} + + /*! Copy constructor has no effect. */ + __host__ __device__ + device_allocator(const device_allocator& other) : base(other) {} + + /*! Constructor from other \p device_allocator has no effect. */ + template + __host__ __device__ + device_allocator(const device_allocator& other) : base(other) {} + + device_allocator & operator=(const device_allocator &) = default; + + /*! Destructor has no effect. */ + __host__ __device__ + ~device_allocator() {} +}; + +/*! \} // allocators + */ + +THRUST_NAMESPACE_END diff --git a/miniCUDA124/include/thrust/device_delete.h b/miniCUDA124/include/thrust/device_delete.h new file mode 100644 index 0000000000000000000000000000000000000000..c0a55076206774c57987211b05ae507a53527847 --- /dev/null +++ b/miniCUDA124/include/thrust/device_delete.h @@ -0,0 +1,61 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief Deletes variables in device memory. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup memory_management Memory Management + * \{ + */ + +/*! \p device_delete deletes a \p device_ptr allocated with + * \p device_new. + * + * \param ptr The \p device_ptr to delete, assumed to have + * been allocated with \p device_new. + * \param n The number of objects to destroy at \p ptr. Defaults to \c 1 + * similar to \p device_new. + * + * \see device_ptr + * \see device_new + */ +template + inline void device_delete(thrust::device_ptr ptr, + const size_t n = 1); + +/*! \} // memory_management + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/miniCUDA124/include/thrust/device_free.h b/miniCUDA124/include/thrust/device_free.h new file mode 100644 index 0000000000000000000000000000000000000000..d16957ef0a9656673ac819b8b3564eb861c18d7d --- /dev/null +++ b/miniCUDA124/include/thrust/device_free.h @@ -0,0 +1,73 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief Deallocates storage allocated by \p device_malloc. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup memory_management Memory Management + * \{ + */ + +/*! \p device_free deallocates memory allocated by the function \p device_malloc. + * + * \param ptr A \p device_ptr pointing to memory to be deallocated. + * + * The following code snippet demonstrates how to use \p device_free to + * deallocate memory allocated by \p device_malloc. + * + * \code + * #include + * #include + * ... + * // allocate some integers with device_malloc + * const int N = 100; + * thrust::device_ptr int_array = thrust::device_malloc(N); + * + * // manipulate integers + * ... + * + * // deallocate with device_free + * thrust::device_free(int_array); + * \endcode + * + * \see device_ptr + * \see device_malloc + */ +inline void device_free(thrust::device_ptr ptr); + +/*! 
\} // memory_management + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/miniCUDA124/include/thrust/device_make_unique.h b/miniCUDA124/include/thrust/device_make_unique.h new file mode 100644 index 0000000000000000000000000000000000000000..3d493690b8ff13cb63019ebfe1f5fb8d3d8e0405 --- /dev/null +++ b/miniCUDA124/include/thrust/device_make_unique.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file device_make_unique.h + * \brief A factory function for creating `unique_ptr`s to device objects. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// + +template +__host__ +auto device_make_unique(Args&&... args) + THRUST_TRAILING_RETURN(decltype( + uninitialized_allocate_unique(device_allocator{}) + )) +{ +#if !defined(THRUST_DOXYGEN) // This causes Doxygen to choke for some reason. + // FIXME: This is crude - we construct an unnecessary T on the host for + // `device_new`. We need a proper dispatched `construct` algorithm to + // do this properly. + auto p = uninitialized_allocate_unique(device_allocator{}); + device_new(p.get(), T(THRUST_FWD(args)...)); + return p; +#endif +} + +/////////////////////////////////////////////////////////////////////////////// + +THRUST_NAMESPACE_END + +#endif // THRUST_CPP_DIALECT >= 2011 diff --git a/miniCUDA124/include/thrust/device_malloc.h b/miniCUDA124/include/thrust/device_malloc.h new file mode 100644 index 0000000000000000000000000000000000000000..be9e303138a32a3202448073281f85c2ff3d2b16 --- /dev/null +++ b/miniCUDA124/include/thrust/device_malloc.h @@ -0,0 +1,108 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief Allocates storage in device memory. 
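+ *
+ * A short end-to-end sketch pairing \p device_malloc with \p device_free
+ * (an editorial example, assuming a CUDA-enabled build):
+ *
+ * \code
+ * #include <thrust/device_malloc.h>
+ * #include <thrust/device_free.h>
+ * #include <thrust/fill.h>
+ * ...
+ * // allocate 256 doubles, fill them on the device, then release them
+ * thrust::device_ptr<double> d = thrust::device_malloc<double>(256);
+ * thrust::fill(d, d + 256, 1.0);
+ * thrust::device_free(d);
+ * \endcode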
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include // for std::size_t + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup memory_management Memory Management + * \{ + */ + +/*! This version of \p device_malloc allocates sequential device storage + * for bytes. + * + * \param n The number of bytes to allocate sequentially + * in device memory. + * \return A \p device_ptr to the newly allocated memory. + * + * The following code snippet demonstrates how to use \p device_malloc to + * allocate a range of device memory. + * + * \code + * #include + * #include + * ... + * // allocate some memory with device_malloc + * const int N = 100; + * thrust::device_ptr void_ptr = thrust::device_malloc(N); + * + * // manipulate memory + * ... + * + * // deallocate with device_free + * thrust::device_free(void_ptr); + * \endcode + * + * \see device_ptr + * \see device_free + */ +inline thrust::device_ptr device_malloc(const std::size_t n); + +/*! This version of \p device_malloc allocates sequential device storage for + * new objects of the given type. + * + * \param n The number of objects of type T to allocate + * sequentially in device memory. + * \return A \p device_ptr to the newly allocated memory. + * + * The following code snippet demonstrates how to use \p device_malloc to + * allocate a range of device memory. + * + * \code + * #include + * #include + * ... + * // allocate some integers with device_malloc + * const int N = 100; + * thrust::device_ptr int_array = thrust::device_malloc(N); + * + * // manipulate integers + * ... + * + * // deallocate with device_free + * thrust::device_free(int_array); + * \endcode + * + * \see device_ptr + * \see device_free + */ +template + inline thrust::device_ptr device_malloc(const std::size_t n); + +/*! \} // memory_management + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/miniCUDA124/include/thrust/device_malloc_allocator.h b/miniCUDA124/include/thrust/device_malloc_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..c4170e99d74588ea0696212527d50b34e65d6ca3 --- /dev/null +++ b/miniCUDA124/include/thrust/device_malloc_allocator.h @@ -0,0 +1,188 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief An allocator which allocates storage with \p device_malloc. 
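+ *
+ * A minimal usage sketch (an editorial example; note that the class below
+ * is documented as deprecated in favor of the thrust::mr resource-based
+ * allocators):
+ *
+ * \code
+ * #include <thrust/device_malloc_allocator.h>
+ * #include <thrust/device_vector.h>
+ * ...
+ * // a device_vector whose storage comes from device_malloc/device_free
+ * thrust::device_vector<float, thrust::device_malloc_allocator<float> > v(10);
+ * \endcode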
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +// forward declarations to WAR circular #includes +template class device_ptr; +template device_ptr device_malloc(const std::size_t n); + +/*! \addtogroup allocators Allocators + * \ingroup memory_management + * \{ + */ + +/*! \p device_malloc_allocator is a device memory allocator that employs the + * \p device_malloc function for allocation. + * + * \p device_malloc_allocator is deprecated in favor of thrust::mr + * memory resource-based allocators. + * + * \see device_malloc + * \see device_ptr + * \see device_allocator + * \see https://en.cppreference.com/w/cpp/memory/allocator + */ +template + class device_malloc_allocator +{ + public: + /*! Type of element allocated, \c T. */ + typedef T value_type; + + /*! Pointer to allocation, \c device_ptr. */ + typedef device_ptr pointer; + + /*! \c const pointer to allocation, \c device_ptr. */ + typedef device_ptr const_pointer; + + /*! Reference to allocated element, \c device_reference. */ + typedef device_reference reference; + + /*! \c const reference to allocated element, \c device_reference. */ + typedef device_reference const_reference; + + /*! Type of allocation size, \c std::size_t. */ + typedef std::size_t size_type; + + /*! Type of allocation difference, \c pointer::difference_type. */ + typedef typename pointer::difference_type difference_type; + + /*! The \p rebind metafunction provides the type of a \p device_malloc_allocator + * instantiated with another type. + * + * \tparam U The other type to use for instantiation. + */ + template + struct rebind + { + /*! The typedef \p other gives the type of the rebound \p device_malloc_allocator. + */ + typedef device_malloc_allocator other; + }; // end rebind + + /*! No-argument constructor has no effect. */ + __host__ __device__ + inline device_malloc_allocator() {} + + /*! No-argument destructor has no effect. */ + __host__ __device__ + inline ~device_malloc_allocator() {} + + /*! Copy constructor has no effect. */ + __host__ __device__ + inline device_malloc_allocator(device_malloc_allocator const&) {} + + /*! Constructor from other \p device_malloc_allocator has no effect. */ + template + __host__ __device__ + inline device_malloc_allocator(device_malloc_allocator const&) {} + +#if THRUST_CPP_DIALECT >= 2011 + device_malloc_allocator & operator=(const device_malloc_allocator &) = default; +#endif + + /*! Returns the address of an allocated object. + * \return &r. + */ + __host__ __device__ + inline pointer address(reference r) { return &r; } + + /*! Returns the address of an allocated object. + * \return &r. + */ + __host__ __device__ + inline const_pointer address(const_reference r) { return &r; } + + /*! Allocates storage for \p cnt objects. + * \param cnt The number of objects to allocate. + * \return A \p pointer to uninitialized storage for \p cnt objects. + * \note Memory allocated by this function must be deallocated with \p deallocate. + */ + __host__ + inline pointer allocate(size_type cnt, + const_pointer = const_pointer(static_cast(0))) + { + if(cnt > this->max_size()) + { + throw std::bad_alloc(); + } // end if + + return pointer(device_malloc(cnt)); + } // end allocate() + + /*!
+    /*! Deallocates storage for objects allocated with \p allocate.
+     *  \param p A \p pointer to the storage to deallocate.
+     *  \param cnt The size of the previous allocation.
+     *  \note Memory deallocated by this function must previously have been
+     *        allocated with \p allocate.
+     */
+    __host__
+    inline void deallocate(pointer p, size_type cnt)
+    {
+      // silence unused parameter warning while still leaving the parameter name for Doxygen
+      (void)(cnt);
+
+      device_free(p);
+    } // end deallocate()
+
+    /*! Returns the largest value \c n for which allocate(n) might succeed.
+     *  \return The largest value \c n for which allocate(n) might succeed.
+     */
+    inline size_type max_size() const
+    {
+      return (std::numeric_limits<size_type>::max)() / sizeof(T);
+    } // end max_size()
+
+    /*! Compares against another \p device_malloc_allocator for equality.
+     *  \return \c true
+     */
+    __host__ __device__
+    inline bool operator==(device_malloc_allocator const&) const { return true; }
+
+    /*! Compares against another \p device_malloc_allocator for inequality.
+     *  \return \c false
+     */
+    __host__ __device__
+    inline bool operator!=(device_malloc_allocator const &a) const {return !operator==(a); }
+}; // end device_malloc_allocator
+
+/*! \} // allocators
+ */
+
+THRUST_NAMESPACE_END
diff --git a/miniCUDA124/include/thrust/device_new.h b/miniCUDA124/include/thrust/device_new.h
new file mode 100644
index 0000000000000000000000000000000000000000..29c0fafbbc5d7201d837b89ae203bd16118beead
--- /dev/null
+++ b/miniCUDA124/include/thrust/device_new.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file device_new.h
+ *  \brief Constructs new elements in device memory
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+// #include this for size_t
+#include <cstddef>
+#include <thrust/device_ptr.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*!
+ *  \addtogroup memory_management Memory Management
+ *  \{
+ */
+
+/*! \p device_new implements the placement \c new operator for types
+ *  resident in device memory. \p device_new calls T's null
+ *  constructor on an array of objects in device memory.
+ *  No memory is allocated by this function.
+ *
+ *  \param p A \p device_ptr to a region of device memory into which
+ *           to construct one or many Ts.
+ *  \param n The number of objects to construct at \p p.
+ *  \return p, cast to T's type.
+ *
+ *  \see device_ptr
+ */
+template<typename T>
+  device_ptr<T> device_new(device_ptr<void> p,
+                           const size_t n = 1);
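+
+// For illustration (editor's sketch, not part of the original header):
+// default-constructing objects in raw storage obtained from device_malloc,
+// assuming a CUDA-capable build.
+//
+//   thrust::device_ptr<void> raw = thrust::device_malloc(3 * sizeof(int));
+//   thrust::device_ptr<int>  p   = thrust::device_new<int>(raw, 3);
+//   thrust::device_free(raw);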
+
+/*! \p device_new implements the placement new operator for types
+ *  resident in device memory. \p device_new calls T's copy
+ *  constructor on an array of objects in device memory. No memory is
+ *  allocated by this function.
+ *
+ *  \param p A \p device_ptr to a region of device memory into which to
+ *           construct one or many Ts.
+ *  \param exemplar The value from which to copy.
+ *  \param n The number of objects to construct at \p p.
+ *  \return p, cast to T's type.
+ *
+ *  \see device_ptr
+ *  \see fill
+ */
+template<typename T>
+  device_ptr<T> device_new(device_ptr<void> p,
+                           const T &exemplar,
+                           const size_t n = 1);
+
+/*! \p device_new implements the new operator for types resident in device memory.
+ *  It allocates device memory large enough to hold \p n new objects of type \c T.
+ *
+ *  \param n The number of objects to allocate. Defaults to \c 1.
+ *  \return A \p device_ptr to the newly allocated region of device memory.
+ */
+template<typename T>
+  device_ptr<T> device_new(const size_t n = 1);
+
+/*! \} // memory_management
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/device_new.inl>
diff --git a/miniCUDA124/include/thrust/device_new_allocator.h b/miniCUDA124/include/thrust/device_new_allocator.h
new file mode 100644
index 0000000000000000000000000000000000000000..7b3e6ea203e30073a6c986bc7a82683b7fc28940
--- /dev/null
+++ b/miniCUDA124/include/thrust/device_new_allocator.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*! \file
+ *  \brief An allocator which allocates storage with \p device_new.
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+#include <thrust/device_ptr.h>
+#include <thrust/device_reference.h>
+#include <thrust/device_new.h>
+#include <thrust/device_delete.h>
+
+#include <cuda/std/cstddef>
+#include <cuda/std/limits>
+
+#include <stdexcept>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup allocators Allocators
+ *  \ingroup memory_management
+ *  \{
+ */
+
+/*! \p device_new_allocator is a device memory allocator that employs the
+ *  \p device_new function for allocation.
+ *
+ *  \see device_new
+ *  \see device_ptr
+ *  \see https://en.cppreference.com/w/cpp/memory/allocator
+ */
+template<typename T>
+  class device_new_allocator
+{
+  public:
+    /*! Type of element allocated, \c T. */
+    typedef T value_type;
+
+    /*! Pointer to allocation, \c device_ptr<T>. */
+    typedef device_ptr<T> pointer;
+
+    /*! \c const pointer to allocation, \c device_ptr<const T>. */
+    typedef device_ptr<const T> const_pointer;
+
+    /*! Reference to allocated element, \c device_reference<T>. */
+    typedef device_reference<T> reference;
+
+    /*! \c const reference to allocated element, \c device_reference<const T>. */
+    typedef device_reference<const T> const_reference;
+
+    /*! Type of allocation size, \c ::cuda::std::size_t. */
+    typedef ::cuda::std::size_t size_type;
+
+    /*! Type of allocation difference, \c pointer::difference_type. */
+    typedef typename pointer::difference_type difference_type;
+
+    /*! The \p rebind metafunction provides the type of a \p device_new_allocator
+     *  instantiated with another type.
+     *
+     *  \tparam U The other type to use for instantiation.
+     */
+    template<typename U>
+      struct rebind
+    {
+      /*! The typedef \p other gives the type of the rebound \p device_new_allocator.
+       */
+      typedef device_new_allocator<U> other;
+    }; // end rebind
+
+    /*! No-argument constructor has no effect. */
+    __host__ __device__
+    inline device_new_allocator() {}
+
+    /*! No-argument destructor has no effect. */
+    __host__ __device__
+    inline ~device_new_allocator() {}
+
+    /*! Copy constructor has no effect. */
+    __host__ __device__
+    inline device_new_allocator(device_new_allocator const&) {}
+
+    /*! Constructor from other \p device_new_allocator has no effect. */
+    template<typename U>
+    __host__ __device__
+    inline device_new_allocator(device_new_allocator<U> const&) {}
+
+    /*! Returns the address of an allocated object.
+     *  \return &r.
+     */
+    __host__ __device__
+    inline pointer address(reference r) { return &r; }
+
+    /*! Returns the address of an allocated object.
+     *  \return &r.
+     */
+    __host__ __device__
+    inline const_pointer address(const_reference r) { return &r; }
+
+    /*! Allocates storage for \p cnt objects.
+     *  \param cnt The number of objects to allocate.
+     *  \return A \p pointer to uninitialized storage for \p cnt objects.
+     *  \note Memory allocated by this function must be deallocated with \p deallocate.
+     */
+    __host__
+    inline pointer allocate(size_type cnt,
+                            const_pointer = const_pointer(static_cast<T*>(0)))
+    {
+      if(cnt > this->max_size())
+      {
+        throw std::bad_alloc();
+      } // end if
+
+      // allocate with device_new rather than the keyword new
+      return pointer(device_new<T>(cnt));
+    } // end allocate()
+
+    /*! Deallocates storage for objects allocated with \p allocate.
+     *  \param p A \p pointer to the storage to deallocate.
+     *  \param cnt The size of the previous allocation.
+     *  \note Memory deallocated by this function must previously have been
+     *        allocated with \p allocate.
+     */
+    __host__
+    inline void deallocate(pointer p, size_type cnt)
+    {
+      // deallocate with device_delete rather than the keyword delete
+      (void)cnt;
+      device_delete(p);
+    } // end deallocate()
+
+    /*! Returns the largest value \c n for which allocate(n) might succeed.
+     *  \return The largest value \c n for which allocate(n) might succeed.
+     */
+    __host__ __device__
+    inline size_type max_size() const
+    {
+      return ::cuda::std::numeric_limits<size_type>::max THRUST_PREVENT_MACRO_SUBSTITUTION () / sizeof(T);
+    } // end max_size()
+
+    /*! Compares against another \p device_new_allocator for equality.
+     *  \return \c true
+     */
+    __host__ __device__
+    inline bool operator==(device_new_allocator const&) { return true; }
+
+    /*! Compares against another \p device_new_allocator for inequality.
+     *  \return \c false
+     */
+    __host__ __device__
+    inline bool operator!=(device_new_allocator const &a) {return !operator==(a); }
+}; // end device_new_allocator
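+
+// For illustration (editor's sketch, not part of the original header):
+// exercising the allocator directly, assuming a CUDA-capable build.
+//
+//   thrust::device_new_allocator<float> alloc;
+//   thrust::device_ptr<float> p = alloc.allocate(32); // device_new<float>(32)
+//   alloc.deallocate(p, 32);                          // device_delete(p)
+//
+//   // rebinding to another element type follows the usual allocator pattern:
+//   typedef thrust::device_new_allocator<float>::rebind<int>::other int_alloc;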
+
+/*! \} // allocators
+ */
+
+THRUST_NAMESPACE_END
diff --git a/miniCUDA124/include/thrust/device_ptr.h b/miniCUDA124/include/thrust/device_ptr.h
new file mode 100644
index 0000000000000000000000000000000000000000..f014dbb8003e0cd2ab7a188a0f73da2ccaf16bd4
--- /dev/null
+++ b/miniCUDA124/include/thrust/device_ptr.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2008-2021 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*! \file
+ *  \brief A pointer to an object which resides in memory associated with the
+ *  \c device system.
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+#include <thrust/detail/pointer.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup memory_management Memory Management
+ *  \{
+ */
+
+template <typename T> class device_reference;
+
+/*! \brief \c device_ptr is a pointer-like object which points to an object that
+ *  resides in memory associated with the \ref device system.
+ *
+ *  \c device_ptr has pointer semantics: it may be dereferenced safely from
+ *  anywhere, including the \ref host, and may be manipulated with pointer
+ *  arithmetic.
+ *
+ *  \c device_ptr can be created with \ref device_new, \ref device_malloc,
+ *  \ref device_malloc_allocator, \ref device_allocator, or
+ *  \ref device_pointer_cast, or by explicitly calling its constructor with a
+ *  raw pointer.
+ *
+ *  The raw pointer contained in a \c device_ptr may be obtained via the \c get
+ *  member function or the \ref raw_pointer_cast free function.
+ *
+ *  \ref algorithms operating on \c device_ptr types will automatically be
+ *  dispatched to the \ref device system.
+ *
+ *  \note \c device_ptr is not a smart pointer; it is the programmer's
+ *  responsibility to deallocate memory pointed to by \c device_ptr.
+ *
+ *  \see device_new
+ *  \see device_malloc
+ *  \see device_malloc_allocator
+ *  \see device_allocator
+ *  \see device_pointer_cast
+ *  \see raw_pointer_cast
+ */
+template <typename T>
+class device_ptr
+    : public thrust::pointer<
+        T,
+        thrust::device_system_tag,
+        thrust::device_reference<T>,
+        thrust::device_ptr<T>
+      >
+{
+  private:
+    using super_t = thrust::pointer<
+      T,
+      thrust::device_system_tag,
+      thrust::device_reference<T>,
+      thrust::device_ptr<T>
+    >;
+
+  public:
+    /*! \brief Construct a null \c device_ptr.
+     *
+     *  \post get() == nullptr.
+     */
+    __host__ __device__
+    device_ptr() : super_t() {}
+
+    /*! \brief Construct a null \c device_ptr.
+     *
+     *  \param ptr A null pointer.
+     *
+     *  \post get() == nullptr.
+     */
+    __host__ __device__
+    device_ptr(std::nullptr_t) : super_t(nullptr) {}
+
+    /*! \brief Construct a \c device_ptr from a raw pointer which is
+     *  convertible to \c T*.
+     *
+     *  \tparam U A type whose pointer is convertible to \c T*.
+     *  \param ptr A raw pointer to a \c U in device memory to construct from.
+     *
+     *  \pre std::is_convertible_v<U*, T*> == true.
+     *
+     *  \pre \c ptr points to a location in device memory.
+     *
+     *  \post get() == ptr.
+     */
+    template <typename U>
+    __host__ __device__
+    explicit device_ptr(U* ptr) : super_t(ptr) {}
+
+    /*! \brief Copy construct a \c device_ptr from another \c device_ptr whose
+     *  pointer type is convertible to \c T*.
+     *
+     *  \tparam U A type whose pointer is convertible to \c T*.
+     *  \param other A \c device_ptr to a \c U to construct from.
+     *
+     *  \pre std::is_convertible_v<U*, T*> == true.
+     *
+     *  \post get() == other.get().
+     */
+    template <typename U>
+    __host__ __device__
+    device_ptr(device_ptr<U> const& other) : super_t(other) {}
+
+    /*! \brief Set this \c device_ptr to point to the same object as another
+     *  \c device_ptr whose pointer type is convertible to \c T*.
+     *
+     *  \tparam U A type whose pointer is convertible to \c T*.
+     *  \param other A \c device_ptr to a \c U to assign from.
+     *
+     *  \pre std::is_convertible_v<U*, T*> == true.
+     *
+     *  \post get() == other.get().
+     *
+     *  \return \c *this.
+     */
+    template <typename U>
+    __host__ __device__
+    device_ptr &operator=(device_ptr<U> const& other)
+    {
+      super_t::operator=(other);
+      return *this;
+    }
+
+    /*! \brief Set this \c device_ptr to null.
+     *
+     *  \param ptr A null pointer.
+     *
+     *  \post get() == nullptr.
+     *
+     *  \return \c *this.
+     */
+    __host__ __device__
+    device_ptr& operator=(std::nullptr_t)
+    {
+      super_t::operator=(nullptr);
+      return *this;
+    }
+
+#if THRUST_DOXYGEN
+    /*! \brief Return the raw pointer that this \c device_ptr points to.
+     */
+    __host__ __device__
+    T* get() const;
+#endif
+};
+
+#if THRUST_DOXYGEN
+/*! Write the address that a \c device_ptr points to to an output stream.
+ *
+ *  \param os The output stream.
+ *  \param dp The \c device_ptr to output.
+ *
+ *  \return \c os.
+ */
+template <typename T, typename CharT, typename Traits>
+__host__ std::basic_ostream<CharT, Traits>&
+operator<<(std::basic_ostream<CharT, Traits>& os, device_ptr<T> const& dp);
+#endif
+
+/*! \brief Create a \c device_ptr from a raw pointer.
+ *
+ *  \tparam T Any type.
+ *  \param ptr A raw pointer to a \c T in device memory.
+ *
+ *  \pre \c ptr points to a location in device memory.
+ *
+ *  \return A \c device_ptr pointing to \c ptr.
+ */
+template <typename T>
+__host__ __device__
+device_ptr<T> device_pointer_cast(T* ptr);
+
+/*! \brief Create a \c device_ptr from another \c device_ptr.
+ *
+ *  \tparam T Any type.
+ *  \param dptr A \c device_ptr to a \c T.
+ */
+template <typename T>
+__host__ __device__
+device_ptr<T> device_pointer_cast(device_ptr<T> const& dptr);
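+
+// For illustration (editor's sketch, not part of the original header):
+// wrapping memory from cudaMalloc so Thrust algorithms can use it, assuming
+// a CUDA-capable build and <thrust/fill.h>.
+//
+//   int* raw = nullptr;
+//   cudaMalloc(&raw, 8 * sizeof(int));
+//   thrust::device_ptr<int> d = thrust::device_pointer_cast(raw);
+//   thrust::fill(d, d + 8, 0);              // dispatches to the device system
+//   cudaFree(thrust::raw_pointer_cast(d));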
+
+/*! \} // memory_management
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/device_ptr.inl>
+#include <thrust/detail/raw_pointer_cast.h>
diff --git a/miniCUDA124/include/thrust/device_reference.h b/miniCUDA124/include/thrust/device_reference.h
new file mode 100644
index 0000000000000000000000000000000000000000..ddde5bf506d995d6dff05531da7eb6d94f43cc4c
--- /dev/null
+++ b/miniCUDA124/include/thrust/device_reference.h
@@ -0,0 +1,995 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*! \file
+ *  \brief A reference to an object which resides in memory associated with the
+ *  device system.
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+#include <thrust/device_ptr.h>
+#include <thrust/detail/type_traits.h>
+#include <thrust/detail/reference.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup memory_management Memory Management
+ *  \{
+ */
+
+/*! \p device_reference acts as a reference-like object to an object stored in device memory.
+ *  \p device_reference is not intended to be used directly; rather, this type
+ *  is the result of dereferencing a \p device_ptr. Similarly, taking the address of
+ *  a \p device_reference yields a \p device_ptr.
+ *
+ *  \p device_reference may often be used from host code in place of operations defined on
+ *  its associated \c value_type. For example, when \p device_reference refers to an
+ *  arithmetic type, arithmetic operations on it are legal:
+ *
+ *  \code
+ *  #include <thrust/device_vector.h>
+ *
+ *  int main(void)
+ *  {
+ *    thrust::device_vector<int> vec(1, 13);
+ *
+ *    thrust::device_reference<int> ref_to_thirteen = vec[0];
+ *
+ *    int x = ref_to_thirteen + 1;
+ *
+ *    // x is 14
+ *
+ *    return 0;
+ *  }
+ *  \endcode
+ *
+ *  Similarly, we can print the value of \c ref_to_thirteen in the above code by using an
+ *  \c iostream:
+ *
+ *  \code
+ *  #include <thrust/device_vector.h>
+ *  #include <iostream>
+ *
+ *  int main(void)
+ *  {
+ *    thrust::device_vector<int> vec(1, 13);
+ *
+ *    thrust::device_reference<int> ref_to_thirteen = vec[0];
+ *
+ *    std::cout << ref_to_thirteen << std::endl;
+ *
+ *    // 13 is printed
+ *
+ *    return 0;
+ *  }
+ *  \endcode
+ *
+ *  Of course, we needn't explicitly create a \p device_reference in the previous
+ *  example, because one is returned by \p device_vector's bracket operator. A more natural
+ *  way to print the value of a \p device_vector element might be:
+ *
+ *  \code
+ *  #include <thrust/device_vector.h>
+ *  #include <iostream>
+ *
+ *  int main(void)
+ *  {
+ *    thrust::device_vector<int> vec(1, 13);
+ *
+ *    std::cout << vec[0] << std::endl;
+ *
+ *    // 13 is printed
+ *
+ *    return 0;
+ *  }
+ *  \endcode
+ *
+ *  These kinds of operations should be used sparingly in performance-critical code, because
+ *  they imply a potentially expensive copy between host and device space.
+ *
+ *  Some operations which are possible with regular objects are impossible with their
+ *  corresponding \p device_reference objects due to the requirements of the C++ language. For
+ *  example, because the member access operator cannot be overloaded, member variables and functions
+ *  of a referent object cannot be directly accessed through its \p device_reference.
+ *
+ *  The following code, which generates a compiler error, illustrates:
+ *
+ *  \code
+ *  #include <thrust/device_vector.h>
+ *
+ *  struct foo
+ *  {
+ *    int x;
+ *  };
+ *
+ *  int main(void)
+ *  {
+ *    thrust::device_vector<foo> foo_vec(1);
+ *
+ *    thrust::device_reference<foo> foo_ref = foo_vec[0];
+ *
+ *    foo_ref.x = 13; // ERROR: x cannot be accessed through foo_ref
+ *
+ *    return 0;
+ *  }
+ *  \endcode
+ *
+ *  Instead, a host space copy must be created to access \c foo's \c x member:
+ *
+ *  \code
+ *  #include <thrust/device_vector.h>
+ *
+ *  struct foo
+ *  {
+ *    int x;
+ *  };
+ *
+ *  int main(void)
+ *  {
+ *    thrust::device_vector<foo> foo_vec(1);
+ *
+ *    // create a local host-side foo object
+ *    foo host_foo;
+ *    host_foo.x = 13;
+ *
+ *    thrust::device_reference<foo> foo_ref = foo_vec[0];
+ *
+ *    foo_ref = host_foo;
+ *
+ *    // foo_ref's x member is 13
+ *
+ *    return 0;
+ *  }
+ *  \endcode
+ *
+ *  Another common case where a \p device_reference cannot directly be used in place of
+ *  its referent object occurs when passing them as parameters to functions like \c printf
+ *  which have varargs parameters. Because varargs parameters must be Plain Old Data, a
+ *  \p device_reference to a POD type requires a cast when passed to \c printf:
+ *
+ *  \code
+ *  #include <thrust/device_vector.h>
+ *  #include <stdio.h>
+ *
+ *  int main(void)
+ *  {
+ *    thrust::device_vector<int> vec(1,13);
+ *
+ *    // vec[0] must be cast to int when passing to printf
+ *    printf("%d\n", (int) vec[0]);
+ *
+ *    return 0;
+ *  }
+ *  \endcode
+ *
+ *  \see device_ptr
+ *  \see device_vector
+ */
+template<typename T>
+  class device_reference
+    : public thrust::reference<
+               T,
+               thrust::device_ptr<T>,
+               thrust::device_reference<T>
+             >
+{
+  private:
+    typedef thrust::reference<
+      T,
+      thrust::device_ptr<T>,
+      thrust::device_reference<T>
+    > super_t;
+
+  public:
+    /*! The type of the value referenced by this type of \p device_reference.
+     */
+    typedef typename super_t::value_type value_type;
+
+    /*! The type of the expression &ref, where ref is a \p device_reference.
+     */
+    typedef typename super_t::pointer pointer;
+
+    /*! This copy constructor accepts a const reference to another
+     *  \p device_reference. After this \p device_reference is constructed,
+     *  it shall refer to the same object as \p other.
+     *
+     *  \param other A \p device_reference to copy from.
+     *
+     *  The following code snippet demonstrates the semantics of this
+     *  copy constructor.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_reference<int> ref = v[0];
+     *
+     *  // ref equals the object at v[0]
+     *  assert(ref == v[0]);
+     *
+     *  // the address of ref equals the address of v[0]
+     *  assert(&ref == &v[0]);
+     *
+     *  // modifying v[0] modifies ref
+     *  v[0] = 13;
+     *  assert(ref == 13);
+     *  \endcode
+     *
+     *  \note This constructor is templated primarily to allow initialization of
+     *  device_reference<T> from device_reference<OtherT>.
+     */
+    template<typename OtherT>
+    __host__ __device__
+    device_reference(const device_reference<OtherT> &other,
+                     typename thrust::detail::enable_if_convertible<
+                       typename device_reference<OtherT>::pointer,
+                       pointer
+                     >::type * = 0)
+      : super_t(other)
+    {}
+
+    /*! This copy constructor initializes this \p device_reference
+     *  to refer to an object pointed to by the given \p device_ptr. After
+     *  this \p device_reference is constructed, it shall refer to the
+     *  object pointed to by \p ptr.
+     *
+     *  \param ptr A \p device_ptr to copy from.
+     *
+     *  The following code snippet demonstrates the semantics of this
+     *  copy constructor.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals the object pointed to by ptr
+     *  assert(ref == *ptr);
+     *
+     *  // the address of ref equals ptr
+     *  assert(&ref == ptr);
+     *
+     *  // modifying *ptr modifies ref
+     *  *ptr = 13;
+     *  assert(ref == 13);
+     *  \endcode
+     */
+    __host__ __device__
+    explicit device_reference(const pointer &ptr)
+      : super_t(ptr)
+    {}
+
+    /*! This assignment operator assigns the value of the object referenced by
+     *  the given \p device_reference to the object referenced by this
+     *  \p device_reference.
+     *
+     *  \param other The \p device_reference to assign from.
+     *  \return *this
+     */
+    template<typename OtherT>
+    __host__ __device__
+    device_reference &operator=(const device_reference<OtherT> &other)
+    {
+      return super_t::operator=(other);
+    }
+
+    /*! Assignment operator assigns the value of the given value to the
+     *  value referenced by this \p device_reference.
+     *
+     *  \param x The value to assign from.
+     *  \return *this
+     */
+    __host__ __device__
+    device_reference &operator=(const value_type &x)
+    {
+      return super_t::operator=(x);
+    }
+
+// declare these members for the purpose of Doxygenating them
+// they actually exist in a derived-from class
+#if 0
+    /*! Address-of operator returns a \p device_ptr pointing to the object
+     *  referenced by this \p device_reference. It does not return the
+     *  address of this \p device_reference.
+     *
+     *  \return A \p device_ptr pointing to the object this
+     *  \p device_reference references.
+     */
+    __host__ __device__
+    pointer operator&(void) const;
+
+    /*! Conversion operator converts this \p device_reference to T
+     *  by returning a copy of the object referenced by this
+     *  \p device_reference.
+     *
+     *  \return A copy of the object referenced by this \p device_reference.
+     */
+    __host__ __device__
+    operator value_type (void) const;
+
+    /*! swaps the value this \p device_reference references with another.
+     *  \param other The other \p device_reference with which to swap.
+     */
+    __host__ __device__
+    void swap(device_reference &other);
+
+    /*! Prefix increment operator increments the object referenced by this
+     *  \p device_reference.
+     *
+     *  \return *this
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's prefix increment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // increment ref
+     *  ++ref;
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *  \endcode
+     *
+     *  \note The increment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator++(void);
+
+    /*! Postfix increment operator copies the object referenced by this
+     *  \p device_reference, increments the object referenced by this
+     *  \p device_reference, and returns the copy.
+     *
+     *  \return A copy of the object referenced by this \p device_reference
+     *          before being incremented.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's postfix increment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // increment ref
+     *  int x = ref++;
+     *
+     *  // x equals 0
+     *  assert(x == 0);
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *  \endcode
+     *
+     *  \note The increment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    value_type operator++(int);
+
+    /*! Addition assignment operator add-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the add-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's addition assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // add-assign ref
+     *  ref += 5;
+     *
+     *  // ref equals 5
+     *  assert(ref == 5);
+     *
+     *  // the object pointed to by ptr equals 5
+     *  assert(*ptr == 5);
+     *
+     *  // v[0] equals 5
+     *  assert(v[0] == 5);
+     *  \endcode
+     *
+     *  \note The add-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator+=(const T &rhs);
+
+    /*! Prefix decrement operator decrements the object referenced by this
+     *  \p device_reference.
+     *
+     *  \return *this
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's prefix decrement operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // decrement ref
+     *  --ref;
+     *
+     *  // ref equals -1
+     *  assert(ref == -1);
+     *
+     *  // the object pointed to by ptr equals -1
+     *  assert(*ptr == -1);
+     *
+     *  // v[0] equals -1
+     *  assert(v[0] == -1);
+     *  \endcode
+     *
+     *  \note The decrement executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator--(void);
+
+    /*! Postfix decrement operator copies the object referenced by this
+     *  \p device_reference, decrements the object referenced by this
+     *  \p device_reference, and returns the copy.
+     *
+     *  \return A copy of the object referenced by this \p device_reference
+     *          before being decremented.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's postfix decrement operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // decrement ref
+     *  int x = ref--;
+     *
+     *  // x equals 0
+     *  assert(x == 0);
+     *
+     *  // ref equals -1
+     *  assert(ref == -1);
+     *
+     *  // the object pointed to by ptr equals -1
+     *  assert(*ptr == -1);
+     *
+     *  // v[0] equals -1
+     *  assert(v[0] == -1);
+     *  \endcode
+     *
+     *  \note The decrement executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    value_type operator--(int);
+
+    /*! Subtraction assignment operator subtract-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the subtraction-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's subtraction assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // subtract-assign ref
+     *  ref -= 5;
+     *
+     *  // ref equals -5
+     *  assert(ref == -5);
+     *
+     *  // the object pointed to by ptr equals -5
+     *  assert(*ptr == -5);
+     *
+     *  // v[0] equals -5
+     *  assert(v[0] == -5);
+     *  \endcode
+     *
+     *  \note The subtract-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator-=(const T &rhs);
+
+    /*! Multiplication assignment operator multiply-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the multiply-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's multiply assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,1);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *
+     *  // multiply-assign ref
+     *  ref *= 5;
+     *
+     *  // ref equals 5
+     *  assert(ref == 5);
+     *
+     *  // the object pointed to by ptr equals 5
+     *  assert(*ptr == 5);
+     *
+     *  // v[0] equals 5
+     *  assert(v[0] == 5);
+     *  \endcode
+     *
+     *  \note The multiply-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator*=(const T &rhs);
+
+    /*! Division assignment operator divide-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the divide-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's divide assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,5);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 5
+     *  assert(ref == 5);
+     *
+     *  // the object pointed to by ptr equals 5
+     *  assert(*ptr == 5);
+     *
+     *  // v[0] equals 5
+     *  assert(v[0] == 5);
+     *
+     *  // divide-assign ref
+     *  ref /= 5;
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *  \endcode
+     *
+     *  \note The divide-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator/=(const T &rhs);
+
+    /*! Modulus assignment operator modulus-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the modulus-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's modulus assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,5);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 5
+     *  assert(ref == 5);
+     *
+     *  // the object pointed to by ptr equals 5
+     *  assert(*ptr == 5);
+     *
+     *  // v[0] equals 5
+     *  assert(v[0] == 5);
+     *
+     *  // modulus-assign ref
+     *  ref %= 5;
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *  \endcode
+     *
+     *  \note The modulus-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator%=(const T &rhs);
+
+    /*! Bitwise left shift assignment operator left shift-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the left shift-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's left shift assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,1);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *
+     *  // left shift-assign ref
+     *  ref <<= 1;
+     *
+     *  // ref equals 2
+     *  assert(ref == 2);
+     *
+     *  // the object pointed to by ptr equals 2
+     *  assert(*ptr == 2);
+     *
+     *  // v[0] equals 2
+     *  assert(v[0] == 2);
+     *  \endcode
+     *
+     *  \note The left shift-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator<<=(const T &rhs);
+
+    /*! Bitwise right shift assignment operator right shift-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the right shift-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's right shift assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,2);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 2
+     *  assert(ref == 2);
+     *
+     *  // the object pointed to by ptr equals 2
+     *  assert(*ptr == 2);
+     *
+     *  // v[0] equals 2
+     *  assert(v[0] == 2);
+     *
+     *  // right shift-assign ref
+     *  ref >>= 1;
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *  \endcode
+     *
+     *  \note The right shift-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator>>=(const T &rhs);
+
+    /*! Bitwise AND assignment operator AND-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the AND-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's AND assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,1);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *
+     *  // AND-assign ref
+     *  ref &= 0;
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *  \endcode
+     *
+     *  \note The AND-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator&=(const T &rhs);
+
+    /*! Bitwise OR assignment operator OR-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the OR-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's OR assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,0);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *
+     *  // OR-assign ref
+     *  ref |= 1;
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *  \endcode
+     *
+     *  \note The OR-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator|=(const T &rhs);
+
+    /*! Bitwise XOR assignment operator XOR-assigns the object referenced by this
+     *  \p device_reference and returns this \p device_reference.
+     *
+     *  \param rhs The right hand side of the XOR-assignment.
+     *  \return *this.
+     *
+     *  The following code snippet demonstrates the semantics of
+     *  \p device_reference's XOR assignment operator.
+     *
+     *  \code
+     *  #include <thrust/device_vector.h>
+     *  #include <assert.h>
+     *  ...
+     *  thrust::device_vector<int> v(1,1);
+     *  thrust::device_ptr<int> ptr = &v[0];
+     *  thrust::device_reference<int> ref(ptr);
+     *
+     *  // ref equals 1
+     *  assert(ref == 1);
+     *
+     *  // the object pointed to by ptr equals 1
+     *  assert(*ptr == 1);
+     *
+     *  // v[0] equals 1
+     *  assert(v[0] == 1);
+     *
+     *  // XOR-assign ref
+     *  ref ^= 1;
+     *
+     *  // ref equals 0
+     *  assert(ref == 0);
+     *
+     *  // the object pointed to by ptr equals 0
+     *  assert(*ptr == 0);
+     *
+     *  // v[0] equals 0
+     *  assert(v[0] == 0);
+     *  \endcode
+     *
+     *  \note The XOR-assignment executes as if it were executed on the host.
+     *  This may change in a later version.
+     */
+    device_reference &operator^=(const T &rhs);
+#endif // end doxygen-only members
+}; // end device_reference
+
+/*! swaps the value of one \p device_reference with another.
+ *  \param x The first \p device_reference of interest.
+ *  \param y The second \p device_reference of interest.
+ */
+template<typename T>
+__host__ __device__
+void swap(device_reference<T>& x, device_reference<T>& y)
+{
+  x.swap(y);
+}
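+
+// For illustration (editor's sketch, not part of the original header):
+// swapping through named references exchanges the referents in device
+// memory, assuming a CUDA-capable build.
+//
+//   thrust::device_vector<int> v = {1, 2};
+//   thrust::device_reference<int> a = v[0], b = v[1];
+//   thrust::swap(a, b);   // v now holds {2, 1}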
+
+// declare these methods for the purpose of Doxygenating them
+// they actually are defined for a derived-from class
+#if THRUST_DOXYGEN
+/*! Writes to an output stream the value of a \p device_reference.
+ *
+ *  \param os The output stream.
+ *  \param y The \p device_reference to output.
+ *  \return os.
+ */
+template<typename T, typename CharT, typename Traits>
+std::basic_ostream<CharT, Traits> &
+operator<<(std::basic_ostream<CharT, Traits> &os, const device_reference<T> &y);
+#endif
+
+/*! \} // memory_management
+ */
+
+THRUST_NAMESPACE_END
diff --git a/miniCUDA124/include/thrust/device_vector.h b/miniCUDA124/include/thrust/device_vector.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc3e15c8082337ba25b3afe583c53498f4f0acf7
--- /dev/null
+++ b/miniCUDA124/include/thrust/device_vector.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2008-2018 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file
+ *  \brief A dynamically-sizable array of elements which resides in memory
+ *  accessible to devices.
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+#include <thrust/device_allocator.h>
+#include <thrust/detail/vector_base.h>
+
+#include <initializer_list>
+#include <utility>
+#include <vector>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup containers Containers
+ *  \{
+ */
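+
+// For illustration (editor's sketch, not part of the original header): basic
+// device_vector usage, assuming a CUDA-capable build and <thrust/reduce.h>.
+//
+//   thrust::device_vector<int> v(4);              // 4 zero-initialized ints on the device
+//   v[0] = 7;                                     // host-side write through device_reference
+//   v.push_back(9);                               // amortized constant-time growth at the end
+//   int sum = thrust::reduce(v.begin(), v.end()); // algorithms dispatch to the device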
+
+/*! A \p device_vector is a container that supports random access to elements,
+ *  constant time removal of elements at the end, and linear time insertion
+ *  and removal of elements at the beginning or in the middle. The number of
+ *  elements in a \p device_vector may vary dynamically; memory management is
+ *  automatic. The memory associated with a \p device_vector resides in the
+ *  memory accessible to devices.
+ *
+ *  \see https://en.cppreference.com/w/cpp/container/vector
+ *  \see device_allocator
+ *  \see host_vector
+ *  \see universal_vector
+ */
+template<typename T, typename Alloc = thrust::device_allocator<T> >
+  class device_vector
+    : public detail::vector_base<T, Alloc>
+{
+  private:
+    typedef detail::vector_base<T, Alloc> Parent;
+
+  public:
+    /*! \cond
+     */
+    typedef typename Parent::size_type  size_type;
+    typedef typename Parent::value_type value_type;
+    /*! \endcond
+     */
+
+    /*! This constructor creates an empty \p device_vector.
+     */
+    device_vector(void)
+      :Parent() {}
+
+    /*! This constructor creates an empty \p device_vector.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    device_vector(const Alloc &alloc)
+      :Parent(alloc) {}
+
+    /*! The destructor erases the elements.
+     */
+    // Define an empty destructor to explicitly specify
+    // its execution space qualifier, as a workaround for nvcc warning
+    ~device_vector(void) {}
+
+    /*! This constructor creates a \p device_vector with the given
+     *  size.
+     *  \param n The number of elements to initially create.
+     */
+    explicit device_vector(size_type n)
+      :Parent(n) {}
+
+    /*! This constructor creates a \p device_vector with the given
+     *  size.
+     *  \param n The number of elements to initially create.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    explicit device_vector(size_type n, const Alloc &alloc)
+      :Parent(n,alloc) {}
+
+    /*! This constructor creates a \p device_vector with copies
+     *  of an exemplar element.
+     *  \param n The number of elements to initially create.
+     *  \param value An element to copy.
+     */
+    explicit device_vector(size_type n, const value_type &value)
+      :Parent(n,value) {}
+
+    /*! This constructor creates a \p device_vector with copies
+     *  of an exemplar element.
+     *  \param n The number of elements to initially create.
+     *  \param value An element to copy.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    explicit device_vector(size_type n, const value_type &value, const Alloc &alloc)
+      :Parent(n,value,alloc) {}
+
+    /*! Copy constructor copies from an exemplar \p device_vector.
+     *  \param v The \p device_vector to copy.
+     */
+    device_vector(const device_vector &v)
+      :Parent(v) {}
+
+    /*! Copy constructor copies from an exemplar \p device_vector.
+     *  \param v The \p device_vector to copy.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    device_vector(const device_vector &v, const Alloc &alloc)
+      :Parent(v,alloc) {}
+
+  #if THRUST_CPP_DIALECT >= 2011
+    /*! Move constructor moves from another \p device_vector.
+     *  \param v The device_vector to move.
+     */
+    device_vector(device_vector &&v)
+      :Parent(std::move(v)) {}
+
+    /*! Move constructor moves from another \p device_vector.
+     *  \param v The device_vector to move.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    device_vector(device_vector &&v, const Alloc &alloc)
+      :Parent(std::move(v), alloc) {}
+  #endif // THRUST_CPP_DIALECT >= 2011
+
+    /*! Copy assign operator copies another \p device_vector with the same type.
+     *  \param v The \p device_vector to copy.
+     */
+    device_vector &operator=(const device_vector &v)
+    { Parent::operator=(v); return *this; }
+
+  #if THRUST_CPP_DIALECT >= 2011
+    /*! Move assign operator moves from another \p device_vector.
+     *  \param v The device_vector to move.
+     */
+    device_vector &operator=(device_vector &&v)
+    { Parent::operator=(std::move(v)); return *this; }
+  #endif // THRUST_CPP_DIALECT >= 2011
+
+    /*! Copy constructor copies from an exemplar \p device_vector with different type.
+     *  \param v The \p device_vector to copy.
+     */
+    template<typename OtherT, typename OtherAlloc>
+    explicit device_vector(const device_vector<OtherT, OtherAlloc> &v)
+      :Parent(v) {}
+
+    /*! Assign operator copies from an exemplar \p device_vector with different type.
+     *  \param v The \p device_vector to copy.
+     */
+    template<typename OtherT, typename OtherAlloc>
+    device_vector &operator=(const device_vector<OtherT, OtherAlloc> &v)
+    { Parent::operator=(v); return *this; }
+
+    /*! Copy constructor copies from an exemplar \c std::vector.
+     *  \param v The std::vector to copy.
+     */
+    template<typename OtherT, typename OtherAlloc>
+    device_vector(const std::vector<OtherT, OtherAlloc> &v)
+      :Parent(v) {}
+
+    /*! Assign operator copies from an exemplar std::vector.
+     *  \param v The std::vector to copy.
+     */
+    template<typename OtherT, typename OtherAlloc>
+    device_vector &operator=(const std::vector<OtherT, OtherAlloc> &v)
+    { Parent::operator=(v); return *this;}
+
+    /*! Copy construct from a \p vector_base whose element type is convertible
+     *  to \c T.
+     *
+     *  \param v The \p vector_base to copy.
+     */
+    template<typename OtherT, typename OtherAlloc>
+    device_vector(const detail::vector_base<OtherT, OtherAlloc> &v)
+      :Parent(v) {}
+
+    /*! Assign a \p vector_base whose element type is convertible to \c T.
+     *  \param v The \p vector_base to copy.
+     */
+    template<typename OtherT, typename OtherAlloc>
+    device_vector &operator=(const detail::vector_base<OtherT, OtherAlloc> &v)
+    { Parent::operator=(v); return *this; }
+
+    /*! This constructor builds a \p device_vector from an initializer_list.
+     *  \param il The initializer_list.
+     */
+    device_vector(std::initializer_list<T> il)
+      :Parent(il) {}
+
+    /*! This constructor builds a \p device_vector from an initializer_list.
+     *  \param il The initializer_list.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    device_vector(std::initializer_list<T> il, const Alloc &alloc)
+      :Parent(il, alloc) {}
+
+    /*! Assign an \p initializer_list with a matching element type.
+     *  \param il The initializer_list.
+     */
+    device_vector &operator=(std::initializer_list<T> il)
+    { Parent::operator=(il); return *this; }
+
+    /*! This constructor builds a \p device_vector from a range.
+     *  \param first The beginning of the range.
+     *  \param last The end of the range.
+     */
+    template<typename InputIterator>
+    device_vector(InputIterator first, InputIterator last)
+      :Parent(first,last) {}
+
+    /*! This constructor builds a \p device_vector from a range.
+     *  \param first The beginning of the range.
+     *  \param last The end of the range.
+     *  \param alloc The allocator to use by this device_vector.
+     */
+    template<typename InputIterator>
+    device_vector(InputIterator first, InputIterator last, const Alloc &alloc)
+      :Parent(first,last,alloc) {}
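+
+    // For illustration (editor's sketch, not part of the original header):
+    // typical ways to construct a device_vector, assuming a CUDA-capable build.
+    //
+    //   std::vector<int> h(4, 7);
+    //   thrust::device_vector<int> a(h.begin(), h.end()); // range constructor
+    //   thrust::device_vector<int> b = {1, 2, 3};         // initializer_list constructor
+    //   thrust::device_vector<int> c(b);                  // copy constructor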
+
+// declare these members for the purpose of Doxygenating them
+// they actually exist in a derived-from class
+#if 0
+    /*! \brief Resizes this vector to the specified number of elements.
+     *  \param new_size Number of elements this vector should contain.
+     *  \param x Data with which new elements should be populated.
+     *  \throw std::length_error If n exceeds max_size().
+     *
+     *  This method will resize this vector to the specified number of
+     *  elements. If the number is smaller than this vector's current
+     *  size this vector is truncated, otherwise this vector is
+     *  extended and new elements are populated with given data.
+     */
+    void resize(size_type new_size, const value_type &x = value_type());
+
+    /*! Returns the number of elements in this vector.
+     */
+    size_type size(void) const;
+
+    /*! Returns the size() of the largest possible vector.
+     *  \return The largest possible return value of size().
+     */
+    size_type max_size(void) const;
+
+    /*! \brief If n is less than or equal to capacity(), this call has no effect.
+     *  Otherwise, this method is a request for allocation of additional memory. If
+     *  the request is successful, then capacity() is greater than or equal to
+     *  n; otherwise, capacity() is unchanged. In either case, size() is unchanged.
+     *  \throw std::length_error If n exceeds max_size().
+     */
+    void reserve(size_type n);
+
+    /*! Returns the number of elements which have been reserved in this
+     *  vector.
+     */
+    size_type capacity(void) const;
+
+    /*! This method shrinks the capacity of this vector to exactly
+     *  fit its elements.
+     */
+    void shrink_to_fit(void);
+
+    /*! \brief Subscript access to the data contained in this vector.
+     *  \param n The index of the element for which data should be accessed.
+     *  \return Read/write reference to data.
+     *
+     *  This operator allows for easy, array-style, data access.
+     *  Note that data access with this operator is unchecked and
+     *  out_of_range lookups are not defined.
+     */
+    reference operator[](size_type n);
+
+    /*! \brief Subscript read access to the data contained in this vector.
+     *  \param n The index of the element for which data should be accessed.
+     *  \return Read reference to data.
+     *
+     *  This operator allows for easy, array-style, data access.
+     *  Note that data access with this operator is unchecked and
+     *  out_of_range lookups are not defined.
+     */
+    const_reference operator[](size_type n) const;
+
+    /*! This method returns an iterator pointing to the beginning of
+     *  this vector.
+     *  \return mStart
+     */
+    iterator begin(void);
+
+    /*! This method returns a const_iterator pointing to the beginning
+     *  of this vector.
+     *  \return mStart
+     */
+    const_iterator begin(void) const;
+
+    /*! This method returns a const_iterator pointing to the beginning
+     *  of this vector.
+     *  \return mStart
+     */
+    const_iterator cbegin(void) const;
+
+    /*! This method returns a reverse_iterator pointing to the beginning of
+     *  this vector's reversed sequence.
+     *  \return A reverse_iterator pointing to the beginning of this
+     *          vector's reversed sequence.
+     */
+    reverse_iterator rbegin(void);
+
+    /*! This method returns a const_reverse_iterator pointing to the beginning of
+     *  this vector's reversed sequence.
+     *  \return A const_reverse_iterator pointing to the beginning of this
+     *          vector's reversed sequence.
+     */
+    const_reverse_iterator rbegin(void) const;
+
+    /*! This method returns a const_reverse_iterator pointing to the beginning of
+     *  this vector's reversed sequence.
+     *  \return A const_reverse_iterator pointing to the beginning of this
+     *          vector's reversed sequence.
+     */
+    const_reverse_iterator crbegin(void) const;
+
+    /*! This method returns an iterator pointing to one element past the
+     *  last of this vector.
+     *  \return begin() + size().
+     */
+    iterator end(void);
+
+    /*! This method returns a const_iterator pointing to one element past the
+     *  last of this vector.
+     *  \return begin() + size().
+     */
+    const_iterator end(void) const;
+
+    /*! This method returns a const_iterator pointing to one element past the
+     *  last of this vector.
+     *  \return begin() + size().
+     */
+    const_iterator cend(void) const;
+
+    /*! This method returns a reverse_iterator pointing to one element past the
+     *  last of this vector's reversed sequence.
+     *  \return rbegin() + size().
+     */
+    reverse_iterator rend(void);
+
+    /*! This method returns a const_reverse_iterator pointing to one element past the
+     *  last of this vector's reversed sequence.
+     *  \return rbegin() + size().
+     */
+    const_reverse_iterator rend(void) const;
+
+    /*! This method returns a const_reverse_iterator pointing to one element past the
+     *  last of this vector's reversed sequence.
+     *  \return rbegin() + size().
+     */
+    const_reverse_iterator crend(void) const;
+
+    /*! This method returns a const_reference referring to the first element of this
+     *  vector.
+     *  \return The first element of this vector.
+     */
+    const_reference front(void) const;
+
+    /*! This method returns a reference pointing to the first element of this
+     *  vector.
+     *  \return The first element of this vector.
+     */
+    reference front(void);
+
+    /*! This method returns a const reference pointing to the last element of
+     *  this vector.
+     *  \return The last element of this vector.
+     */
+    const_reference back(void) const;
+
+    /*! This method returns a reference referring to the last element of
+     *  this vector.
+     *  \return The last element of this vector.
+     */
+    reference back(void);
+
+    /*! This method returns a pointer to this vector's first element.
+     *  \return A pointer to the first element of this vector.
+     */
+    pointer data(void);
+
+    /*! This method returns a const_pointer to this vector's first element.
+     *  \return a const_pointer to the first element of this vector.
+     */
+    const_pointer data(void) const;
+
+    /*! This method resizes this vector to 0.
+     */
+    void clear(void);
+
+    /*! This method returns true iff size() == 0.
+     *  \return true if size() == 0; false, otherwise.
+     */
+    bool empty(void) const;
+
+    /*! This method appends the given element to the end of this vector.
+     *  \param x The element to append.
+     */
+    void push_back(const value_type &x);
+
+    /*! This method erases the last element of this vector, invalidating
+     *  all iterators and references to it.
+     */
+    void pop_back(void);
+
+    /*! This method swaps the contents of this device_vector with another vector.
+     *  \param v The vector with which to swap.
+     */
+    void swap(device_vector &v);
+
+    /*! This method removes the element at position pos.
+     *  \param pos The position of the element of interest.
+     *  \return An iterator pointing to the new location of the element that followed the element
+     *          at position pos.
+     */
+    iterator erase(iterator pos);
+
+    /*! This method removes the range of elements [first,last) from this vector.
+     *  \param first The beginning of the range of elements to remove.
+     *  \param last The end of the range of elements to remove.
+     *  \return An iterator pointing to the new location of the element that followed the last
+     *          element in the sequence [first,last).
+     */
+    iterator erase(iterator first, iterator last);
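+
+    // For illustration (editor's sketch, not part of the original header):
+    // erase and insert follow std::vector semantics, assuming a CUDA-capable build.
+    //
+    //   thrust::device_vector<int> v = {1, 2, 3, 4};
+    //   v.erase(v.begin() + 1);    // v == {1, 3, 4}
+    //   v.insert(v.begin(), 2, 9); // v == {9, 9, 1, 3, 4}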
+
+    /*! This method inserts a single copy of a given exemplar value at the
+     *  specified position in this vector.
+     *  \param position The insertion position.
+     *  \param x The exemplar element to copy & insert.
+     *  \return An iterator pointing to the newly inserted element.
+     */
+    iterator insert(iterator position, const T &x);
+
+    /*! This method inserts a copy of an exemplar value to a range at the
+     *  specified position in this vector.
+     *  \param position The insertion position.
+     *  \param n The number of insertions to perform.
+     *  \param x The value to replicate and insert.
+     */
+    void insert(iterator position, size_type n, const T &x);
+
+    /*! This method inserts a copy of an input range at the specified position
+     *  in this vector.
+     *  \param position The insertion position.
+     *  \param first The beginning of the range to copy.
+     *  \param last The end of the range to copy.
+     *
+     *  \tparam InputIterator is a model of Input Iterator.
+     */
+    template<typename InputIterator>
+    void insert(iterator position, InputIterator first, InputIterator last);
+
+    /*! This version of \p assign replicates a given exemplar
+     *  \p n times into this vector.
+     *  \param n The number of times to copy \p x.
+     *  \param x The exemplar element to replicate.
+     */
+    void assign(size_type n, const T &x);
+
+    /*! This version of \p assign makes this vector a copy of a given input range.
+     *  \param first The beginning of the range to copy.
+     *  \param last The end of the range to copy.
+     *
+     *  \tparam InputIterator is a model of Input Iterator.
+     */
+    template<typename InputIterator>
+    void assign(InputIterator first, InputIterator last);
+
+    /*! This method returns a copy of this vector's allocator.
+     *  \return A copy of the allocator used by this vector.
+     */
+    allocator_type get_allocator(void) const;
+#endif // end doxygen-only members
+};
+
+/*! Exchanges the values of two vectors.
+ *  \param a The first \p device_vector of interest.
+ *  \param b The second \p device_vector of interest.
+ */
+template<typename T, typename Alloc>
+  void swap(device_vector<T, Alloc> &a, device_vector<T, Alloc> &b)
+{
+  a.swap(b);
+}
+
+/*! \} // containers
+ */
+
+THRUST_NAMESPACE_END
diff --git a/miniCUDA124/include/thrust/distance.h b/miniCUDA124/include/thrust/distance.h
new file mode 100644
index 0000000000000000000000000000000000000000..82562bb3738c784a8d03e2c657d92f55e5bc04cc
--- /dev/null
+++ b/miniCUDA124/include/thrust/distance.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file distance.h
+ *  \brief Computes the size of a range
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+#include <thrust/iterator/iterator_traits.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup iterators
+ *  \{
+ */
+
+/*! \p distance finds the distance between \p first and \p last, i.e. the
+ *  number of times that \p first must be incremented until it is equal to
+ *  \p last.
+ *
+ *  \param first The beginning of an input range of interest.
+ *  \param last The end of an input range of interest.
+ *  \return The distance between the beginning and end of the input range.
+ *
+ *  \tparam InputIterator is a model of Input Iterator.
+ *
+ *  \pre If \c InputIterator meets the requirements of random access iterator, \p last shall be reachable from \p first or
+ *       \p first shall be reachable from \p last; otherwise, \p last shall be reachable from \p first.
+ *
+ *  The following code snippet demonstrates how to use \p distance to compute
+ *  the distance to one iterator from another.
+ *
+ *  \code
+ *  #include <thrust/distance.h>
+ *  #include <thrust/device_vector.h>
+ *  ...
+ *  thrust::device_vector<int> vec(13);
+ *  thrust::device_vector<int>::iterator iter1 = vec.begin();
+ *  thrust::device_vector<int>::iterator iter2 = iter1 + 7;
+ *
+ *  int d = thrust::distance(iter1, iter2);
+ *
+ *  // d is 7
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/iterator/distance
+ */
+template<typename InputIterator>
+inline __host__ __device__
+  typename thrust::iterator_traits<InputIterator>::difference_type
+    distance(InputIterator first, InputIterator last);
+
+/*! \} // end iterators
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/distance.inl>
diff --git a/miniCUDA124/include/thrust/equal.h b/miniCUDA124/include/thrust/equal.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a0db021e5ebca0bcbd2674f3cd0f85c58daf080
--- /dev/null
+++ b/miniCUDA124/include/thrust/equal.h
@@ -0,0 +1,243 @@
+/*
+ *  Copyright 2008-2013 NVIDIA Corporation
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
+/*! \file equal.h
+ *  \brief Equality between ranges
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+#include <thrust/detail/execution_policy.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup reductions
+ *  \{
+ *  \addtogroup comparisons
+ *  \ingroup reductions
+ *  \{
+ */
+
+
+/*! \p equal returns \c true if the two ranges [first1, last1)
+ *  and [first2, first2 + (last1 - first1)) are identical when
+ *  compared element-by-element, and otherwise returns \c false.
+ *
+ *  This version of \p equal returns \c true if and only if for every
+ *  iterator \c i in [first1, last1), *i == *(first2 + (i - first1)).
+ *
+ *  The algorithm's execution is parallelized as determined by \p exec.
+ *
+ *  \param exec The execution policy to use for parallelization.
+ *  \param first1 The beginning of the first sequence.
+ *  \param last1 The end of the first sequence.
+ *  \param first2 The beginning of the second sequence.
+ *  \return \c true, if the sequences are equal; \c false, otherwise.
+ *
+ *  \tparam DerivedPolicy The name of the derived execution policy.
+ *  \tparam InputIterator1 is a model of Input Iterator,
+ *          and \p InputIterator1's \c value_type is a model of Equality Comparable,
+ *          and \p InputIterator1's \c value_type can be compared for equality with \c InputIterator2's \c value_type.
+ *  \tparam InputIterator2 is a model of Input Iterator,
+ *          and \p InputIterator2's \c value_type is a model of Equality Comparable,
+ *          and \p InputIterator2's \c value_type can be compared for equality with \c InputIterator1's \c value_type.
+ *
+ *  The following code snippet demonstrates how to use \p equal to test
+ *  two ranges for equality using the \p thrust::host execution policy:
+ *
+ *  \code
+ *  #include <thrust/equal.h>
+ *  #include <thrust/execution_policy.h>
+ *  ...
+ *  int A1[7] = {3, 1, 4, 1, 5, 9, 3};
+ *  int A2[7] = {3, 1, 4, 2, 8, 5, 7};
+ *  ...
+ *  bool result = thrust::equal(thrust::host, A1, A1 + 7, A2);
+ *
+ *  // result == false
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/equal
+ */
+template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2>
+__host__ __device__
+bool equal(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2);
+
+
+/*! \p equal returns \c true if the two ranges [first1, last1)
+ *  and [first2, first2 + (last1 - first1)) are identical when
+ *  compared element-by-element, and otherwise returns \c false.
+ *
+ *  This version of \p equal returns \c true if and only if for every
+ *  iterator \c i in [first1, last1), *i == *(first2 + (i - first1)).
+ *
+ *  \param first1 The beginning of the first sequence.
+ *  \param last1 The end of the first sequence.
+ *  \param first2 The beginning of the second sequence.
+ *  \return \c true, if the sequences are equal; \c false, otherwise.
+ *
+ *  \tparam InputIterator1 is a model of Input Iterator,
+ *          and \p InputIterator1's \c value_type is a model of Equality Comparable,
+ *          and \p InputIterator1's \c value_type can be compared for equality with \c InputIterator2's \c value_type.
+ *  \tparam InputIterator2 is a model of Input Iterator,
+ *          and \p InputIterator2's \c value_type is a model of Equality Comparable,
+ *          and \p InputIterator2's \c value_type can be compared for equality with \c InputIterator1's \c value_type.
+ *
+ *  The following code snippet demonstrates how to use \p equal to test
+ *  two ranges for equality.
+ *
+ *  \code
+ *  #include <thrust/equal.h>
+ *  ...
+ *  int A1[7] = {3, 1, 4, 1, 5, 9, 3};
+ *  int A2[7] = {3, 1, 4, 2, 8, 5, 7};
+ *  ...
+ *  bool result = thrust::equal(A1, A1 + 7, A2);
+ *
+ *  // result == false
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/equal
+ */
+template<typename InputIterator1, typename InputIterator2>
+bool equal(InputIterator1 first1, InputIterator1 last1,
+           InputIterator2 first2);
+
+
+/*! \p equal returns \c true if the two ranges [first1, last1)
+ *  and [first2, first2 + (last1 - first1)) are identical when
+ *  compared element-by-element, and otherwise returns \c false.
+ *
+ *  This version of \p equal returns \c true if and only if for every
+ *  iterator \c i in [first1, last1),
+ *  binary_pred(*i, *(first2 + (i - first1))) is \c true.
+ *
+ *  The algorithm's execution is parallelized as determined by \p exec.
+ *
+ *  \param exec The execution policy to use for parallelization.
+ *  \param first1 The beginning of the first sequence.
+ *  \param last1 The end of the first sequence.
+ *  \param first2 The beginning of the second sequence.
+ *  \param binary_pred Binary predicate used to test element equality.
+ *  \return \c true, if the sequences are equal; \c false, otherwise.
+ *
+ *  \tparam DerivedPolicy The name of the derived execution policy.
+ *  \tparam InputIterator1 is a model of Input Iterator,
+ *          and \p InputIterator1's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type.
+ *  \tparam InputIterator2 is a model of Input Iterator,
+ *          and \p InputIterator2's \c value_type is convertible to \p BinaryPredicate's \c second_argument_type.
+ *  \tparam BinaryPredicate is a model of Binary Predicate.
+ *
+ *  The following code snippet demonstrates how to use \p equal to compare the
+ *  elements in two ranges modulo 2 using the \p thrust::host execution policy.
+ *
+ *  \code
+ *  #include <thrust/equal.h>
+ *  #include <thrust/execution_policy.h>
+ *  ...
+ *
+ *  struct compare_modulo_two
+ *  {
+ *    __host__ __device__
+ *    bool operator()(int x, int y) const
+ *    {
+ *      return (x % 2) == (y % 2);
+ *    }
+ *  };
+ *  ...
+ *  int x[6] = {0, 2, 4, 6, 8, 10};
+ *  int y[6] = {1, 3, 5, 7, 9, 11};
+ *
+ *  bool result = thrust::equal(thrust::host, x, x + 6, y, compare_modulo_two());
+ *
+ *  // result is false
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/equal
+ */
+template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+__host__ __device__
+bool equal(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate binary_pred);
+
+
+/*! \p equal returns \c true if the two ranges [first1, last1)
+ *  and [first2, first2 + (last1 - first1)) are identical when
+ *  compared element-by-element, and otherwise returns \c false.
+ *
+ *  This version of \p equal returns \c true if and only if for every
+ *  iterator \c i in [first1, last1),
+ *  binary_pred(*i, *(first2 + (i - first1))) is \c true.
+ *
+ *  \param first1 The beginning of the first sequence.
+ *  \param last1 The end of the first sequence.
+ *  \param first2 The beginning of the second sequence.
+ *  \param binary_pred Binary predicate used to test element equality.
+ *  \return \c true, if the sequences are equal; \c false, otherwise.
+ *
+ *  \tparam InputIterator1 is a model of Input Iterator,
+ *          and \p InputIterator1's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type.
+ *  \tparam InputIterator2 is a model of Input Iterator,
+ *          and \p InputIterator2's \c value_type is convertible to \p BinaryPredicate's \c second_argument_type.
+ *  \tparam BinaryPredicate is a model of Binary Predicate.
+ *
+ *  The following code snippet demonstrates how to use \p equal to compare the
+ *  elements in two ranges modulo 2.
+ *
+ *  \code
+ *  #include <thrust/equal.h>
+ *
+ *  struct compare_modulo_two
+ *  {
+ *    __host__ __device__
+ *    bool operator()(int x, int y) const
+ *    {
+ *      return (x % 2) == (y % 2);
+ *    }
+ *  };
+ *  ...
+ *  int x[6] = {0, 2, 4, 6, 8, 10};
+ *  int y[6] = {1, 3, 5, 7, 9, 11};
+ *
+ *  bool result = thrust::equal(x, x + 5, y, compare_modulo_two());
+ *
+ *  // result is false (0 % 2 != 1 % 2, so the very first comparison fails)
+ *  \endcode
+ *
+ *  \see https://en.cppreference.com/w/cpp/algorithm/equal
+ */
+template<typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+bool equal(InputIterator1 first1, InputIterator1 last1,
+           InputIterator2 first2, BinaryPredicate binary_pred);
+
+
+/*! \} // end comparisons
+ *  \} // end reductions
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/equal.inl>
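
Taken together, the headers above add range measurement (thrust::distance) and element-wise range comparison (thrust::equal) to the bundled miniCUDA124 Thrust subset. The standalone snippet below is a minimal sketch showing the three declaration families in use; it is not part of this diff, the file name example_equal_distance.cu and the test data are invented for illustration, and it assumes compilation with nvcc against a complete Thrust installation (the detail/*.inl implementation files referenced by these headers must be present, not just the declarations added here).

// example_equal_distance.cu -- hypothetical driver, not part of this diff.
#include <thrust/device_vector.h>
#include <thrust/distance.h>
#include <thrust/equal.h>
#include <thrust/execution_policy.h>
#include <cstdio>

// Binary predicate comparing elements modulo 2, mirroring the
// compare_modulo_two functor from the equal.h documentation above.
struct compare_modulo_two
{
  __host__ __device__
  bool operator()(int x, int y) const
  {
    return (x % 2) == (y % 2);
  }
};

int main(void)
{
  thrust::device_vector<int> a(8);
  thrust::device_vector<int> b(8);
  for (int i = 0; i < 8; ++i)
  {
    a[i] = 2 * i;      // 0, 2, 4, ..., 14
    b[i] = 2 * i + 2;  // 2, 4, 6, ..., 16
  }

  // distance counts the increments from begin() to end(); 8 here.
  long n = (long) thrust::distance(a.begin(), a.end());

  // Plain operator== comparison: false, since a[i] != b[i] everywhere.
  bool same = thrust::equal(a.begin(), a.end(), b.begin());

  // Comparison modulo 2 under an explicit execution policy: true,
  // since every element of both vectors is even.
  bool same_mod2 = thrust::equal(thrust::device, a.begin(), a.end(),
                                 b.begin(), compare_modulo_two());

  std::printf("n=%ld same=%d same_mod2=%d\n", n, (int) same, (int) same_mod2);
  return 0;
}

The element-by-element writes through operator[] are correct but incur one transfer each; for bulk initialization, thrust::sequence or thrust::transform would be the more idiomatic choice.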