Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -3
- tool_server/.venv/lib/python3.12/site-packages/nvidia/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/include/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/include/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/include/cufile.h +738 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0 +3 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile_rdma.so.1 +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand.h +1080 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_discrete.h +87 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_discrete2.h +253 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_globals.h +93 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_kernel.h +1677 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_lognormal.h +697 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mrg32k3a.h +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32.h +210 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32_host.h +516 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h +386 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_normal.h +840 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_normal_static.h +134 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_philox4x32_x.h +195 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_poisson.h +763 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_precalc.h +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_uniform.h +498 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/libcurand.so.10 +3 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/include/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/include/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/include/nccl.h +479 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/__init__.py +0 -0
- tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/__pycache__/__init__.cpython-312.pyc +0 -0
.gitattributes
CHANGED
|
@@ -4052,6 +4052,7 @@ tool_server/.venv/lib/python3.12/site-packages/pycountry/locales/uk/LC_MESSAGES/
|
|
| 4052 |
tool_server/.venv/lib/python3.12/site-packages/pycountry/locales/vi/LC_MESSAGES/iso3166-2.mo filter=lfs diff=lfs merge=lfs -text
|
| 4053 |
tool_server/.venv/lib/python3.12/site-packages/pycountry/locales/zh_CN/LC_MESSAGES/iso3166-2.mo filter=lfs diff=lfs merge=lfs -text
|
| 4054 |
tool_server/.venv/lib/python3.12/site-packages/google/_upb/_message.abi3.so filter=lfs diff=lfs merge=lfs -text
|
| 4055 |
-
tool_server/.venv/lib/python3.12/site-packages/
|
| 4056 |
-
tool_server/.venv/lib/python3.12/site-packages/
|
| 4057 |
-
tool_server/.venv/lib/python3.12/site-packages/
|
|
|
|
|
|
| 4052 |
tool_server/.venv/lib/python3.12/site-packages/pycountry/locales/vi/LC_MESSAGES/iso3166-2.mo filter=lfs diff=lfs merge=lfs -text
|
| 4053 |
tool_server/.venv/lib/python3.12/site-packages/pycountry/locales/zh_CN/LC_MESSAGES/iso3166-2.mo filter=lfs diff=lfs merge=lfs -text
|
| 4054 |
tool_server/.venv/lib/python3.12/site-packages/google/_upb/_message.abi3.so filter=lfs diff=lfs merge=lfs -text
|
| 4055 |
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
|
| 4056 |
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
|
| 4057 |
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
|
| 4058 |
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0 filter=lfs diff=lfs merge=lfs -text
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (176 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (183 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/include/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/include/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (191 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/include/cufile.h
ADDED
|
@@ -0,0 +1,738 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2023 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
/**
|
| 51 |
+
* @file cufile.h
|
| 52 |
+
* @brief cuFile C APIs
|
| 53 |
+
*
|
| 54 |
+
* This file contains all the C APIs to perform GPUDirect Storage supported IO operations
|
| 55 |
+
*/
|
| 56 |
+
|
| 57 |
+
#ifdef __cplusplus
|
| 58 |
+
extern "C"
|
| 59 |
+
{
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
/// @cond DOXYGEN_SKIP_MACRO
|
| 63 |
+
#ifndef __CUFILE_H_
|
| 64 |
+
#define __CUFILE_H_
|
| 65 |
+
|
| 66 |
+
#include <stdlib.h>
|
| 67 |
+
#include <stdbool.h>
|
| 68 |
+
|
| 69 |
+
#include <cuda.h>
|
| 70 |
+
#include <arpa/inet.h>
|
| 71 |
+
#include <sys/socket.h>
|
| 72 |
+
|
| 73 |
+
#define CUFILEOP_BASE_ERR 5000
|
| 74 |
+
|
| 75 |
+
//Note :Data path errors are captured via standard error codes
|
| 76 |
+
#define CUFILEOP_STATUS_ENTRIES \
|
| 77 |
+
CUFILE_OP(0, CU_FILE_SUCCESS, cufile success) \
|
| 78 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 1, CU_FILE_DRIVER_NOT_INITIALIZED, nvidia-fs driver is not loaded. Set allow_compat_mode to true in cufile.json file to enable compatible mode) \
|
| 79 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 2, CU_FILE_DRIVER_INVALID_PROPS, invalid property) \
|
| 80 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 3, CU_FILE_DRIVER_UNSUPPORTED_LIMIT, property range error) \
|
| 81 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 4, CU_FILE_DRIVER_VERSION_MISMATCH, nvidia-fs driver version mismatch) \
|
| 82 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 5, CU_FILE_DRIVER_VERSION_READ_ERROR, nvidia-fs driver version read error) \
|
| 83 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 6, CU_FILE_DRIVER_CLOSING, driver shutdown in progress) \
|
| 84 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 7, CU_FILE_PLATFORM_NOT_SUPPORTED, GPUDirect Storage not supported on current platform) \
|
| 85 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 8, CU_FILE_IO_NOT_SUPPORTED, GPUDirect Storage not supported on current file) \
|
| 86 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 9, CU_FILE_DEVICE_NOT_SUPPORTED, GPUDirect Storage not supported on current GPU) \
|
| 87 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 10, CU_FILE_NVFS_DRIVER_ERROR, nvidia-fs driver ioctl error) \
|
| 88 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 11, CU_FILE_CUDA_DRIVER_ERROR, CUDA Driver API error) \
|
| 89 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 12, CU_FILE_CUDA_POINTER_INVALID, invalid device pointer) \
|
| 90 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 13, CU_FILE_CUDA_MEMORY_TYPE_INVALID, invalid pointer memory type) \
|
| 91 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 14, CU_FILE_CUDA_POINTER_RANGE_ERROR, pointer range exceeds allocated address range) \
|
| 92 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 15, CU_FILE_CUDA_CONTEXT_MISMATCH, cuda context mismatch) \
|
| 93 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 16, CU_FILE_INVALID_MAPPING_SIZE, access beyond maximum pinned size) \
|
| 94 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 17, CU_FILE_INVALID_MAPPING_RANGE, access beyond mapped size) \
|
| 95 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 18, CU_FILE_INVALID_FILE_TYPE, unsupported file type) \
|
| 96 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 19, CU_FILE_INVALID_FILE_OPEN_FLAG, unsupported file open flags) \
|
| 97 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 20, CU_FILE_DIO_NOT_SET, fd direct IO not set) \
|
| 98 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 22, CU_FILE_INVALID_VALUE, invalid arguments) \
|
| 99 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 23, CU_FILE_MEMORY_ALREADY_REGISTERED, device pointer already registered) \
|
| 100 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 24, CU_FILE_MEMORY_NOT_REGISTERED, device pointer lookup failure) \
|
| 101 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 25, CU_FILE_PERMISSION_DENIED, driver or file access error) \
|
| 102 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 26, CU_FILE_DRIVER_ALREADY_OPEN, driver is already open) \
|
| 103 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 27, CU_FILE_HANDLE_NOT_REGISTERED, file descriptor is not registered) \
|
| 104 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 28, CU_FILE_HANDLE_ALREADY_REGISTERED, file descriptor is already registered) \
|
| 105 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 29, CU_FILE_DEVICE_NOT_FOUND, GPU device not found) \
|
| 106 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 30, CU_FILE_INTERNAL_ERROR, internal error) \
|
| 107 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 31, CU_FILE_GETNEWFD_FAILED, failed to obtain new file descriptor) \
|
| 108 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 33, CU_FILE_NVFS_SETUP_ERROR, NVFS driver initialization error) \
|
| 109 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 34, CU_FILE_IO_DISABLED, GPUDirect Storage disabled by config on current file)\
|
| 110 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 35, CU_FILE_BATCH_SUBMIT_FAILED, failed to submit batch operation)\
|
| 111 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 36, CU_FILE_GPU_MEMORY_PINNING_FAILED, failed to allocate pinned GPU Memory) \
|
| 112 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 37, CU_FILE_BATCH_FULL, queue full for batch operation) \
|
| 113 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 38, CU_FILE_ASYNC_NOT_SUPPORTED, cuFile stream operation not supported) \
|
| 114 |
+
CUFILE_OP(CUFILEOP_BASE_ERR + 39, CU_FILE_IO_MAX_ERROR, GPUDirect Storage Max Error)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
/**
|
| 118 |
+
* @brief cufileop status enum
|
| 119 |
+
*
|
| 120 |
+
* @note on success the error code is set to @ref CU_FILE_SUCCESS.
|
| 121 |
+
* @note The error code can be inspected using @ref IS_CUFILE_ERR and @ref CUFILE_ERRSTR.
|
| 122 |
+
* @note The error code if set to @ref CU_FILE_CUDA_DRIVER_ERROR, then cuda error can be inspected using @ref IS_CUDA_ERR and @ref CU_FILE_CUDA_ERR.
|
| 123 |
+
* @note Data path errors are captured via standard error codes
|
| 124 |
+
*/
|
| 125 |
+
typedef enum CUfileOpError {
|
| 126 |
+
/// @cond DOXYGEN_SKIP_MACRO
|
| 127 |
+
#define CUFILE_OP(code, name, string) name = code,
|
| 128 |
+
CUFILEOP_STATUS_ENTRIES
|
| 129 |
+
#undef CUFILE_OP
|
| 130 |
+
///@endcond
|
| 131 |
+
} CUfileOpError;
|
| 132 |
+
|
| 133 |
+
/// @endcond
|
| 134 |
+
|
| 135 |
+
/**
|
| 136 |
+
* @brief cufileop status string
|
| 137 |
+
*/
|
| 138 |
+
static inline const char *cufileop_status_error(CUfileOpError status)
|
| 139 |
+
{
|
| 140 |
+
switch (status) {
|
| 141 |
+
/// @cond DOXYGEN_SKIP_MACRO
|
| 142 |
+
#define CUFILE_OP(code, name, string) \
|
| 143 |
+
case name: return #string;
|
| 144 |
+
CUFILEOP_STATUS_ENTRIES
|
| 145 |
+
#undef CUFILE_OP
|
| 146 |
+
///@endcond
|
| 147 |
+
default:return "unknown cufile error";
|
| 148 |
+
}
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
/**
|
| 152 |
+
* @brief cufileop status string
|
| 153 |
+
*/
|
| 154 |
+
typedef struct CUfileError {
|
| 155 |
+
|
| 156 |
+
CUfileOpError err; // cufile error
|
| 157 |
+
|
| 158 |
+
CUresult cu_err; // cuda driver error
|
| 159 |
+
|
| 160 |
+
}CUfileError_t;
|
| 161 |
+
|
| 162 |
+
/**
|
| 163 |
+
* @brief error macros to inspect error status of type @ref CUfileOpError
|
| 164 |
+
*/
|
| 165 |
+
|
| 166 |
+
#define IS_CUFILE_ERR(err) \
|
| 167 |
+
(abs((err)) > CUFILEOP_BASE_ERR)
|
| 168 |
+
|
| 169 |
+
#define CUFILE_ERRSTR(err) \
|
| 170 |
+
cufileop_status_error((CUfileOpError)abs((err)))
|
| 171 |
+
|
| 172 |
+
#define IS_CUDA_ERR(status) \
|
| 173 |
+
((status).err == CU_FILE_CUDA_DRIVER_ERROR)
|
| 174 |
+
|
| 175 |
+
#define CU_FILE_CUDA_ERR(status) ((status).cu_err)
|
| 176 |
+
|
| 177 |
+
/* driver properties */
|
| 178 |
+
typedef enum CUfileDriverStatusFlags {
|
| 179 |
+
CU_FILE_LUSTRE_SUPPORTED = 0, /*!< Support for DDN LUSTRE */
|
| 180 |
+
|
| 181 |
+
CU_FILE_WEKAFS_SUPPORTED = 1, /*!< Support for WEKAFS */
|
| 182 |
+
|
| 183 |
+
CU_FILE_NFS_SUPPORTED = 2, /*!< Support for NFS */
|
| 184 |
+
|
| 185 |
+
CU_FILE_GPFS_SUPPORTED = 3, /*! < Support for GPFS */
|
| 186 |
+
|
| 187 |
+
CU_FILE_NVME_SUPPORTED = 4, /*!< Support for NVMe */
|
| 188 |
+
|
| 189 |
+
CU_FILE_NVMEOF_SUPPORTED = 5, /*!< Support for NVMeOF */
|
| 190 |
+
|
| 191 |
+
CU_FILE_SCSI_SUPPORTED = 6, /*!< Support for SCSI */
|
| 192 |
+
|
| 193 |
+
CU_FILE_SCALEFLUX_CSD_SUPPORTED = 7, /*!< Support for Scaleflux CSD*/
|
| 194 |
+
|
| 195 |
+
CU_FILE_NVMESH_SUPPORTED = 8, /*!< Support for NVMesh Block Dev*/
|
| 196 |
+
CU_FILE_BEEGFS_SUPPORTED = 9, /*!< Support for BeeGFS */
|
| 197 |
+
|
| 198 |
+
}CUfileDriverStatusFlags_t;
|
| 199 |
+
|
| 200 |
+
typedef enum CUfileDriverControlFlags {
|
| 201 |
+
CU_FILE_USE_POLL_MODE = 0 , /*!< use POLL mode. properties.use_poll_mode*/
|
| 202 |
+
|
| 203 |
+
CU_FILE_ALLOW_COMPAT_MODE = 1/*!< allow COMPATIBILITY mode. properties.allow_compat_mode*/
|
| 204 |
+
|
| 205 |
+
}CUfileDriverControlFlags_t;
|
| 206 |
+
|
| 207 |
+
typedef enum CUfileFeatureFlags {
|
| 208 |
+
CU_FILE_DYN_ROUTING_SUPPORTED = 0, /*!< Support for Dynamic routing to handle devices across the PCIe bridges */
|
| 209 |
+
|
| 210 |
+
CU_FILE_BATCH_IO_SUPPORTED = 1, /*!< Unsupported */
|
| 211 |
+
|
| 212 |
+
CU_FILE_STREAMS_SUPPORTED = 2, /*!< Unsupported */
|
| 213 |
+
|
| 214 |
+
CU_FILE_PARALLEL_IO_SUPPORTED = 3 /*!< Unsupported */
|
| 215 |
+
}CUfileFeatureFlags_t;
|
| 216 |
+
|
| 217 |
+
typedef struct CUfileDrvProps {
|
| 218 |
+
struct {
|
| 219 |
+
unsigned int major_version;
|
| 220 |
+
|
| 221 |
+
unsigned int minor_version;
|
| 222 |
+
|
| 223 |
+
size_t poll_thresh_size;
|
| 224 |
+
|
| 225 |
+
size_t max_direct_io_size;
|
| 226 |
+
|
| 227 |
+
unsigned int dstatusflags;
|
| 228 |
+
|
| 229 |
+
unsigned int dcontrolflags;
|
| 230 |
+
|
| 231 |
+
} nvfs;
|
| 232 |
+
|
| 233 |
+
unsigned int fflags;
|
| 234 |
+
|
| 235 |
+
unsigned int max_device_cache_size;
|
| 236 |
+
|
| 237 |
+
unsigned int per_buffer_cache_size;
|
| 238 |
+
|
| 239 |
+
unsigned int max_device_pinned_mem_size;
|
| 240 |
+
|
| 241 |
+
unsigned int max_batch_io_size;
|
| 242 |
+
unsigned int max_batch_io_timeout_msecs;
|
| 243 |
+
}CUfileDrvProps_t;
|
| 244 |
+
|
| 245 |
+
typedef struct sockaddr sockaddr_t;
|
| 246 |
+
|
| 247 |
+
typedef struct cufileRDMAInfo
|
| 248 |
+
{
|
| 249 |
+
int version;
|
| 250 |
+
int desc_len;
|
| 251 |
+
const char *desc_str;
|
| 252 |
+
}cufileRDMAInfo_t;
|
| 253 |
+
|
| 254 |
+
#define CU_FILE_RDMA_REGISTER 1
|
| 255 |
+
#define CU_FILE_RDMA_RELAXED_ORDERING (1<<1)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
typedef struct CUfileFSOps {
|
| 260 |
+
/* NULL means discover using fstat */
|
| 261 |
+
const char* (*fs_type) (void *handle);
|
| 262 |
+
|
| 263 |
+
/* list of host addresses to use, NULL means no restriction */
|
| 264 |
+
int (*getRDMADeviceList)(void *handle, sockaddr_t **hostaddrs);
|
| 265 |
+
|
| 266 |
+
/* -1 no pref */
|
| 267 |
+
int (*getRDMADevicePriority)(void *handle, char*, size_t,
|
| 268 |
+
loff_t, sockaddr_t* hostaddr);
|
| 269 |
+
|
| 270 |
+
/* NULL means try VFS */
|
| 271 |
+
ssize_t (*read) (void *handle, char*, size_t, loff_t, cufileRDMAInfo_t*);
|
| 272 |
+
ssize_t (*write) (void *handle, const char *, size_t, loff_t , cufileRDMAInfo_t*);
|
| 273 |
+
}CUfileFSOps_t;
|
| 274 |
+
|
| 275 |
+
/* File Handle */
|
| 276 |
+
enum CUfileFileHandleType {
|
| 277 |
+
CU_FILE_HANDLE_TYPE_OPAQUE_FD = 1, /*!< Linux based fd */
|
| 278 |
+
|
| 279 |
+
CU_FILE_HANDLE_TYPE_OPAQUE_WIN32 = 2, /*!< Windows based handle (unsupported) */
|
| 280 |
+
|
| 281 |
+
CU_FILE_HANDLE_TYPE_USERSPACE_FS = 3, /* Userspace based FS */
|
| 282 |
+
};
|
| 283 |
+
|
| 284 |
+
typedef struct CUfileDescr_t {
|
| 285 |
+
enum CUfileFileHandleType type; /* type of file being registered */
|
| 286 |
+
union {
|
| 287 |
+
int fd; /* Linux */
|
| 288 |
+
void *handle; /* Windows */
|
| 289 |
+
} handle;
|
| 290 |
+
const CUfileFSOps_t *fs_ops; /* file system operation table */
|
| 291 |
+
}CUfileDescr_t;
|
| 292 |
+
|
| 293 |
+
/**
|
| 294 |
+
* @brief File handle type
|
| 295 |
+
*
|
| 296 |
+
*/
|
| 297 |
+
typedef void* CUfileHandle_t;
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
#pragma GCC visibility push(default)
|
| 301 |
+
|
| 302 |
+
/**
|
| 303 |
+
* @brief cuFileHandleRegister is required, and performs extra checking that is memoized to provide increased performance on later cuFile operations.
|
| 304 |
+
*
|
| 305 |
+
* @param fh @ref CUfileHandle_t opaque file handle for IO operations
|
| 306 |
+
* @param descr @ref CUfileDescr_t file descriptor (OS agnostic)
|
| 307 |
+
*
|
| 308 |
+
* @return CU_FILE_SUCCESS on successful completion. fh will be updated for use in @ref cuFileRead, @ref cuFileWrite, @ref cuFileHandleDeregister
|
| 309 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED on failure to load driver
|
| 310 |
+
* @return CU_FILE_IO_NOT_SUPPORTED - if filesystem is not supported
|
| 311 |
+
* @return CU_FILE_INVALID_VALUE if null or bad api arguments
|
| 312 |
+
* @return CU_FILE_INVALID_FILE_OPEN_FLAG if file is opened with unsupported modes like no O_DIRECT
|
| 313 |
+
* @return CU_FILE_INVALID_FILE_TYPE if filepath is not valid or is not a regular file
|
| 314 |
+
* @return CU_FILE_HANDLE_ALREADY_REGISTERED if file handle/descriptor is already registered
|
| 315 |
+
*
|
| 316 |
+
* <b>Description</b>
|
| 317 |
+
* cuFileHandleRegister registers the open file descriptor for use with cuFile IO operations.
|
| 318 |
+
*
|
| 319 |
+
* This API will ensure that the file’s descriptor is checked for GPUDirect Storage support and returns a valid file handle on CU_FILE_SUCCESS.
|
| 320 |
+
*
|
| 321 |
+
* @note the file needs to be opened in O_DIRECT mode to support GPUDirect Storage.
|
| 322 |
+
*
|
| 323 |
+
* @see cuFileRead
|
| 324 |
+
* @see cuFileWrite
|
| 325 |
+
* @see cuFileHandleDeregister
|
| 326 |
+
*
|
| 327 |
+
*/
|
| 328 |
+
CUfileError_t cuFileHandleRegister(CUfileHandle_t *fh, CUfileDescr_t *descr);
|
| 329 |
+
|
| 330 |
+
/**
|
| 331 |
+
* @brief releases a registered filehandle from cuFile
|
| 332 |
+
*
|
| 333 |
+
* @param fh @ref CUfileHandle_t file handle
|
| 334 |
+
*
|
| 335 |
+
* @return void
|
| 336 |
+
*
|
| 337 |
+
* @see cuFileHandleRegister
|
| 338 |
+
*/
|
| 339 |
+
void cuFileHandleDeregister(CUfileHandle_t fh);
|
| 340 |
+
|
| 341 |
+
/**
|
| 342 |
+
* @brief register an existing cudaMalloced memory with cuFile to pin for GPUDirect Storage access or
|
| 343 |
+
* register host allocated memory with cuFile.
|
| 344 |
+
*
|
| 345 |
+
* @param bufPtr_base buffer pointer allocated
|
| 346 |
+
* @param length size of memory region from the above specified bufPtr
|
| 347 |
+
* @param flags CU_FILE_RDMA_REGISTER
|
| 348 |
+
*
|
| 349 |
+
* @return CU_FILE_SUCCESS on success
|
| 350 |
+
* @return CU_FILE_NVFS_DRIVER_ERROR
|
| 351 |
+
* @return CU_FILE_INVALID_VALUE
|
| 352 |
+
* @return CU_FILE_CUDA_ERROR for unsuported memory type
|
| 353 |
+
* @return CU_FILE_MEMORY_ALREADY_REGISTERED on error
|
| 354 |
+
* @return CU_FILE_GPU_MEMORY_PINNING_FAILED if not enough pinned memory is available
|
| 355 |
+
* @note This memory will be use to perform GPU direct DMA from the supported storage.
|
| 356 |
+
* @warning This API is intended for usecases where the memory is used as streaming buffer that is reused across multiple cuFile IO operations before calling @ref cuFileBufDeregister
|
| 357 |
+
*
|
| 358 |
+
* @see cuFileBufDeregister
|
| 359 |
+
* @see cuFileRead
|
| 360 |
+
* @see cuFileWrite
|
| 361 |
+
*/
|
| 362 |
+
CUfileError_t cuFileBufRegister(const void *bufPtr_base, size_t length, int flags);
|
| 363 |
+
|
| 364 |
+
/**
|
| 365 |
+
* @brief deregister an already registered device or host memory from cuFile
|
| 366 |
+
*
|
| 367 |
+
* @param bufPtr_base buffer pointer to deregister
|
| 368 |
+
*
|
| 369 |
+
* @return CU_FILE_SUCCESS on success
|
| 370 |
+
* @return CU_FILE_INVALID_VALUE on invalid memory pointer or unregistered memory pointer
|
| 371 |
+
*
|
| 372 |
+
* @see cuFileBufRegister
|
| 373 |
+
* @see cuFileRead
|
| 374 |
+
* @see cuFileWrite
|
| 375 |
+
*/
|
| 376 |
+
|
| 377 |
+
CUfileError_t cuFileBufDeregister(const void *bufPtr_base);
|
| 378 |
+
|
| 379 |
+
/**
|
| 380 |
+
* @brief read data from a registered file handle to a specified device or host memory
|
| 381 |
+
*
|
| 382 |
+
* @param fh @ref CUfileHandle_t opaque file handle
|
| 383 |
+
* @param bufPtr_base base address of buffer in device or host memory
|
| 384 |
+
* @param size size bytes to read
|
| 385 |
+
* @param file_offset file-offset from begining of the file
|
| 386 |
+
* @param bufPtr_offset offset relative to the bufPtr_base pointer to read into.
|
| 387 |
+
*
|
| 388 |
+
* @return size of bytes successfully read
|
| 389 |
+
* @return -1 on error, in which case errno is set to indicate filesystem errors.
|
| 390 |
+
* @return all other errors will return a negative integer value of @ref CUfileOpError enum value.
|
| 391 |
+
*
|
| 392 |
+
* @note If the bufPtr is not registered with @ref cuFileBufRegister, the data will be buffered through preallocated pinned buffers if needed.
|
| 393 |
+
* @note This is useful for applications that need to perform IO to unaligned file offsets and/or size. This is also recommended
|
| 394 |
+
* for cases where the BAR1 memory size is smaller than the size of the allocated memory.
|
| 395 |
+
*
|
| 396 |
+
* @see cuFileBufRegister
|
| 397 |
+
* @see cuFileHandleRegister
|
| 398 |
+
* @see cuFileWrite
|
| 399 |
+
*/
|
| 400 |
+
|
| 401 |
+
ssize_t cuFileRead(CUfileHandle_t fh, void *bufPtr_base, size_t size, off_t file_offset, off_t bufPtr_offset);
|
| 402 |
+
|
| 403 |
+
/**
|
| 404 |
+
* @brief write data from a specified device or host memory to a registered file handle
|
| 405 |
+
*
|
| 406 |
+
* @param fh @ref CUfileHandle_t opaque file handle
|
| 407 |
+
* @param bufPtr_base base address of buffer in device or host memory
|
| 408 |
+
* @param size size bytes to write
|
| 409 |
+
* @param file_offset file-offset from begining of the file
|
| 410 |
+
* @param bufPtr_offset offset relative to the bufPtr_base pointer to write from.
|
| 411 |
+
*
|
| 412 |
+
* @return size of bytes successfully written
|
| 413 |
+
* @return -1 on error, in which case errno is set to indicate filesystem errors.
|
| 414 |
+
* @return all other errors will return a negative integer value of @ref CUfileOpError enum value.
|
| 415 |
+
*
|
| 416 |
+
* @note If the bufPtr is not registered with @ref cuFileBufRegister, the data will be buffered through preallocated pinned buffers if needed.
|
| 417 |
+
* @note This is useful for applications that need to perform IO to unaligned file offsets and/or size. This is also recommended
|
| 418 |
+
* for cases where the BAR1 memory size is smaller than the size of the allocated memory.
|
| 419 |
+
*
|
| 420 |
+
* @see cuFileBufRegister
|
| 421 |
+
* @see cuFileHandleRegister
|
| 422 |
+
* @see cuFileRead
|
| 423 |
+
*/
|
| 424 |
+
|
| 425 |
+
ssize_t cuFileWrite(CUfileHandle_t fh, const void *bufPtr_base, size_t size, off_t file_offset, off_t bufPtr_offset);
|
| 426 |
+
|
| 427 |
+
// CUFile Driver APIs
|
| 428 |
+
|
| 429 |
+
/**
|
| 430 |
+
* @brief
|
| 431 |
+
* Initialize the cuFile library and open the nvidia-fs driver
|
| 432 |
+
*
|
| 433 |
+
* @return CU_FILE_SUCCESS on success
|
| 434 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED
|
| 435 |
+
* @return CU_FILE_DRIVER_VERSION_MISMATCH on driver version mismatch error
|
| 436 |
+
*
|
| 437 |
+
* @see cuFileDriverClose
|
| 438 |
+
*/
|
| 439 |
+
CUfileError_t cuFileDriverOpen(void);
|
| 440 |
+
|
| 441 |
+
CUfileError_t cuFileDriverClose(void);
|
| 442 |
+
#define cuFileDriverClose cuFileDriverClose_v2
|
| 443 |
+
/**
|
| 444 |
+
* @brief
|
| 445 |
+
* reset the cuFile library and release the nvidia-fs driver
|
| 446 |
+
*
|
| 447 |
+
* @return CU_FILE_SUCCESS on success
|
| 448 |
+
* @return CU_FILE_DRIVER_CLOSING if there are any active IO operations using @ref cuFileRead or @ref cuFileWrite
|
| 449 |
+
*
|
| 450 |
+
* @see cuFileDriverOpen
|
| 451 |
+
*/
|
| 452 |
+
CUfileError_t cuFileDriverClose(void);
|
| 453 |
+
|
| 454 |
+
/**
|
| 455 |
+
* @brief
|
| 456 |
+
* returns use count of cufile drivers at that moment by the process.
|
| 457 |
+
*/
|
| 458 |
+
long cuFileUseCount(void);
|
| 459 |
+
|
| 460 |
+
/**
|
| 461 |
+
* @brief
|
| 462 |
+
* Gets the Driver session properties
|
| 463 |
+
*
|
| 464 |
+
* @return CU_FILE_SUCCESS on success
|
| 465 |
+
*
|
| 466 |
+
* @see cuFileDriverSetPollMode
|
| 467 |
+
* @see cuFileDriverSetMaxDirectIOSize
|
| 468 |
+
* @see cuFileDriverSetMaxCacheSize
|
| 469 |
+
* @see cuFileDriverSetMaxPinnedMemSize
|
| 470 |
+
*/
|
| 471 |
+
CUfileError_t cuFileDriverGetProperties(CUfileDrvProps_t *props);
|
| 472 |
+
|
| 473 |
+
/**
|
| 474 |
+
* @brief
|
| 475 |
+
* Sets whether the Read/Write APIs use polling to do IO operations
|
| 476 |
+
*
|
| 477 |
+
* @param poll boolean to indicate whether to use poll mode or not
|
| 478 |
+
* @param poll_threshold_size max IO size to use for POLLING mode in KB
|
| 479 |
+
*
|
| 480 |
+
* @return CU_FILE_SUCCESS on success
|
| 481 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED if the driver is not initialized
|
| 482 |
+
* @return CU_FILE_DRIVER_VERSION_MISMATCH, CU_FILE_DRIVER_UNSUPPORTED_LIMIT on error
|
| 483 |
+
*
|
| 484 |
+
* @warning This is an advanced command and should be tuned based on available system memory
|
| 485 |
+
*
|
| 486 |
+
* @see cuFileDriverGetProperties
|
| 487 |
+
*/
|
| 488 |
+
CUfileError_t cuFileDriverSetPollMode(bool poll, size_t poll_threshold_size);
|
| 489 |
+
|
| 490 |
+
/**
|
| 491 |
+
* @brief
|
| 492 |
+
* Control parameter to set max IO size(KB) used by the library to talk to nvidia-fs driver
|
| 493 |
+
*
|
| 494 |
+
* @param max_direct_io_size maximum allowed direct io size in KB
|
| 495 |
+
*
|
| 496 |
+
* @return CU_FILE_SUCCESS on success
|
| 497 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED if the driver is not initialized
|
| 498 |
+
* @return CU_FILE_DRIVER_VERSION_MISMATCH, CU_FILE_DRIVER_UNSUPPORTED_LIMIT on error
|
| 499 |
+
*
|
| 500 |
+
* @warning This is an advanced command and should be tuned based on available system memory
|
| 501 |
+
*
|
| 502 |
+
* @see cuFileDriverGetProperties
|
| 503 |
+
*
|
| 504 |
+
*/
|
| 505 |
+
CUfileError_t cuFileDriverSetMaxDirectIOSize(size_t max_direct_io_size);
|
| 506 |
+
|
| 507 |
+
/**
|
| 508 |
+
* @brief
|
| 509 |
+
* Control parameter to set maximum GPU memory reserved per device by the library for internal buffering
|
| 510 |
+
*
|
| 511 |
+
* @param max_cache_size The maximum GPU buffer space per device used for internal use in KB
|
| 512 |
+
*
|
| 513 |
+
* @return CU_FILE_SUCCESS on success
|
| 514 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED if the driver is not initialized
|
| 515 |
+
* @return CU_FILE_DRIVER_VERSION_MISMATCH, CU_FILE_DRIVER_UNSUPPORTED_LIMIT on error
|
| 516 |
+
*
|
| 517 |
+
* @warning This is an advanced command and should be tuned based on supported GPU memory
|
| 518 |
+
*
|
| 519 |
+
* @see cuFileDriverGetProperties
|
| 520 |
+
*/
|
| 521 |
+
CUfileError_t cuFileDriverSetMaxCacheSize(size_t max_cache_size);
|
| 522 |
+
|
| 523 |
+
/**
|
| 524 |
+
* @brief
|
| 525 |
+
* Sets maximum buffer space that is pinned in KB for use by @ref cuFileBufRegister
|
| 526 |
+
*
|
| 527 |
+
* @param max_pinned_size maximum buffer space that is pinned in KB
|
| 528 |
+
*
|
| 529 |
+
* @return CU_FILE_SUCCESS on success
|
| 530 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED if the driver is not initialized
|
| 531 |
+
* @return CU_FILE_DRIVER_VERSION_MISMATCH, CU_FILE_DRIVER_UNSUPPORTED_LIMIT on error
|
| 532 |
+
*
|
| 533 |
+
* @warning This is an advanced command and should be tuned based on supported GPU memory
|
| 534 |
+
*
|
| 535 |
+
* @see cuFileDriverGetProperties
|
| 536 |
+
*
|
| 537 |
+
*/
|
| 538 |
+
CUfileError_t cuFileDriverSetMaxPinnedMemSize(size_t max_pinned_size);
|
| 539 |
+
|
| 540 |
+
//Experimental Batch API's
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
typedef enum CUfileOpcode {
|
| 544 |
+
CUFILE_READ = 0,
|
| 545 |
+
CUFILE_WRITE
|
| 546 |
+
}CUfileOpcode_t;
|
| 547 |
+
|
| 548 |
+
typedef enum CUFILEStatus_enum {
|
| 549 |
+
CUFILE_WAITING = 0x000001, /* required value prior to submission */
|
| 550 |
+
CUFILE_PENDING = 0x000002, /* once enqueued */
|
| 551 |
+
CUFILE_INVALID = 0x000004, /* request was ill-formed or could not be enqueued */
|
| 552 |
+
CUFILE_CANCELED = 0x000008, /* request successfully canceled */
|
| 553 |
+
CUFILE_COMPLETE = 0x0000010, /* request successfully completed */
|
| 554 |
+
CUFILE_TIMEOUT = 0x0000020, /* request timed out */
|
| 555 |
+
CUFILE_FAILED = 0x0000040 /* unable to complete */
|
| 556 |
+
}CUfileStatus_t;
|
| 557 |
+
typedef enum cufileBatchMode {
|
| 558 |
+
CUFILE_BATCH = 1,
|
| 559 |
+
} CUfileBatchMode_t;
|
| 560 |
+
typedef struct CUfileIOParams {
|
| 561 |
+
CUfileBatchMode_t mode; // Must be the very first field.
|
| 562 |
+
union {
|
| 563 |
+
struct {
|
| 564 |
+
void *devPtr_base; //This can be a device memory or a host memory pointer.
|
| 565 |
+
off_t file_offset;
|
| 566 |
+
off_t devPtr_offset;
|
| 567 |
+
size_t size;
|
| 568 |
+
}batch;
|
| 569 |
+
}u;
|
| 570 |
+
CUfileHandle_t fh;
|
| 571 |
+
CUfileOpcode_t opcode;
|
| 572 |
+
void *cookie;
|
| 573 |
+
}CUfileIOParams_t;
|
| 574 |
+
typedef struct CUfileIOEvents {
|
| 575 |
+
void *cookie;
|
| 576 |
+
CUfileStatus_t status; /* status of the operation */
|
| 577 |
+
size_t ret; /* -ve error or amount of I/O done. */
|
| 578 |
+
}CUfileIOEvents_t;
|
| 579 |
+
|
| 580 |
+
typedef void* CUfileBatchHandle_t;
|
| 581 |
+
|
| 582 |
+
CUfileError_t cuFileBatchIOSetUp(CUfileBatchHandle_t *batch_idp, unsigned nr);
|
| 583 |
+
CUfileError_t cuFileBatchIOSubmit(CUfileBatchHandle_t batch_idp, unsigned nr, CUfileIOParams_t *iocbp, unsigned int flags);
|
| 584 |
+
CUfileError_t cuFileBatchIOGetStatus(CUfileBatchHandle_t batch_idp, unsigned min_nr, unsigned* nr,
|
| 585 |
+
CUfileIOEvents_t *iocbp, struct timespec* timeout);
|
| 586 |
+
CUfileError_t cuFileBatchIOCancel(CUfileBatchHandle_t batch_idp);
|
| 587 |
+
void cuFileBatchIODestroy(CUfileBatchHandle_t batch_idp);
|
| 588 |
+
|
| 589 |
+
//Async API's with cuda streams
|
| 590 |
+
|
| 591 |
+
// cuFile stream API registration flags
|
| 592 |
+
// buffer pointer offset is set at submission time
|
| 593 |
+
#define CU_FILE_STREAM_FIXED_BUF_OFFSET 1
|
| 594 |
+
// file offset is set at submission time
|
| 595 |
+
#define CU_FILE_STREAM_FIXED_FILE_OFFSET 2
|
| 596 |
+
// file size is set at submission time
|
| 597 |
+
#define CU_FILE_STREAM_FIXED_FILE_SIZE 4
|
| 598 |
+
// size, offset and buffer offset are 4k aligned
|
| 599 |
+
#define CU_FILE_STREAM_PAGE_ALIGNED_INPUTS 8
|
| 600 |
+
|
| 601 |
+
/**
|
| 602 |
+
*@brief
|
| 603 |
+
|
| 604 |
+
* @param fh The cuFile handle for the file.
|
| 605 |
+
* @param bufPtr_base base address of buffer in device or host memory
|
| 606 |
+
* @param size_p pointer to size bytes to read
|
| 607 |
+
* @note *size_p if the size is not known at the time of submission, then must provide the max possible size for I/O request.
|
| 608 |
+
* @param file_offset_p pointer to file-offset from begining of the file
|
| 609 |
+
* @param bufPtr_offset_p pointer to offset relative to the bufPtr_base pointer to read into.
|
| 610 |
+
* @param bytes_read_p pointer to the number of bytes that were successfully read.
|
| 611 |
+
* @param CUstream stream cuda stream for the operation.
|
| 612 |
+
*
|
| 613 |
+
* @return size of bytes successfully read in *bytes_read_p
|
| 614 |
+
* @return -1 on error, in which case errno is set to indicate filesystem errors.
|
| 615 |
+
* @return all other errors will return a negative integer value of @ref CUfileOpError enum value.
|
| 616 |
+
*
|
| 617 |
+
* @note If the bufPtr_base is not registered with @ref cuFileBufRegister, the data will be buffered through preallocated pinned buffers.
|
| 618 |
+
* @note This is useful for applications that need to perform IO to unaligned file offsets and/or size. This is also recommended
|
| 619 |
+
* for cases where the BAR1 memory size is smaller than the size of the allocated memory.
|
| 620 |
+
* @note If the stream is registered with cuFileStreamRegister, the IO setup and teardown overhead will be reduced.
|
| 621 |
+
* @note on cuda stream errors, the user must call cuFileStreamDeregister to release any outstanding cuFile resources for the stream.
|
| 622 |
+
*
|
| 623 |
+
*
|
| 624 |
+
* @see cuFileBufRegister
|
| 625 |
+
* @see cuFileHandleRegister
|
| 626 |
+
* @see cuFileRead
|
| 627 |
+
* @see cuFileStreamRegister
|
| 628 |
+
* @see cuFileStreamDeregister
|
| 629 |
+
*/
|
| 630 |
+
|
| 631 |
+
CUfileError_t cuFileReadAsync(CUfileHandle_t fh, void *bufPtr_base,
|
| 632 |
+
size_t *size_p, off_t *file_offset_p, off_t *bufPtr_offset_p, ssize_t *bytes_read_p, CUstream stream);
|
| 633 |
+
|
| 634 |
+
/**
|
| 635 |
+
*@brief
|
| 636 |
+
|
| 637 |
+
* @param fh The cuFile handle for the file.
|
| 638 |
+
* @param bufPtr_base base address of buffer in device or host memory
|
| 639 |
+
* @param size_p pointer to size bytes to write.
|
| 640 |
+
* @note *size_p if the size is not known at the time of submission, then must provide the max possible size for I/O request.
|
| 641 |
+
* @param file_offset_p pointer to file-offset from begining of the file
|
| 642 |
+
* @param bufPtr_offset_p pointer to offset relative to the bufPtr_base pointer to write from.
|
| 643 |
+
* @param bytes_written_p pointer to the number of bytes that were successfully written.
|
| 644 |
+
* @param CUstream cuda stream for the operation.
|
| 645 |
+
*
|
| 646 |
+
* @return size of bytes successfully written in *bytes_written_p
|
| 647 |
+
* @return -1 on error, in which case errno is set to indicate filesystem errors.
|
| 648 |
+
* @return all other errors will return a negative integer value of @ref CUfileOpError enum value.
|
| 649 |
+
*
|
| 650 |
+
* @note If the bufPtr_base is not registered with @ref cuFileBufRegister, the data will be buffered through preallocated pinned buffers.
|
| 651 |
+
* @note This is useful for applications that need to perform IO to unaligned file offsets and/or size. This is also recommended
|
| 652 |
+
* for cases where the BAR1 memory size is smaller than the size of the allocated memory.
|
| 653 |
+
* @note If the stream is registered with cuFileStreamRegister prior to this call, the IO setup and teardown overhead will be reduced.
|
| 654 |
+
* @note on cuda stream errors, the user must call cuFileStreamDeregister to release any outstanding cuFile resources for the stream.
|
| 655 |
+
*
|
| 656 |
+
* @see cuFileBufRegister
|
| 657 |
+
* @see cuFileHandleRegister
|
| 658 |
+
* @see cuFileWrite
|
| 659 |
+
* @see cuFileStreamRegister
|
| 660 |
+
* @see cuFileStreamDeregister
|
| 661 |
+
*/
|
| 662 |
+
|
| 663 |
+
CUfileError_t cuFileWriteAsync(CUfileHandle_t fh, void *bufPtr_base,
|
| 664 |
+
size_t *size_p, off_t *file_offset_p, off_t *bufPtr_offset_p, ssize_t *bytes_written_p, CUstream stream);
|
| 665 |
+
|
| 666 |
+
/**
|
| 667 |
+
*@brief
|
| 668 |
+
|
| 669 |
+
* @param CUstream cuda stream for the operation.
|
| 670 |
+
* @param flags for the stream to improve the stream execution of IO based on input parameters.
|
| 671 |
+
* @note supported FLAGS are
|
| 672 |
+
* @note CU_FILE_STREAM_FIXED_BUF_OFFSET - buffer pointer offset is set at submission time
|
| 673 |
+
* @note CU_FILE_STREAM_FIXED_FILE_OFFSET - file offset is set at submission time
|
| 674 |
+
* @note CU_FILE_STREAM_FIXED_FILE_SIZE - file size is set at submission time
|
| 675 |
+
* @note CU_FILE_STREAM_PAGE_ALIGNED_INPUTS - size, offset and buffer offset are 4k aligned
|
| 676 |
+
*
|
| 677 |
+
* @note allocates resources needed to support cuFile operations asynchronously for the cuda stream
|
| 678 |
+
* @note This is useful for applications that need to perform IO to unaligned file offsets and/or size. This is also recommended
|
| 679 |
+
* for cases where the BAR1 memory size is smaller than the size of the allocated memory.
|
| 680 |
+
*
|
| 681 |
+
* @return CU_FILE_SUCCESS on success
|
| 682 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED if the driver is not initialized
|
| 683 |
+
* @return CU_FILE_INVALID_VALUE if the stream is invalid
|
| 684 |
+
*
|
| 685 |
+
* @see cuFileReadAsync
|
| 686 |
+
* @see cuFileWriteAsync
|
| 687 |
+
* @see cuFileStreamDeregister
|
| 688 |
+
*/
|
| 689 |
+
|
| 690 |
+
CUfileError_t cuFileStreamRegister(CUstream stream, unsigned flags);
|
| 691 |
+
|
| 692 |
+
/**
|
| 693 |
+
*@brief
|
| 694 |
+
|
| 695 |
+
* @param CUstream cuda stream for the operation.
|
| 696 |
+
*
|
| 697 |
+
* @note deallocates resources used by previous cuFile asynchronous operations for the cuda stream
|
| 698 |
+
* @note highly recommend to call after cuda stream errors to release any outstanding cuFile resources for this stream
|
| 699 |
+
* @note must be called before cuStreamDestroy call for the specified stream.
|
| 700 |
+
* @note This is useful for applications that need to perform IO to unaligned file offsets and/or size. This is also recommended
|
| 701 |
+
* for cases where the BAR1 memory size is smaller than the size of the allocated memory.
|
| 702 |
+
*
|
| 703 |
+
* @return CU_FILE_SUCCESS on success
|
| 704 |
+
* @return CU_FILE_DRIVER_NOT_INITIALIZED if the driver is not initialized
|
| 705 |
+
* @return CU_FILE_INVALID_VALUE if the stream is invalid
|
| 706 |
+
*
|
| 707 |
+
* @see cuFileReadAsync
|
| 708 |
+
* @see cuFileWriteAsync
|
| 709 |
+
* @see cuFileStreamRegister
|
| 710 |
+
*/
|
| 711 |
+
|
| 712 |
+
CUfileError_t cuFileStreamDeregister(CUstream stream);
|
| 713 |
+
|
| 714 |
+
/**
|
| 715 |
+
*@brief
|
| 716 |
+
|
| 717 |
+
* @returns cufile library version.
|
| 718 |
+
*
|
| 719 |
+
* @The version is returned as (1000 major + 10 minor).
|
| 720 |
+
* @For example, CUFILE 1.7.0 would be represented by 1070.
|
| 721 |
+
* @note This is useful for applications that need to inquire the library.
|
| 722 |
+
*
|
| 723 |
+
* @return CU_FILE_SUCCESS on success
|
| 724 |
+
* @return CU_FILE_INVALID_VALUE if the input parameter is null.
|
| 725 |
+
* @return CU_FILE_DRIVER_VERSION_READ_ERROR if the version is not available.
|
| 726 |
+
*
|
| 727 |
+
*/
|
| 728 |
+
|
| 729 |
+
CUfileError_t cuFileGetVersion(int *version);
|
| 730 |
+
|
| 731 |
+
#pragma GCC visibility pop
|
| 732 |
+
|
| 733 |
+
/// @cond DOXYGEN_SKIP_MACRO
|
| 734 |
+
#endif // CUFILE_H
|
| 735 |
+
/// @endcond
|
| 736 |
+
#ifdef __cplusplus
|
| 737 |
+
}
|
| 738 |
+
#endif
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (187 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ad6648ecf773da5a2fb829fe61573c04d621124beacd877de3d2fa98fcb5ec7d
|
| 3 |
+
size 3041296
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile_rdma.so.1
ADDED
|
Binary file (46.5 kB). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (183 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (191 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand.h
ADDED
|
@@ -0,0 +1,1080 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(CURAND_H_)
|
| 51 |
+
#define CURAND_H_
|
| 52 |
+
|
| 53 |
+
/**
|
| 54 |
+
* \defgroup HOST Host API
|
| 55 |
+
*
|
| 56 |
+
* @{
|
| 57 |
+
*/
|
| 58 |
+
#ifndef __CUDACC_RTC__
|
| 59 |
+
#include <cuda_runtime.h>
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
#ifndef CURANDAPI
|
| 63 |
+
#ifdef _WIN32
|
| 64 |
+
#define CURANDAPI __stdcall
|
| 65 |
+
#else
|
| 66 |
+
#define CURANDAPI
|
| 67 |
+
#endif
|
| 68 |
+
#endif
|
| 69 |
+
|
| 70 |
+
#if defined(__cplusplus)
|
| 71 |
+
extern "C" {
|
| 72 |
+
#endif /* __cplusplus */
|
| 73 |
+
|
| 74 |
+
#define CURAND_VER_MAJOR 10
|
| 75 |
+
#define CURAND_VER_MINOR 3
|
| 76 |
+
#define CURAND_VER_PATCH 7
|
| 77 |
+
#define CURAND_VER_BUILD 77
|
| 78 |
+
#define CURAND_VERSION (CURAND_VER_MAJOR * 1000 + \
|
| 79 |
+
CURAND_VER_MINOR * 100 + \
|
| 80 |
+
CURAND_VER_PATCH)
|
| 81 |
+
/* CURAND Host API datatypes */
|
| 82 |
+
|
| 83 |
+
/**
|
| 84 |
+
* @{
|
| 85 |
+
*/
|
| 86 |
+
|
| 87 |
+
/**
|
| 88 |
+
* CURAND function call status types
|
| 89 |
+
*/
|
| 90 |
+
enum curandStatus {
|
| 91 |
+
CURAND_STATUS_SUCCESS = 0, ///< No errors
|
| 92 |
+
CURAND_STATUS_VERSION_MISMATCH = 100, ///< Header file and linked library version do not match
|
| 93 |
+
CURAND_STATUS_NOT_INITIALIZED = 101, ///< Generator not initialized
|
| 94 |
+
CURAND_STATUS_ALLOCATION_FAILED = 102, ///< Memory allocation failed
|
| 95 |
+
CURAND_STATUS_TYPE_ERROR = 103, ///< Generator is wrong type
|
| 96 |
+
CURAND_STATUS_OUT_OF_RANGE = 104, ///< Argument out of range
|
| 97 |
+
CURAND_STATUS_LENGTH_NOT_MULTIPLE = 105, ///< Length requested is not a multple of dimension
|
| 98 |
+
CURAND_STATUS_DOUBLE_PRECISION_REQUIRED = 106, ///< GPU does not have double precision required by MRG32k3a
|
| 99 |
+
CURAND_STATUS_LAUNCH_FAILURE = 201, ///< Kernel launch failure
|
| 100 |
+
CURAND_STATUS_PREEXISTING_FAILURE = 202, ///< Preexisting failure on library entry
|
| 101 |
+
CURAND_STATUS_INITIALIZATION_FAILED = 203, ///< Initialization of CUDA failed
|
| 102 |
+
CURAND_STATUS_ARCH_MISMATCH = 204, ///< Architecture mismatch, GPU does not support requested feature
|
| 103 |
+
CURAND_STATUS_INTERNAL_ERROR = 999 ///< Internal library error
|
| 104 |
+
};
|
| 105 |
+
|
| 106 |
+
/*
|
| 107 |
+
* CURAND function call status types
|
| 108 |
+
*/
|
| 109 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 110 |
+
typedef enum curandStatus curandStatus_t;
|
| 111 |
+
/** \endcond */
|
| 112 |
+
|
| 113 |
+
/**
|
| 114 |
+
* CURAND generator types
|
| 115 |
+
*/
|
| 116 |
+
enum curandRngType {
|
| 117 |
+
CURAND_RNG_TEST = 0,
|
| 118 |
+
CURAND_RNG_PSEUDO_DEFAULT = 100, ///< Default pseudorandom generator
|
| 119 |
+
CURAND_RNG_PSEUDO_XORWOW = 101, ///< XORWOW pseudorandom generator
|
| 120 |
+
CURAND_RNG_PSEUDO_MRG32K3A = 121, ///< MRG32k3a pseudorandom generator
|
| 121 |
+
CURAND_RNG_PSEUDO_MTGP32 = 141, ///< Mersenne Twister MTGP32 pseudorandom generator
|
| 122 |
+
CURAND_RNG_PSEUDO_MT19937 = 142, ///< Mersenne Twister MT19937 pseudorandom generator
|
| 123 |
+
CURAND_RNG_PSEUDO_PHILOX4_32_10 = 161, ///< PHILOX-4x32-10 pseudorandom generator
|
| 124 |
+
CURAND_RNG_QUASI_DEFAULT = 200, ///< Default quasirandom generator
|
| 125 |
+
CURAND_RNG_QUASI_SOBOL32 = 201, ///< Sobol32 quasirandom generator
|
| 126 |
+
CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 = 202, ///< Scrambled Sobol32 quasirandom generator
|
| 127 |
+
CURAND_RNG_QUASI_SOBOL64 = 203, ///< Sobol64 quasirandom generator
|
| 128 |
+
CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 = 204 ///< Scrambled Sobol64 quasirandom generator
|
| 129 |
+
};
|
| 130 |
+
|
| 131 |
+
/*
|
| 132 |
+
* CURAND generator types
|
| 133 |
+
*/
|
| 134 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 135 |
+
typedef enum curandRngType curandRngType_t;
|
| 136 |
+
/** \endcond */
|
| 137 |
+
|
| 138 |
+
/**
|
| 139 |
+
* CURAND ordering of results in memory
|
| 140 |
+
*/
|
| 141 |
+
enum curandOrdering {
|
| 142 |
+
CURAND_ORDERING_PSEUDO_BEST = 100, ///< Best ordering for pseudorandom results
|
| 143 |
+
CURAND_ORDERING_PSEUDO_DEFAULT = 101, ///< Specific default thread sequence for pseudorandom results, same as CURAND_ORDERING_PSEUDO_BEST
|
| 144 |
+
CURAND_ORDERING_PSEUDO_SEEDED = 102, ///< Specific seeding pattern for fast lower quality pseudorandom results
|
| 145 |
+
CURAND_ORDERING_PSEUDO_LEGACY = 103, ///< Specific legacy sequence for pseudorandom results, guaranteed to remain the same for all cuRAND release
|
| 146 |
+
CURAND_ORDERING_PSEUDO_DYNAMIC = 104, ///< Specific ordering adjusted to the device it is being executed on, provides the best performance
|
| 147 |
+
CURAND_ORDERING_QUASI_DEFAULT = 201 ///< Specific n-dimensional ordering for quasirandom results
|
| 148 |
+
};
|
| 149 |
+
|
| 150 |
+
/*
|
| 151 |
+
* CURAND ordering of results in memory
|
| 152 |
+
*/
|
| 153 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 154 |
+
typedef enum curandOrdering curandOrdering_t;
|
| 155 |
+
/** \endcond */
|
| 156 |
+
|
| 157 |
+
/**
 * CURAND choice of direction vector set
 */
enum curandDirectionVectorSet {
    CURAND_DIRECTION_VECTORS_32_JOEKUO6 = 101, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions
    CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 = 102, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled
    CURAND_DIRECTION_VECTORS_64_JOEKUO6 = 103, ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions
    CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 = 104 ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled
};

/*
 * CURAND choice of direction vector set
 */
/** \cond UNHIDE_TYPEDEFS */
typedef enum curandDirectionVectorSet curandDirectionVectorSet_t;
/** \endcond */

/**
 * CURAND array of 32-bit direction vectors
 */
/** \cond UNHIDE_TYPEDEFS */
typedef unsigned int curandDirectionVectors32_t[32];
/** \endcond */

/**
 * CURAND array of 64-bit direction vectors
 */
/** \cond UNHIDE_TYPEDEFS */
typedef unsigned long long curandDirectionVectors64_t[64];
/** \endcond */

/**
 * CURAND generator (opaque)
 */
struct curandGenerator_st;

/**
 * CURAND generator
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandGenerator_st *curandGenerator_t;
/** \endcond */

/**
 * CURAND distribution
 */
/** \cond UNHIDE_TYPEDEFS */
typedef double curandDistribution_st;
typedef curandDistribution_st *curandDistribution_t;
typedef struct curandDistributionShift_st *curandDistributionShift_t;
/** \endcond */
/**
 * CURAND distribution M2
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandDistributionM2Shift_st *curandDistributionM2Shift_t;
typedef struct curandHistogramM2_st *curandHistogramM2_t;
typedef unsigned int curandHistogramM2K_st;
typedef curandHistogramM2K_st *curandHistogramM2K_t;
typedef curandDistribution_st curandHistogramM2V_st;
typedef curandHistogramM2V_st *curandHistogramM2V_t;

typedef struct curandDiscreteDistribution_st *curandDiscreteDistribution_t;
/** \endcond */

/*
 * CURAND METHOD
 */
/** \cond UNHIDE_ENUMS */
enum curandMethod {
    CURAND_CHOOSE_BEST = 0, // choose best depends on args
    CURAND_ITR = 1,
    CURAND_KNUTH = 2,
    CURAND_HITR = 3,
    CURAND_M1 = 4,
    CURAND_M2 = 5,
    CURAND_BINARY_SEARCH = 6,
    CURAND_DISCRETE_GAUSS = 7,
    CURAND_REJECTION = 8,
    CURAND_DEVICE_API = 9,
    CURAND_FAST_REJECTION = 10,
    CURAND_3RD = 11,
    CURAND_DEFINITION = 12,
    CURAND_POISSON = 13
};

typedef enum curandMethod curandMethod_t;
/** \endcond */


#ifndef __CUDACC_RTC__

/**
 * @}
 */

/**
|
| 254 |
+
* \brief Create new random number generator.
|
| 255 |
+
*
|
| 256 |
+
* Creates a new random number generator of type \p rng_type
|
| 257 |
+
* and returns it in \p *generator.
|
| 258 |
+
*
|
| 259 |
+
* Legal values for \p rng_type are:
|
| 260 |
+
* - CURAND_RNG_PSEUDO_DEFAULT
|
| 261 |
+
* - CURAND_RNG_PSEUDO_XORWOW
|
| 262 |
+
* - CURAND_RNG_PSEUDO_MRG32K3A
|
| 263 |
+
* - CURAND_RNG_PSEUDO_MTGP32
|
| 264 |
+
* - CURAND_RNG_PSEUDO_MT19937
|
| 265 |
+
* - CURAND_RNG_PSEUDO_PHILOX4_32_10
|
| 266 |
+
* - CURAND_RNG_QUASI_DEFAULT
|
| 267 |
+
* - CURAND_RNG_QUASI_SOBOL32
|
| 268 |
+
* - CURAND_RNG_QUASI_SCRAMBLED_SOBOL32
|
| 269 |
+
* - CURAND_RNG_QUASI_SOBOL64
|
| 270 |
+
* - CURAND_RNG_QUASI_SCRAMBLED_SOBOL64
|
| 271 |
+
*
|
| 272 |
+
* When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen
|
| 273 |
+
* is CURAND_RNG_PSEUDO_XORWOW. \n
|
| 274 |
+
* When \p rng_type is CURAND_RNG_QUASI_DEFAULT,
|
| 275 |
+
* the type chosen is CURAND_RNG_QUASI_SOBOL32.
|
| 276 |
+
*
|
| 277 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are:
|
| 278 |
+
* - \p seed = 0
|
| 279 |
+
* - \p offset = 0
|
| 280 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 281 |
+
*
|
| 282 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are:
|
| 283 |
+
* - \p seed = 0
|
| 284 |
+
* - \p offset = 0
|
| 285 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 286 |
+
*
|
| 287 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are:
|
| 288 |
+
* - \p seed = 0
|
| 289 |
+
* - \p offset = 0
|
| 290 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 291 |
+
*
|
| 292 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are:
|
| 293 |
+
* - \p seed = 0
|
| 294 |
+
* - \p offset = 0
|
| 295 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 296 |
+
*
|
| 297 |
+
* * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are:
|
| 298 |
+
* - \p seed = 0
|
| 299 |
+
* - \p offset = 0
|
| 300 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 301 |
+
*
|
| 302 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are:
|
| 303 |
+
* - \p dimensions = 1
|
| 304 |
+
* - \p offset = 0
|
| 305 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 306 |
+
*
|
| 307 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are:
|
| 308 |
+
* - \p dimensions = 1
|
| 309 |
+
* - \p offset = 0
|
| 310 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 311 |
+
*
|
| 312 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBBLED_SOBOL32 are:
|
| 313 |
+
* - \p dimensions = 1
|
| 314 |
+
* - \p offset = 0
|
| 315 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 316 |
+
*
|
| 317 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are:
|
| 318 |
+
* - \p dimensions = 1
|
| 319 |
+
* - \p offset = 0
|
| 320 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 321 |
+
*
|
| 322 |
+
* \param generator - Pointer to generator
|
| 323 |
+
* \param rng_type - Type of generator to create
|
| 324 |
+
*
|
| 325 |
+
* \return
|
| 326 |
+
* - CURAND_STATUS_ALLOCATION_FAILED, if memory could not be allocated \n
|
| 327 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
|
| 328 |
+
* - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the
|
| 329 |
+
* dynamically linked library version \n
|
| 330 |
+
* - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n
|
| 331 |
+
* - CURAND_STATUS_SUCCESS if generator was created successfully \n
|
| 332 |
+
*
|
| 333 |
+
*/
|
| 334 |
+
curandStatus_t CURANDAPI
|
| 335 |
+
curandCreateGenerator(curandGenerator_t *generator, curandRngType_t rng_type);
|
| 336 |
+
|
| 337 |
+
/**
|
| 338 |
+
* \brief Create new host CPU random number generator.
|
| 339 |
+
*
|
| 340 |
+
* Creates a new host CPU random number generator of type \p rng_type
|
| 341 |
+
* and returns it in \p *generator.
|
| 342 |
+
*
|
| 343 |
+
* Legal values for \p rng_type are:
|
| 344 |
+
* - CURAND_RNG_PSEUDO_DEFAULT
|
| 345 |
+
* - CURAND_RNG_PSEUDO_XORWOW
|
| 346 |
+
* - CURAND_RNG_PSEUDO_MRG32K3A
|
| 347 |
+
* - CURAND_RNG_PSEUDO_MTGP32
|
| 348 |
+
* - CURAND_RNG_PSEUDO_MT19937
|
| 349 |
+
* - CURAND_RNG_PSEUDO_PHILOX4_32_10
|
| 350 |
+
* - CURAND_RNG_QUASI_DEFAULT
|
| 351 |
+
* - CURAND_RNG_QUASI_SOBOL32
|
| 352 |
+
* - CURAND_RNG_QUASI_SCRAMBLED_SOBOL32
|
| 353 |
+
* - CURAND_RNG_QUASI_SOBOL64
|
| 354 |
+
* - CURAND_RNG_QUASI_SCRAMBLED_SOBOL64
|
| 355 |
+
*
|
| 356 |
+
* When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen
|
| 357 |
+
* is CURAND_RNG_PSEUDO_XORWOW. \n
|
| 358 |
+
* When \p rng_type is CURAND_RNG_QUASI_DEFAULT,
|
| 359 |
+
* the type chosen is CURAND_RNG_QUASI_SOBOL32.
|
| 360 |
+
*
|
| 361 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are:
|
| 362 |
+
* - \p seed = 0
|
| 363 |
+
* - \p offset = 0
|
| 364 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 365 |
+
*
|
| 366 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are:
|
| 367 |
+
* - \p seed = 0
|
| 368 |
+
* - \p offset = 0
|
| 369 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 370 |
+
*
|
| 371 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are:
|
| 372 |
+
* - \p seed = 0
|
| 373 |
+
* - \p offset = 0
|
| 374 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 375 |
+
*
|
| 376 |
+
* The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are:
|
| 377 |
+
* - \p seed = 0
|
| 378 |
+
* - \p offset = 0
|
| 379 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 380 |
+
*
|
| 381 |
+
* * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are:
|
| 382 |
+
* - \p seed = 0
|
| 383 |
+
* - \p offset = 0
|
| 384 |
+
* - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
|
| 385 |
+
*
|
| 386 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are:
|
| 387 |
+
* - \p dimensions = 1
|
| 388 |
+
* - \p offset = 0
|
| 389 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 390 |
+
*
|
| 391 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are:
|
| 392 |
+
* - \p dimensions = 1
|
| 393 |
+
* - \p offset = 0
|
| 394 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 395 |
+
*
|
| 396 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 are:
|
| 397 |
+
* - \p dimensions = 1
|
| 398 |
+
* - \p offset = 0
|
| 399 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 400 |
+
*
|
| 401 |
+
* The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are:
|
| 402 |
+
* - \p dimensions = 1
|
| 403 |
+
* - \p offset = 0
|
| 404 |
+
* - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
|
| 405 |
+
*
|
| 406 |
+
* \param generator - Pointer to generator
|
| 407 |
+
* \param rng_type - Type of generator to create
|
| 408 |
+
*
|
| 409 |
+
* \return
|
| 410 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 411 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
|
| 412 |
+
* - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the
|
| 413 |
+
* dynamically linked library version \n
|
| 414 |
+
* - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n
|
| 415 |
+
* - CURAND_STATUS_SUCCESS if generator was created successfully \n
|
| 416 |
+
*/
|
| 417 |
+
curandStatus_t CURANDAPI
|
| 418 |
+
curandCreateGeneratorHost(curandGenerator_t *generator, curandRngType_t rng_type);
|
| 419 |
+
|
| 420 |
+
/**
|
| 421 |
+
* \brief Destroy an existing generator.
|
| 422 |
+
*
|
| 423 |
+
* Destroy an existing generator and free all memory associated with its state.
|
| 424 |
+
*
|
| 425 |
+
* \param generator - Generator to destroy
|
| 426 |
+
*
|
| 427 |
+
* \return
|
| 428 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 429 |
+
* - CURAND_STATUS_SUCCESS if generator was destroyed successfully \n
|
| 430 |
+
*/
|
| 431 |
+
curandStatus_t CURANDAPI
|
| 432 |
+
curandDestroyGenerator(curandGenerator_t generator);
|
| 433 |
+
|
| 434 |
+
/**
|
| 435 |
+
* \brief Return the version number of the library.
|
| 436 |
+
*
|
| 437 |
+
* Return in \p *version the version number of the dynamically linked CURAND
|
| 438 |
+
* library. The format is the same as CUDART_VERSION from the CUDA Runtime.
|
| 439 |
+
* The only supported configuration is CURAND version equal to CUDA Runtime
|
| 440 |
+
* version.
|
| 441 |
+
*
|
| 442 |
+
* \param version - CURAND library version
|
| 443 |
+
*
|
| 444 |
+
* \return
|
| 445 |
+
* - CURAND_STATUS_SUCCESS if the version number was successfully returned \n
|
| 446 |
+
*/
|
| 447 |
+
curandStatus_t CURANDAPI
|
| 448 |
+
curandGetVersion(int *version);
|
| 449 |
+
|
| 450 |
+
/**
|
| 451 |
+
* \brief Return the value of the curand property.
|
| 452 |
+
*
|
| 453 |
+
* Return in \p *value the number for the property described by \p type of the
|
| 454 |
+
* dynamically linked CURAND library.
|
| 455 |
+
*
|
| 456 |
+
* \param type - CUDA library property
|
| 457 |
+
* \param value - integer value for the requested property
|
| 458 |
+
*
|
| 459 |
+
* \return
|
| 460 |
+
* - CURAND_STATUS_SUCCESS if the property value was successfully returned \n
|
| 461 |
+
* - CURAND_STATUS_OUT_OF_RANGE if the property type is not recognized \n
|
| 462 |
+
*/
|
| 463 |
+
curandStatus_t CURANDAPI
|
| 464 |
+
curandGetProperty(libraryPropertyType type, int *value);
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
/**
|
| 468 |
+
* \brief Set the current stream for CURAND kernel launches.
|
| 469 |
+
*
|
| 470 |
+
* Set the current stream for CURAND kernel launches. All library functions
|
| 471 |
+
* will use this stream until set again.
|
| 472 |
+
*
|
| 473 |
+
* \param generator - Generator to modify
|
| 474 |
+
* \param stream - Stream to use or ::NULL for null stream
|
| 475 |
+
*
|
| 476 |
+
* \return
|
| 477 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 478 |
+
* - CURAND_STATUS_SUCCESS if stream was set successfully \n
|
| 479 |
+
*/
|
| 480 |
+
curandStatus_t CURANDAPI
|
| 481 |
+
curandSetStream(curandGenerator_t generator, cudaStream_t stream);
|
| 482 |
+
|
| 483 |
+
/**
|
| 484 |
+
* \brief Set the seed value of the pseudo-random number generator.
|
| 485 |
+
*
|
| 486 |
+
* Set the seed value of the pseudorandom number generator.
|
| 487 |
+
* All values of seed are valid. Different seeds will produce different sequences.
|
| 488 |
+
* Different seeds will often not be statistically correlated with each other,
|
| 489 |
+
* but some pairs of seed values may generate sequences which are statistically correlated.
|
| 490 |
+
*
|
| 491 |
+
* \param generator - Generator to modify
|
| 492 |
+
* \param seed - Seed value
|
| 493 |
+
*
|
| 494 |
+
* \return
|
| 495 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 496 |
+
* - CURAND_STATUS_TYPE_ERROR if the generator is not a pseudorandom number generator \n
|
| 497 |
+
* - CURAND_STATUS_SUCCESS if generator seed was set successfully \n
|
| 498 |
+
*/
|
| 499 |
+
curandStatus_t CURANDAPI
|
| 500 |
+
curandSetPseudoRandomGeneratorSeed(curandGenerator_t generator, unsigned long long seed);
|
| 501 |
+
|
| 502 |
+
/**
|
| 503 |
+
* \brief Set the absolute offset of the pseudo or quasirandom number generator.
|
| 504 |
+
*
|
| 505 |
+
* Set the absolute offset of the pseudo or quasirandom number generator.
|
| 506 |
+
*
|
| 507 |
+
* All values of offset are valid. The offset position is absolute, not
|
| 508 |
+
* relative to the current position in the sequence.
|
| 509 |
+
*
|
| 510 |
+
* \param generator - Generator to modify
|
| 511 |
+
* \param offset - Absolute offset position
|
| 512 |
+
*
|
| 513 |
+
* \return
|
| 514 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 515 |
+
* - CURAND_STATUS_SUCCESS if generator offset was set successfully \n
|
| 516 |
+
*/
|
| 517 |
+
curandStatus_t CURANDAPI
|
| 518 |
+
curandSetGeneratorOffset(curandGenerator_t generator, unsigned long long offset);
|
| 519 |
+
|
| 520 |
+
/**
|
| 521 |
+
* \brief Set the ordering of results of the pseudo or quasirandom number generator.
|
| 522 |
+
*
|
| 523 |
+
* Set the ordering of results of the pseudo or quasirandom number generator.
|
| 524 |
+
*
|
| 525 |
+
* Legal values of \p order for pseudorandom generators are:
|
| 526 |
+
* - CURAND_ORDERING_PSEUDO_DEFAULT
|
| 527 |
+
* - CURAND_ORDERING_PSEUDO_BEST
|
| 528 |
+
* - CURAND_ORDERING_PSEUDO_SEEDED
|
| 529 |
+
* - CURAND_ORDERING_PSEUDO_LEGACY
|
| 530 |
+
*
|
| 531 |
+
* Legal values of \p order for quasirandom generators are:
|
| 532 |
+
* - CURAND_ORDERING_QUASI_DEFAULT
|
| 533 |
+
*
|
| 534 |
+
* \param generator - Generator to modify
|
| 535 |
+
* \param order - Ordering of results
|
| 536 |
+
*
|
| 537 |
+
* \return
|
| 538 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 539 |
+
* - CURAND_STATUS_OUT_OF_RANGE if the ordering is not valid \n
|
| 540 |
+
* - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n
|
| 541 |
+
*/
|
| 542 |
+
curandStatus_t CURANDAPI
|
| 543 |
+
curandSetGeneratorOrdering(curandGenerator_t generator, curandOrdering_t order);
|
| 544 |
+
|
| 545 |
+
/**
|
| 546 |
+
* \brief Set the number of dimensions.
|
| 547 |
+
*
|
| 548 |
+
* Set the number of dimensions to be generated by the quasirandom number
|
| 549 |
+
* generator.
|
| 550 |
+
*
|
| 551 |
+
* Legal values for \p num_dimensions are 1 to 20000.
|
| 552 |
+
*
|
| 553 |
+
* \param generator - Generator to modify
|
| 554 |
+
* \param num_dimensions - Number of dimensions
|
| 555 |
+
*
|
| 556 |
+
* \return
|
| 557 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 558 |
+
* - CURAND_STATUS_OUT_OF_RANGE if num_dimensions is not valid \n
|
| 559 |
+
* - CURAND_STATUS_TYPE_ERROR if the generator is not a quasirandom number generator \n
|
| 560 |
+
* - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n
|
| 561 |
+
*/
|
| 562 |
+
curandStatus_t CURANDAPI
|
| 563 |
+
curandSetQuasiRandomGeneratorDimensions(curandGenerator_t generator, unsigned int num_dimensions);
|
| 564 |
+
|
| 565 |
+
/**
|
| 566 |
+
* \brief Generate 32-bit pseudo or quasirandom numbers.
|
| 567 |
+
*
|
| 568 |
+
* Use \p generator to generate \p num 32-bit results into the device memory at
|
| 569 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 570 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 571 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 572 |
+
*
|
| 573 |
+
* Results are 32-bit values with every bit random.
|
| 574 |
+
*
|
| 575 |
+
* \param generator - Generator to use
|
| 576 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 577 |
+
* Pointer to host memory to store CPU-generated results
|
| 578 |
+
* \param num - Number of random 32-bit values to generate
|
| 579 |
+
*
|
| 580 |
+
* \return
|
| 581 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 582 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 583 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 584 |
+
* a previous kernel launch \n
|
| 585 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 586 |
+
* not a multiple of the quasirandom dimension \n
|
| 587 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 588 |
+
* - CURAND_STATUS_TYPE_ERROR if the generator is a 64 bit quasirandom generator.
|
| 589 |
+
* (use ::curandGenerateLongLong() with 64 bit quasirandom generators)
|
| 590 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 591 |
+
*/
|
| 592 |
+
curandStatus_t CURANDAPI
|
| 593 |
+
curandGenerate(curandGenerator_t generator, unsigned int *outputPtr, size_t num);
|
| 594 |
+
|
| 595 |
+
/**
|
| 596 |
+
* \brief Generate 64-bit quasirandom numbers.
|
| 597 |
+
*
|
| 598 |
+
* Use \p generator to generate \p num 64-bit results into the device memory at
|
| 599 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 600 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 601 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 602 |
+
*
|
| 603 |
+
* Results are 64-bit values with every bit random.
|
| 604 |
+
*
|
| 605 |
+
* \param generator - Generator to use
|
| 606 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 607 |
+
* Pointer to host memory to store CPU-generated results
|
| 608 |
+
* \param num - Number of random 64-bit values to generate
|
| 609 |
+
*
|
| 610 |
+
* \return
|
| 611 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 612 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 613 |
+
* a previous kernel launch \n
|
| 614 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 615 |
+
* not a multiple of the quasirandom dimension \n
|
| 616 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 617 |
+
* - CURAND_STATUS_TYPE_ERROR if the generator is not a 64 bit quasirandom generator\n
|
| 618 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 619 |
+
*/
|
| 620 |
+
curandStatus_t CURANDAPI
|
| 621 |
+
curandGenerateLongLong(curandGenerator_t generator, unsigned long long *outputPtr, size_t num);
|
| 622 |
+
|
| 623 |
+
/**
|
| 624 |
+
* \brief Generate uniformly distributed floats.
|
| 625 |
+
*
|
| 626 |
+
* Use \p generator to generate \p num float results into the device memory at
|
| 627 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 628 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 629 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 630 |
+
*
|
| 631 |
+
* Results are 32-bit floating point values between \p 0.0f and \p 1.0f,
|
| 632 |
+
* excluding \p 0.0f and including \p 1.0f.
|
| 633 |
+
*
|
| 634 |
+
* \param generator - Generator to use
|
| 635 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 636 |
+
* Pointer to host memory to store CPU-generated results
|
| 637 |
+
* \param num - Number of floats to generate
|
| 638 |
+
*
|
| 639 |
+
* \return
|
| 640 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 641 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 642 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 643 |
+
* a previous kernel launch \n
|
| 644 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 645 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 646 |
+
* not a multiple of the quasirandom dimension \n
|
| 647 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 648 |
+
*/
|
| 649 |
+
curandStatus_t CURANDAPI
|
| 650 |
+
curandGenerateUniform(curandGenerator_t generator, float *outputPtr, size_t num);
|
| 651 |
+
|
| 652 |
+
/**
|
| 653 |
+
* \brief Generate uniformly distributed doubles.
|
| 654 |
+
*
|
| 655 |
+
* Use \p generator to generate \p num double results into the device memory at
|
| 656 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 657 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 658 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 659 |
+
*
|
| 660 |
+
* Results are 64-bit double precision floating point values between
|
| 661 |
+
* \p 0.0 and \p 1.0, excluding \p 0.0 and including \p 1.0.
|
| 662 |
+
*
|
| 663 |
+
* \param generator - Generator to use
|
| 664 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 665 |
+
* Pointer to host memory to store CPU-generated results
|
| 666 |
+
* \param num - Number of doubles to generate
|
| 667 |
+
*
|
| 668 |
+
* \return
|
| 669 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 670 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 671 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 672 |
+
* a previous kernel launch \n
|
| 673 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 674 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 675 |
+
* not a multiple of the quasirandom dimension \n
|
| 676 |
+
* - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
|
| 677 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 678 |
+
*/
|
| 679 |
+
curandStatus_t CURANDAPI
|
| 680 |
+
curandGenerateUniformDouble(curandGenerator_t generator, double *outputPtr, size_t num);
|
| 681 |
+
|
| 682 |
+
/**
|
| 683 |
+
* \brief Generate normally distributed doubles.
|
| 684 |
+
*
|
| 685 |
+
* Use \p generator to generate \p n float results into the device memory at
|
| 686 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 687 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 688 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 689 |
+
*
|
| 690 |
+
* Results are 32-bit floating point values with mean \p mean and standard
|
| 691 |
+
* deviation \p stddev.
|
| 692 |
+
*
|
| 693 |
+
* Normally distributed results are generated from pseudorandom generators
|
| 694 |
+
* with a Box-Muller transform, and so require \p n to be even.
|
| 695 |
+
* Quasirandom generators use an inverse cumulative distribution
|
| 696 |
+
* function to preserve dimensionality.
|
| 697 |
+
*
|
| 698 |
+
* There may be slight numerical differences between results generated
|
| 699 |
+
* on the GPU with generators created with ::curandCreateGenerator()
|
| 700 |
+
* and results calculated on the CPU with generators created with
|
| 701 |
+
* ::curandCreateGeneratorHost(). These differences arise because of
|
| 702 |
+
* differences in results for transcendental functions. In addition,
|
| 703 |
+
* future versions of CURAND may use newer versions of the CUDA math
|
| 704 |
+
* library, so different versions of CURAND may give slightly different
|
| 705 |
+
* numerical values.
|
| 706 |
+
*
|
| 707 |
+
* \param generator - Generator to use
|
| 708 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 709 |
+
* Pointer to host memory to store CPU-generated results
|
| 710 |
+
* \param n - Number of floats to generate
|
| 711 |
+
* \param mean - Mean of normal distribution
|
| 712 |
+
* \param stddev - Standard deviation of normal distribution
|
| 713 |
+
*
|
| 714 |
+
* \return
|
| 715 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 716 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 717 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 718 |
+
* a previous kernel launch \n
|
| 719 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 720 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 721 |
+
* not a multiple of the quasirandom dimension, or is not a multiple
|
| 722 |
+
* of two for pseudorandom generators \n
|
| 723 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 724 |
+
*/
|
| 725 |
+
curandStatus_t CURANDAPI
|
| 726 |
+
curandGenerateNormal(curandGenerator_t generator, float *outputPtr,
|
| 727 |
+
size_t n, float mean, float stddev);
|
| 728 |
+
|
| 729 |
+
/**
|
| 730 |
+
* \brief Generate normally distributed doubles.
|
| 731 |
+
*
|
| 732 |
+
* Use \p generator to generate \p n double results into the device memory at
|
| 733 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 734 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 735 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 736 |
+
*
|
| 737 |
+
* Results are 64-bit floating point values with mean \p mean and standard
|
| 738 |
+
* deviation \p stddev.
|
| 739 |
+
*
|
| 740 |
+
* Normally distributed results are generated from pseudorandom generators
|
| 741 |
+
* with a Box-Muller transform, and so require \p n to be even.
|
| 742 |
+
* Quasirandom generators use an inverse cumulative distribution
|
| 743 |
+
* function to preserve dimensionality.
|
| 744 |
+
*
|
| 745 |
+
* There may be slight numerical differences between results generated
|
| 746 |
+
* on the GPU with generators created with ::curandCreateGenerator()
|
| 747 |
+
* and results calculated on the CPU with generators created with
|
| 748 |
+
* ::curandCreateGeneratorHost(). These differences arise because of
|
| 749 |
+
* differences in results for transcendental functions. In addition,
|
| 750 |
+
* future versions of CURAND may use newer versions of the CUDA math
|
| 751 |
+
* library, so different versions of CURAND may give slightly different
|
| 752 |
+
* numerical values.
|
| 753 |
+
*
|
| 754 |
+
* \param generator - Generator to use
|
| 755 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 756 |
+
* Pointer to host memory to store CPU-generated results
|
| 757 |
+
* \param n - Number of doubles to generate
|
| 758 |
+
* \param mean - Mean of normal distribution
|
| 759 |
+
* \param stddev - Standard deviation of normal distribution
|
| 760 |
+
*
|
| 761 |
+
* \return
|
| 762 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 763 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 764 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 765 |
+
* a previous kernel launch \n
|
| 766 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 767 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 768 |
+
* not a multiple of the quasirandom dimension, or is not a multiple
|
| 769 |
+
* of two for pseudorandom generators \n
|
| 770 |
+
* - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
|
| 771 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 772 |
+
*/
|
| 773 |
+
curandStatus_t CURANDAPI
|
| 774 |
+
curandGenerateNormalDouble(curandGenerator_t generator, double *outputPtr,
|
| 775 |
+
size_t n, double mean, double stddev);
|
| 776 |
+
|
| 777 |
+
/**
|
| 778 |
+
* \brief Generate log-normally distributed floats.
|
| 779 |
+
*
|
| 780 |
+
* Use \p generator to generate \p n float results into the device memory at
|
| 781 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 782 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 783 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 784 |
+
*
|
| 785 |
+
* Results are 32-bit floating point values with log-normal distribution based on
|
| 786 |
+
* an associated normal distribution with mean \p mean and standard deviation \p stddev.
|
| 787 |
+
*
|
| 788 |
+
* Normally distributed results are generated from pseudorandom generators
|
| 789 |
+
* with a Box-Muller transform, and so require \p n to be even.
|
| 790 |
+
* Quasirandom generators use an inverse cumulative distribution
|
| 791 |
+
* function to preserve dimensionality.
|
| 792 |
+
* The normally distributed results are transformed into log-normal distribution.
|
| 793 |
+
*
|
| 794 |
+
* There may be slight numerical differences between results generated
|
| 795 |
+
* on the GPU with generators created with ::curandCreateGenerator()
|
| 796 |
+
* and results calculated on the CPU with generators created with
|
| 797 |
+
* ::curandCreateGeneratorHost(). These differences arise because of
|
| 798 |
+
* differences in results for transcendental functions. In addition,
|
| 799 |
+
* future versions of CURAND may use newer versions of the CUDA math
|
| 800 |
+
* library, so different versions of CURAND may give slightly different
|
| 801 |
+
* numerical values.
|
| 802 |
+
*
|
| 803 |
+
* \param generator - Generator to use
|
| 804 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 805 |
+
* Pointer to host memory to store CPU-generated results
|
| 806 |
+
* \param n - Number of floats to generate
|
| 807 |
+
* \param mean - Mean of associated normal distribution
|
| 808 |
+
* \param stddev - Standard deviation of associated normal distribution
|
| 809 |
+
*
|
| 810 |
+
* \return
|
| 811 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 812 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 813 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 814 |
+
* a previous kernel launch \n
|
| 815 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 816 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 817 |
+
* not a multiple of the quasirandom dimension, or is not a multiple
|
| 818 |
+
* of two for pseudorandom generators \n
|
| 819 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 820 |
+
*/
|
| 821 |
+
curandStatus_t CURANDAPI
|
| 822 |
+
curandGenerateLogNormal(curandGenerator_t generator, float *outputPtr,
|
| 823 |
+
size_t n, float mean, float stddev);
|
| 824 |
+
|
| 825 |
+
/**
|
| 826 |
+
* \brief Generate log-normally distributed doubles.
|
| 827 |
+
*
|
| 828 |
+
* Use \p generator to generate \p n double results into the device memory at
|
| 829 |
+
* \p outputPtr. The device memory must have been previously allocated and be
|
| 830 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 831 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 832 |
+
*
|
| 833 |
+
* Results are 64-bit floating point values with log-normal distribution based on
|
| 834 |
+
* an associated normal distribution with mean \p mean and standard deviation \p stddev.
|
| 835 |
+
*
|
| 836 |
+
* Normally distributed results are generated from pseudorandom generators
|
| 837 |
+
* with a Box-Muller transform, and so require \p n to be even.
|
| 838 |
+
* Quasirandom generators use an inverse cumulative distribution
|
| 839 |
+
* function to preserve dimensionality.
|
| 840 |
+
* The normally distributed results are transformed into log-normal distribution.
|
| 841 |
+
*
|
| 842 |
+
* There may be slight numerical differences between results generated
|
| 843 |
+
* on the GPU with generators created with ::curandCreateGenerator()
|
| 844 |
+
* and results calculated on the CPU with generators created with
|
| 845 |
+
* ::curandCreateGeneratorHost(). These differences arise because of
|
| 846 |
+
* differences in results for transcendental functions. In addition,
|
| 847 |
+
* future versions of CURAND may use newer versions of the CUDA math
|
| 848 |
+
* library, so different versions of CURAND may give slightly different
|
| 849 |
+
* numerical values.
|
| 850 |
+
*
|
| 851 |
+
* \param generator - Generator to use
|
| 852 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 853 |
+
* Pointer to host memory to store CPU-generated results
|
| 854 |
+
* \param n - Number of doubles to generate
|
| 855 |
+
* \param mean - Mean of normal distribution
|
| 856 |
+
* \param stddev - Standard deviation of normal distribution
|
| 857 |
+
*
|
| 858 |
+
* \return
|
| 859 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 860 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 861 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 862 |
+
* a previous kernel launch \n
|
| 863 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 864 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 865 |
+
* not a multiple of the quasirandom dimension, or is not a multiple
|
| 866 |
+
* of two for pseudorandom generators \n
|
| 867 |
+
* - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
|
| 868 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 869 |
+
*/
|
| 870 |
+
curandStatus_t CURANDAPI
|
| 871 |
+
curandGenerateLogNormalDouble(curandGenerator_t generator, double *outputPtr,
|
| 872 |
+
size_t n, double mean, double stddev);
|
| 873 |
+
|
| 874 |
+
/**
|
| 875 |
+
* \brief Construct the histogram array for a Poisson distribution.
|
| 876 |
+
*
|
| 877 |
+
* Construct the histogram array for the Poisson distribution with lambda \p lambda.
|
| 878 |
+
* For lambda greater than 2000, an approximation with a normal distribution is used.
|
| 879 |
+
*
|
| 880 |
+
* \param lambda - lambda for the Poisson distribution
|
| 881 |
+
*
|
| 882 |
+
*
|
| 883 |
+
* \param discrete_distribution - pointer to the histogram in device memory
|
| 884 |
+
*
|
| 885 |
+
* \return
|
| 886 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 887 |
+
* - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
|
| 888 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
|
| 889 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the distribution pointer was null \n
|
| 890 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 891 |
+
* a previous kernel launch \n
|
| 892 |
+
* - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n
|
| 893 |
+
* - CURAND_STATUS_SUCCESS if the histogram was generated successfully \n
|
| 894 |
+
*/
|
| 895 |
+
|
| 896 |
+
curandStatus_t CURANDAPI
|
| 897 |
+
curandCreatePoissonDistribution(double lambda, curandDiscreteDistribution_t *discrete_distribution);
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
/**
|
| 902 |
+
* \brief Destroy the histogram array for a discrete distribution (e.g. Poisson).
|
| 903 |
+
*
|
| 904 |
+
* Destroy the histogram array for a discrete distribution created by curandCreatePoissonDistribution.
|
| 905 |
+
*
|
| 906 |
+
* \param discrete_distribution - pointer to device memory where the histogram is stored
|
| 907 |
+
*
|
| 908 |
+
* \return
|
| 909 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the histogram was never created \n
|
| 910 |
+
* - CURAND_STATUS_SUCCESS if the histogram was destroyed successfully \n
|
| 911 |
+
*/
|
| 912 |
+
curandStatus_t CURANDAPI
|
| 913 |
+
curandDestroyDistribution(curandDiscreteDistribution_t discrete_distribution);
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
/**
|
| 917 |
+
* \brief Generate Poisson-distributed unsigned ints.
|
| 918 |
+
*
|
| 919 |
+
* Use \p generator to generate \p n unsigned int results into device memory at
|
| 920 |
+
* \p outputPtr. The device memory must have been previously allocated and must be
|
| 921 |
+
* large enough to hold all the results. Launches are done with the stream
|
| 922 |
+
* set using ::curandSetStream(), or the null stream if no stream has been set.
|
| 923 |
+
*
|
| 924 |
+
* Results are 32-bit unsigned int point values with Poisson distribution, with lambda \p lambda.
|
| 925 |
+
*
|
| 926 |
+
* \param generator - Generator to use
|
| 927 |
+
* \param outputPtr - Pointer to device memory to store CUDA-generated results, or
|
| 928 |
+
* Pointer to host memory to store CPU-generated results
|
| 929 |
+
* \param n - Number of unsigned ints to generate
|
| 930 |
+
* \param lambda - lambda for the Poisson distribution
|
| 931 |
+
*
|
| 932 |
+
* \return
|
| 933 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 934 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 935 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 936 |
+
* a previous kernel launch \n
|
| 937 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 938 |
+
* - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
|
| 939 |
+
* not a multiple of the quasirandom dimension\n
|
| 940 |
+
* - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU or sm does not support double precision \n
|
| 941 |
+
* - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n
|
| 942 |
+
* - CURAND_STATUS_SUCCESS if the results were generated successfully \n
|
| 943 |
+
*/
|
| 944 |
+
|
| 945 |
+
curandStatus_t CURANDAPI
|
| 946 |
+
curandGeneratePoisson(curandGenerator_t generator, unsigned int *outputPtr,
|
| 947 |
+
size_t n, double lambda);
|
| 948 |
+
// just for internal usage
|
| 949 |
+
curandStatus_t CURANDAPI
|
| 950 |
+
curandGeneratePoissonMethod(curandGenerator_t generator, unsigned int *outputPtr,
|
| 951 |
+
size_t n, double lambda, curandMethod_t method);
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
curandStatus_t CURANDAPI
|
| 955 |
+
curandGenerateBinomial(curandGenerator_t generator, unsigned int *outputPtr,
|
| 956 |
+
size_t num, unsigned int n, double p);
|
| 957 |
+
// just for internal usage
|
| 958 |
+
curandStatus_t CURANDAPI
|
| 959 |
+
curandGenerateBinomialMethod(curandGenerator_t generator,
|
| 960 |
+
unsigned int *outputPtr,
|
| 961 |
+
size_t num, unsigned int n, double p,
|
| 962 |
+
curandMethod_t method);
|
| 963 |
+
|
| 964 |
+
|
| 965 |
+
/**
|
| 966 |
+
* \brief Setup starting states.
|
| 967 |
+
*
|
| 968 |
+
* Generate the starting state of the generator. This function is
|
| 969 |
+
* automatically called by generation functions such as
|
| 970 |
+
* ::curandGenerate() and ::curandGenerateUniform().
|
| 971 |
+
* It can be called manually for performance testing reasons to separate
|
| 972 |
+
* timings for starting state generation and random number generation.
|
| 973 |
+
*
|
| 974 |
+
* \param generator - Generator to update
|
| 975 |
+
*
|
| 976 |
+
* \return
|
| 977 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
|
| 978 |
+
* - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
|
| 979 |
+
* - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
|
| 980 |
+
* a previous kernel launch \n
|
| 981 |
+
* - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
|
| 982 |
+
* - CURAND_STATUS_SUCCESS if the seeds were generated successfully \n
|
| 983 |
+
*/
|
| 984 |
+
curandStatus_t CURANDAPI
|
| 985 |
+
curandGenerateSeeds(curandGenerator_t generator);
|
| 986 |
+
|
| 987 |
+
/**
|
| 988 |
+
* \brief Get direction vectors for 32-bit quasirandom number generation.
|
| 989 |
+
*
|
| 990 |
+
* Get a pointer to an array of direction vectors that can be used
|
| 991 |
+
* for quasirandom number generation. The resulting pointer will
|
| 992 |
+
* reference an array of direction vectors in host memory.
|
| 993 |
+
*
|
| 994 |
+
* The array contains vectors for many dimensions. Each dimension
|
| 995 |
+
* has 32 vectors. Each individual vector is an unsigned int.
|
| 996 |
+
*
|
| 997 |
+
* Legal values for \p set are:
|
| 998 |
+
* - CURAND_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
|
| 999 |
+
* - CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
|
| 1000 |
+
*
|
| 1001 |
+
* \param vectors - Address of pointer in which to return direction vectors
|
| 1002 |
+
* \param set - Which set of direction vectors to use
|
| 1003 |
+
*
|
| 1004 |
+
* \return
|
| 1005 |
+
* - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
|
| 1006 |
+
* - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
|
| 1007 |
+
*/
|
| 1008 |
+
curandStatus_t CURANDAPI
|
| 1009 |
+
curandGetDirectionVectors32(curandDirectionVectors32_t *vectors[], curandDirectionVectorSet_t set);
|
| 1010 |
+
|
| 1011 |
+
/**
|
| 1012 |
+
* \brief Get scramble constants for 32-bit scrambled Sobol' .
|
| 1013 |
+
*
|
| 1014 |
+
* Get a pointer to an array of scramble constants that can be used
|
| 1015 |
+
* for quasirandom number generation. The resulting pointer will
|
| 1016 |
+
* reference an array of unsinged ints in host memory.
|
| 1017 |
+
*
|
| 1018 |
+
* The array contains constants for many dimensions. Each dimension
|
| 1019 |
+
* has a single unsigned int constant.
|
| 1020 |
+
*
|
| 1021 |
+
* \param constants - Address of pointer in which to return scramble constants
|
| 1022 |
+
*
|
| 1023 |
+
* \return
|
| 1024 |
+
* - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
|
| 1025 |
+
*/
|
| 1026 |
+
curandStatus_t CURANDAPI
|
| 1027 |
+
curandGetScrambleConstants32(unsigned int * * constants);
|
| 1028 |
+
|
| 1029 |
+
/**
|
| 1030 |
+
* \brief Get direction vectors for 64-bit quasirandom number generation.
|
| 1031 |
+
*
|
| 1032 |
+
* Get a pointer to an array of direction vectors that can be used
|
| 1033 |
+
* for quasirandom number generation. The resulting pointer will
|
| 1034 |
+
* reference an array of direction vectors in host memory.
|
| 1035 |
+
*
|
| 1036 |
+
* The array contains vectors for many dimensions. Each dimension
|
| 1037 |
+
* has 64 vectors. Each individual vector is an unsigned long long.
|
| 1038 |
+
*
|
| 1039 |
+
* Legal values for \p set are:
|
| 1040 |
+
* - CURAND_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
|
| 1041 |
+
* - CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
|
| 1042 |
+
*
|
| 1043 |
+
* \param vectors - Address of pointer in which to return direction vectors
|
| 1044 |
+
* \param set - Which set of direction vectors to use
|
| 1045 |
+
*
|
| 1046 |
+
* \return
|
| 1047 |
+
* - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
|
| 1048 |
+
* - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
|
| 1049 |
+
*/
|
| 1050 |
+
curandStatus_t CURANDAPI
|
| 1051 |
+
curandGetDirectionVectors64(curandDirectionVectors64_t *vectors[], curandDirectionVectorSet_t set);
|
| 1052 |
+
|
| 1053 |
+
/**
|
| 1054 |
+
* \brief Get scramble constants for 64-bit scrambled Sobol' .
|
| 1055 |
+
*
|
| 1056 |
+
* Get a pointer to an array of scramble constants that can be used
|
| 1057 |
+
* for quasirandom number generation. The resulting pointer will
|
| 1058 |
+
* reference an array of unsinged long longs in host memory.
|
| 1059 |
+
*
|
| 1060 |
+
* The array contains constants for many dimensions. Each dimension
|
| 1061 |
+
* has a single unsigned long long constant.
|
| 1062 |
+
*
|
| 1063 |
+
* \param constants - Address of pointer in which to return scramble constants
|
| 1064 |
+
*
|
| 1065 |
+
* \return
|
| 1066 |
+
* - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
|
| 1067 |
+
*/
|
| 1068 |
+
curandStatus_t CURANDAPI
|
| 1069 |
+
curandGetScrambleConstants64(unsigned long long * * constants);
|
| 1070 |
+
|
| 1071 |
+
/** @} */
|
| 1072 |
+
|
| 1073 |
+
#endif // __CUDACC_RTC__
|
| 1074 |
+
|
| 1075 |
+
#if defined(__cplusplus)
|
| 1076 |
+
}
|
| 1077 |
+
#endif /* __cplusplus */
|
| 1078 |
+
|
| 1079 |
+
|
| 1080 |
+
#endif /* !defined(CURAND_H_) */
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_discrete.h
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
|
| 49 |
+
#if !defined(CURANDDISCRETE_H_)
|
| 50 |
+
#define CURANDDISCRETE_H_
|
| 51 |
+
|
| 52 |
+
struct curandDistributionShift_st {
|
| 53 |
+
curandDistribution_t probability;
|
| 54 |
+
curandDistribution_t host_probability;
|
| 55 |
+
unsigned int shift;
|
| 56 |
+
unsigned int length;
|
| 57 |
+
unsigned int host_gen;
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
struct curandHistogramM2_st {
|
| 61 |
+
curandHistogramM2V_t V;
|
| 62 |
+
curandHistogramM2V_t host_V;
|
| 63 |
+
curandHistogramM2K_t K;
|
| 64 |
+
curandHistogramM2K_t host_K;
|
| 65 |
+
unsigned int host_gen;
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
struct curandDistributionM2Shift_st {
|
| 70 |
+
curandHistogramM2_t histogram;
|
| 71 |
+
curandHistogramM2_t host_histogram;
|
| 72 |
+
unsigned int shift;
|
| 73 |
+
unsigned int length;
|
| 74 |
+
unsigned int host_gen;
|
| 75 |
+
};
|
| 76 |
+
|
| 77 |
+
struct curandDiscreteDistribution_st {
|
| 78 |
+
curandDiscreteDistribution_t self_host_ptr;
|
| 79 |
+
curandDistributionM2Shift_t M2;
|
| 80 |
+
curandDistributionM2Shift_t host_M2;
|
| 81 |
+
double stddev;
|
| 82 |
+
double mean;
|
| 83 |
+
curandMethod_t method;
|
| 84 |
+
unsigned int host_gen;
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
#endif // !defined(CURANDDISCRETE_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_discrete2.h
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_DISCRETE_H_)
|
| 52 |
+
#define CURAND_DISCRETE_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include "curand_mrg32k3a.h"
|
| 65 |
+
#include "curand_mtgp32_kernel.h"
|
| 66 |
+
#include "curand_philox4x32_x.h"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
template <typename T>
|
| 70 |
+
QUALIFIERS unsigned int _curand_discrete(T x, curandDiscreteDistribution_t discrete_distribution){
|
| 71 |
+
if (discrete_distribution->method == CURAND_M2){
|
| 72 |
+
return _curand_M2_double(x, discrete_distribution->M2);
|
| 73 |
+
}
|
| 74 |
+
return (unsigned int)((discrete_distribution->stddev * _curand_normal_icdf_double(x)) + discrete_distribution->mean + 0.5);
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
template <typename STATE>
|
| 79 |
+
QUALIFIERS unsigned int curand__discrete(STATE state, curandDiscreteDistribution_t discrete_distribution){
|
| 80 |
+
if (discrete_distribution->method == CURAND_M2){
|
| 81 |
+
return curand_M2_double(state, discrete_distribution->M2);
|
| 82 |
+
}
|
| 83 |
+
return (unsigned int)((discrete_distribution->stddev * curand_normal_double(state)) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
template <typename STATE>
|
| 87 |
+
QUALIFIERS uint4 curand__discrete4(STATE state, curandDiscreteDistribution_t discrete_distribution){
|
| 88 |
+
if (discrete_distribution->method == CURAND_M2){
|
| 89 |
+
return curand_M2_double4(state, discrete_distribution->M2);
|
| 90 |
+
}
|
| 91 |
+
double4 _res;
|
| 92 |
+
uint4 result;
|
| 93 |
+
_res = curand_normal4_double(state);
|
| 94 |
+
result.x = (unsigned int)((discrete_distribution->stddev * _res.x) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 95 |
+
result.y = (unsigned int)((discrete_distribution->stddev * _res.y) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 96 |
+
result.z = (unsigned int)((discrete_distribution->stddev * _res.z) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 97 |
+
result.w = (unsigned int)((discrete_distribution->stddev * _res.w) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 98 |
+
return result;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
/*
|
| 102 |
+
* \brief Return a discrete distributed unsigned int from a XORWOW generator.
|
| 103 |
+
*
|
| 104 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 105 |
+
* distribution defined by \p discrete_distribution from the XORWOW generator in \p state,
|
| 106 |
+
* increment position of generator by one.
|
| 107 |
+
*
|
| 108 |
+
* \param state - Pointer to state to update
|
| 109 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 110 |
+
*
|
| 111 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 112 |
+
*/
|
| 113 |
+
QUALIFIERS unsigned int curand_discrete(curandStateXORWOW_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 114 |
+
{
|
| 115 |
+
return curand__discrete(state, discrete_distribution);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
/*
|
| 119 |
+
* \brief Return a discrete distributed unsigned int from a Philox4_32_10 generator.
|
| 120 |
+
*
|
| 121 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 122 |
+
* distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
|
| 123 |
+
* increment position of generator by one.
|
| 124 |
+
*
|
| 125 |
+
* \param state - Pointer to state to update
|
| 126 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 127 |
+
*
|
| 128 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 129 |
+
*/
|
| 130 |
+
QUALIFIERS unsigned int curand_discrete(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 131 |
+
{
|
| 132 |
+
return curand__discrete(state, discrete_distribution);
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
/*
|
| 136 |
+
* \brief Return four discrete distributed unsigned ints from a Philox4_32_10 generator.
|
| 137 |
+
*
|
| 138 |
+
* Return four single discrete distributed unsigned ints derived from a
|
| 139 |
+
* distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
|
| 140 |
+
* increment position of generator by one.
|
| 141 |
+
*
|
| 142 |
+
* \param state - Pointer to state to update
|
| 143 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 144 |
+
*
|
| 145 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 146 |
+
*/
|
| 147 |
+
QUALIFIERS uint4 curand_discrete4(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 148 |
+
{
|
| 149 |
+
return curand__discrete4(state, discrete_distribution);
|
| 150 |
+
}
|
| 151 |
+
/*
|
| 152 |
+
* \brief Return a discrete distributed unsigned int from a MRG32k3a generator.
|
| 153 |
+
*
|
| 154 |
+
* Re turn a single discrete distributed unsigned int derived from a
|
| 155 |
+
* distribution defined by \p discrete_distribution from the MRG32k3a generator in \p state,
|
| 156 |
+
* increment position of generator by one.
|
| 157 |
+
*
|
| 158 |
+
* \param state - Pointer to state to update
|
| 159 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 160 |
+
*
|
| 161 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 162 |
+
*/
|
| 163 |
+
QUALIFIERS unsigned int curand_discrete(curandStateMRG32k3a_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 164 |
+
{
|
| 165 |
+
return curand__discrete(state, discrete_distribution);
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
/*
|
| 169 |
+
* \brief Return a discrete distributed unsigned int from a MTGP32 generator.
|
| 170 |
+
*
|
| 171 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 172 |
+
* distribution defined by \p discrete_distribution from the MTGP32 generator in \p state,
|
| 173 |
+
* increment position of generator by one.
|
| 174 |
+
*
|
| 175 |
+
* \param state - Pointer to state to update
|
| 176 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 177 |
+
*
|
| 178 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 179 |
+
*/
|
| 180 |
+
QUALIFIERS unsigned int curand_discrete(curandStateMtgp32_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 181 |
+
{
|
| 182 |
+
return curand__discrete(state, discrete_distribution);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/*
|
| 186 |
+
* \brief Return a discrete distributed unsigned int from a Sobol32 generator.
|
| 187 |
+
*
|
| 188 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 189 |
+
* distribution defined by \p discrete_distribution from the Sobol32 generator in \p state,
|
| 190 |
+
* increment position of generator by one.
|
| 191 |
+
*
|
| 192 |
+
* \param state - Pointer to state to update
|
| 193 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 194 |
+
*
|
| 195 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 196 |
+
*/
|
| 197 |
+
QUALIFIERS unsigned int curand_discrete(curandStateSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 198 |
+
{
|
| 199 |
+
return curand__discrete(state, discrete_distribution);
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
/*
|
| 203 |
+
* \brief Return a discrete distributed unsigned int from a scrambled Sobol32 generator.
|
| 204 |
+
*
|
| 205 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 206 |
+
* distribution defined by \p discrete_distribution from the scrambled Sobol32 generator in \p state,
|
| 207 |
+
* increment position of generator by one.
|
| 208 |
+
*
|
| 209 |
+
* \param state - Pointer to state to update
|
| 210 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 211 |
+
*
|
| 212 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 213 |
+
*/
|
| 214 |
+
QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 215 |
+
{
|
| 216 |
+
return curand__discrete(state, discrete_distribution);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
/*
|
| 220 |
+
* \brief Return a discrete distributed unsigned int from a Sobol64 generator.
|
| 221 |
+
*
|
| 222 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 223 |
+
* distribution defined by \p discrete_distribution from the Sobol64 generator in \p state,
|
| 224 |
+
* increment position of generator by one.
|
| 225 |
+
*
|
| 226 |
+
* \param state - Pointer to state to update
|
| 227 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 228 |
+
*
|
| 229 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 230 |
+
*/
|
| 231 |
+
QUALIFIERS unsigned int curand_discrete(curandStateSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 232 |
+
{
|
| 233 |
+
return curand__discrete(state, discrete_distribution);
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
/*
|
| 237 |
+
* \brief Return a discrete distributed unsigned int from a scrambled Sobol64 generator.
|
| 238 |
+
*
|
| 239 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 240 |
+
* distribution defined by \p discrete_distribution from the scrambled Sobol64 generator in \p state,
|
| 241 |
+
* increment position of generator by one.
|
| 242 |
+
*
|
| 243 |
+
* \param state - Pointer to state to update
|
| 244 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 245 |
+
*
|
| 246 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 247 |
+
*/
|
| 248 |
+
QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 249 |
+
{
|
| 250 |
+
return curand__discrete(state, discrete_distribution);
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
#endif // !defined(CURAND_DISCRETE_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_globals.h
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
#ifndef CURAND_GLOBALS_H
|
| 49 |
+
#define CURAND_GLOBALS_H
|
| 50 |
+
|
| 51 |
+
#define MAX_XOR_N (5)
|
| 52 |
+
#define SKIPAHEAD_BLOCKSIZE (4)
|
| 53 |
+
#define SKIPAHEAD_MASK ((1<<SKIPAHEAD_BLOCKSIZE)-1)
|
| 54 |
+
#define CURAND_2POW32 (4294967296.f)
|
| 55 |
+
#define CURAND_2POW32_DOUBLE (4294967296.)
|
| 56 |
+
#define CURAND_2POW32_INV (2.3283064e-10f)
|
| 57 |
+
#define CURAND_2POW32_INV_DOUBLE (2.3283064365386963e-10)
|
| 58 |
+
#define CURAND_2POW53_INV_DOUBLE (1.1102230246251565e-16)
|
| 59 |
+
#define CURAND_2POW32_INV_2PI (2.3283064e-10f * 6.2831855f)
|
| 60 |
+
#define CURAND_2PI (6.2831855f)
|
| 61 |
+
#define CURAND_2POW53_INV_2PI_DOUBLE (1.1102230246251565e-16 * 6.2831853071795860)
|
| 62 |
+
#define CURAND_PI_DOUBLE (3.1415926535897932)
|
| 63 |
+
#define CURAND_2PI_DOUBLE (6.2831853071795860)
|
| 64 |
+
#define CURAND_SQRT2 (-1.4142135f)
|
| 65 |
+
#define CURAND_SQRT2_DOUBLE (-1.4142135623730951)
|
| 66 |
+
|
| 67 |
+
#define SOBOL64_ITR_BINARY_DIVIDE 2
|
| 68 |
+
#define SOBOL_M2_BINARY_DIVIDE 10
|
| 69 |
+
#define MTGP32_M2_BINARY_DIVIDE 32
|
| 70 |
+
#define MAX_LAMBDA 400000
|
| 71 |
+
#define MIN_GAUSS_LAMBDA 2000
|
| 72 |
+
|
| 73 |
+
struct normal_args_st {
|
| 74 |
+
float mean;
|
| 75 |
+
float stddev;
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
typedef struct normal_args_st normal_args_t;
|
| 79 |
+
|
| 80 |
+
struct normal_args_double_st {
|
| 81 |
+
double mean;
|
| 82 |
+
double stddev;
|
| 83 |
+
};
|
| 84 |
+
|
| 85 |
+
typedef struct normal_args_double_st normal_args_double_t;
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
#endif
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_kernel.h
ADDED
|
@@ -0,0 +1,1677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_KERNEL_H_)
|
| 52 |
+
#define CURAND_KERNEL_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#if !defined(QUALIFIERS)
|
| 61 |
+
#define QUALIFIERS static __forceinline__ __device__
|
| 62 |
+
#endif
|
| 63 |
+
|
| 64 |
+
/* To prevent unused parameter warnings */
|
| 65 |
+
#if !defined(GCC_UNUSED_PARAMETER)
|
| 66 |
+
#if defined(__GNUC__)
|
| 67 |
+
#define GCC_UNUSED_PARAMETER __attribute__((unused))
|
| 68 |
+
#else
|
| 69 |
+
#define GCC_UNUSED_PARAMETER
|
| 70 |
+
#endif /* defined(__GNUC__) */
|
| 71 |
+
#endif /* !defined(GCC_UNUSED_PARAMETER) */
|
| 72 |
+
|
| 73 |
+
#include <nv/target>
|
| 74 |
+
|
| 75 |
+
#ifdef __CUDACC_RTC__
|
| 76 |
+
#define CURAND_DETAIL_USE_CUDA_STL
|
| 77 |
+
#endif
|
| 78 |
+
|
| 79 |
+
#if __cplusplus >= 201103L
|
| 80 |
+
# ifdef CURAND_DETAIL_USE_CUDA_STL
|
| 81 |
+
# define CURAND_STD cuda::std
|
| 82 |
+
# include <cuda/std/type_traits>
|
| 83 |
+
# else
|
| 84 |
+
# define CURAND_STD std
|
| 85 |
+
# include <type_traits>
|
| 86 |
+
# endif // CURAND_DETAIL_USE_CUDA_STL
|
| 87 |
+
#else
|
| 88 |
+
// To support C++03 compilation
|
| 89 |
+
# define CURAND_STD curand_detail
|
| 90 |
+
namespace curand_detail {
|
| 91 |
+
template<bool B, class T = void>
|
| 92 |
+
struct enable_if {};
|
| 93 |
+
|
| 94 |
+
template<class T>
|
| 95 |
+
struct enable_if<true, T> { typedef T type; };
|
| 96 |
+
|
| 97 |
+
template<class T, class U>
|
| 98 |
+
struct is_same { static const bool value = false; };
|
| 99 |
+
|
| 100 |
+
template<class T>
|
| 101 |
+
struct is_same<T, T> { static const bool value = true; };
|
| 102 |
+
} // namespace curand_detail
|
| 103 |
+
#endif // __cplusplus >= 201103L
|
| 104 |
+
|
| 105 |
+
#ifndef __CUDACC_RTC__
|
| 106 |
+
#include <math.h>
|
| 107 |
+
#endif // __CUDACC_RTC__
|
| 108 |
+
|
| 109 |
+
#include "curand.h"
|
| 110 |
+
#include "curand_discrete.h"
|
| 111 |
+
#include "curand_precalc.h"
|
| 112 |
+
#include "curand_mrg32k3a.h"
|
| 113 |
+
#include "curand_mtgp32_kernel.h"
|
| 114 |
+
#include "curand_philox4x32_x.h"
|
| 115 |
+
#include "curand_globals.h"
|
| 116 |
+
|
| 117 |
+
/* Test RNG */
|
| 118 |
+
/* This generator uses the formula:
|
| 119 |
+
x_n = x_(n-1) + 1 mod 2^32
|
| 120 |
+
x_0 = (unsigned int)seed * 3
|
| 121 |
+
Subsequences are spaced 31337 steps apart.
|
| 122 |
+
*/
|
| 123 |
+
struct curandStateTest {
|
| 124 |
+
unsigned int v;
|
| 125 |
+
};
|
| 126 |
+
|
| 127 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 128 |
+
typedef struct curandStateTest curandStateTest_t;
|
| 129 |
+
/** \endcond */
|
| 130 |
+
|
| 131 |
+
/* XORSHIFT FAMILY RNGs */
|
| 132 |
+
/* These generators are a family proposed by Marsaglia. They keep state
|
| 133 |
+
in 32 bit chunks, then use repeated shift and xor operations to scramble
|
| 134 |
+
the bits. The following generators are a combination of a simple Weyl
|
| 135 |
+
generator with an N variable XORSHIFT generator.
|
| 136 |
+
*/
|
| 137 |
+
|
| 138 |
+
/* XORSHIFT RNG */
|
| 139 |
+
/* This generator uses the xorwow formula of
|
| 140 |
+
www.jstatsoft.org/v08/i14/paper page 5
|
| 141 |
+
Has period 2^192 - 2^32.
|
| 142 |
+
*/
|
| 143 |
+
/**
|
| 144 |
+
* CURAND XORWOW state
|
| 145 |
+
*/
|
| 146 |
+
struct curandStateXORWOW;
|
| 147 |
+
|
| 148 |
+
/*
|
| 149 |
+
* Implementation details not in reference documentation */
|
| 150 |
+
struct curandStateXORWOW {
|
| 151 |
+
unsigned int d, v[5];
|
| 152 |
+
int boxmuller_flag;
|
| 153 |
+
int boxmuller_flag_double;
|
| 154 |
+
float boxmuller_extra;
|
| 155 |
+
double boxmuller_extra_double;
|
| 156 |
+
};
|
| 157 |
+
|
| 158 |
+
/*
|
| 159 |
+
* CURAND XORWOW state
|
| 160 |
+
*/
|
| 161 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 162 |
+
typedef struct curandStateXORWOW curandStateXORWOW_t;
|
| 163 |
+
|
| 164 |
+
#define EXTRA_FLAG_NORMAL 0x00000001
|
| 165 |
+
#define EXTRA_FLAG_LOG_NORMAL 0x00000002
|
| 166 |
+
/** \endcond */
|
| 167 |
+
|
| 168 |
+
/* Combined Multiple Recursive Generators */
|
| 169 |
+
/* These generators are a family proposed by L'Ecuyer. They keep state
|
| 170 |
+
in sets of doubles, then use repeated modular arithmetic multiply operations
|
| 171 |
+
to scramble the bits in each set, and combine the result.
|
| 172 |
+
*/
|
| 173 |
+
|
| 174 |
+
/* MRG32k3a RNG */
|
| 175 |
+
/* This generator uses the MRG32k3A formula of
|
| 176 |
+
http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/c++/streams4.pdf
|
| 177 |
+
Has period 2^191.
|
| 178 |
+
*/
|
| 179 |
+
|
| 180 |
+
/* moduli for the recursions */
|
| 181 |
+
/** \cond UNHIDE_DEFINES */
|
| 182 |
+
#define MRG32K3A_MOD1 4294967087.
|
| 183 |
+
#define MRG32K3A_MOD2 4294944443.
|
| 184 |
+
|
| 185 |
+
/* Constants used in generation */
|
| 186 |
+
|
| 187 |
+
#define MRG32K3A_A12 1403580.
|
| 188 |
+
#define MRG32K3A_A13N 810728.
|
| 189 |
+
#define MRG32K3A_A21 527612.
|
| 190 |
+
#define MRG32K3A_A23N 1370589.
|
| 191 |
+
#define MRG32K3A_NORM (2.3283065498378288e-10)
|
| 192 |
+
//
|
| 193 |
+
// #define MRG32K3A_BITS_NORM ((double)((POW32_DOUBLE-1.0)/MOD1))
|
| 194 |
+
// above constant, used verbatim, rounds differently on some host systems.
|
| 195 |
+
#define MRG32K3A_BITS_NORM 1.000000048662
|
| 196 |
+
|
| 197 |
+
/** \endcond */
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
/**
|
| 203 |
+
* CURAND MRG32K3A state
|
| 204 |
+
*/
|
| 205 |
+
struct curandStateMRG32k3a;
|
| 206 |
+
|
| 207 |
+
/* Implementation details not in reference documentation */
|
| 208 |
+
struct curandStateMRG32k3a {
|
| 209 |
+
unsigned int s1[3];
|
| 210 |
+
unsigned int s2[3];
|
| 211 |
+
int boxmuller_flag;
|
| 212 |
+
int boxmuller_flag_double;
|
| 213 |
+
float boxmuller_extra;
|
| 214 |
+
double boxmuller_extra_double;
|
| 215 |
+
};
|
| 216 |
+
|
| 217 |
+
/*
|
| 218 |
+
* CURAND MRG32K3A state
|
| 219 |
+
*/
|
| 220 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 221 |
+
typedef struct curandStateMRG32k3a curandStateMRG32k3a_t;
|
| 222 |
+
/** \endcond */
|
| 223 |
+
|
| 224 |
+
/* SOBOL QRNG */
|
| 225 |
+
/**
|
| 226 |
+
* CURAND Sobol32 state
|
| 227 |
+
*/
|
| 228 |
+
struct curandStateSobol32;
|
| 229 |
+
|
| 230 |
+
/* Implementation details not in reference documentation */
|
| 231 |
+
struct curandStateSobol32 {
|
| 232 |
+
unsigned int i, x, c;
|
| 233 |
+
unsigned int direction_vectors[32];
|
| 234 |
+
};
|
| 235 |
+
|
| 236 |
+
/*
|
| 237 |
+
* CURAND Sobol32 state
|
| 238 |
+
*/
|
| 239 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 240 |
+
typedef struct curandStateSobol32 curandStateSobol32_t;
|
| 241 |
+
/** \endcond */
|
| 242 |
+
|
| 243 |
+
/**
|
| 244 |
+
* CURAND Scrambled Sobol32 state
|
| 245 |
+
*/
|
| 246 |
+
struct curandStateScrambledSobol32;
|
| 247 |
+
|
| 248 |
+
/* Implementation details not in reference documentation */
|
| 249 |
+
struct curandStateScrambledSobol32 {
|
| 250 |
+
unsigned int i, x, c;
|
| 251 |
+
unsigned int direction_vectors[32];
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
/*
|
| 255 |
+
* CURAND Scrambled Sobol32 state
|
| 256 |
+
*/
|
| 257 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 258 |
+
typedef struct curandStateScrambledSobol32 curandStateScrambledSobol32_t;
|
| 259 |
+
/** \endcond */
|
| 260 |
+
|
| 261 |
+
/**
|
| 262 |
+
* CURAND Sobol64 state
|
| 263 |
+
*/
|
| 264 |
+
struct curandStateSobol64;
|
| 265 |
+
|
| 266 |
+
/* Implementation details not in reference documentation */
|
| 267 |
+
struct curandStateSobol64 {
|
| 268 |
+
unsigned long long i, x, c;
|
| 269 |
+
unsigned long long direction_vectors[64];
|
| 270 |
+
};
|
| 271 |
+
|
| 272 |
+
/*
|
| 273 |
+
* CURAND Sobol64 state
|
| 274 |
+
*/
|
| 275 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 276 |
+
typedef struct curandStateSobol64 curandStateSobol64_t;
|
| 277 |
+
/** \endcond */
|
| 278 |
+
|
| 279 |
+
/**
|
| 280 |
+
* CURAND Scrambled Sobol64 state
|
| 281 |
+
*/
|
| 282 |
+
struct curandStateScrambledSobol64;
|
| 283 |
+
|
| 284 |
+
/* Implementation details not in reference documentation */
|
| 285 |
+
struct curandStateScrambledSobol64 {
|
| 286 |
+
unsigned long long i, x, c;
|
| 287 |
+
unsigned long long direction_vectors[64];
|
| 288 |
+
};
|
| 289 |
+
|
| 290 |
+
/*
|
| 291 |
+
* CURAND Scrambled Sobol64 state
|
| 292 |
+
*/
|
| 293 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 294 |
+
typedef struct curandStateScrambledSobol64 curandStateScrambledSobol64_t;
|
| 295 |
+
/** \endcond */
|
| 296 |
+
|
| 297 |
+
/*
|
| 298 |
+
* Default RNG
|
| 299 |
+
*/
|
| 300 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 301 |
+
typedef struct curandStateXORWOW curandState_t;
|
| 302 |
+
typedef struct curandStateXORWOW curandState;
|
| 303 |
+
/** \endcond */
|
| 304 |
+
|
| 305 |
+
/****************************************************************************/
|
| 306 |
+
/* Utility functions needed by RNGs */
|
| 307 |
+
/****************************************************************************/
|
| 308 |
+
/** \cond UNHIDE_UTILITIES */
|
| 309 |
+
/*
|
| 310 |
+
multiply vector by matrix, store in result
|
| 311 |
+
matrix is n x n, measured in 32 bit units
|
| 312 |
+
matrix is stored in row major order
|
| 313 |
+
vector and result cannot be same pointer
|
| 314 |
+
*/
|
| 315 |
+
template<int N>
|
| 316 |
+
QUALIFIERS void __curand_matvec_inplace(unsigned int *vector, unsigned int *matrix)
|
| 317 |
+
{
|
| 318 |
+
unsigned int result[N] = { 0 };
|
| 319 |
+
for(int i = 0; i < N; i++) {
|
| 320 |
+
#ifdef __CUDA_ARCH__
|
| 321 |
+
#pragma unroll 16
|
| 322 |
+
#endif
|
| 323 |
+
for(int j = 0; j < 32; j++) {
|
| 324 |
+
if(vector[i] & (1 << j)) {
|
| 325 |
+
for(int k = 0; k < N; k++) {
|
| 326 |
+
result[k] ^= matrix[N * (i * 32 + j) + k];
|
| 327 |
+
}
|
| 328 |
+
}
|
| 329 |
+
}
|
| 330 |
+
}
|
| 331 |
+
for(int i = 0; i < N; i++) {
|
| 332 |
+
vector[i] = result[i];
|
| 333 |
+
}
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
QUALIFIERS void __curand_matvec(unsigned int *vector, unsigned int *matrix,
|
| 337 |
+
unsigned int *result, int n)
|
| 338 |
+
{
|
| 339 |
+
for(int i = 0; i < n; i++) {
|
| 340 |
+
result[i] = 0;
|
| 341 |
+
}
|
| 342 |
+
for(int i = 0; i < n; i++) {
|
| 343 |
+
for(int j = 0; j < 32; j++) {
|
| 344 |
+
if(vector[i] & (1 << j)) {
|
| 345 |
+
for(int k = 0; k < n; k++) {
|
| 346 |
+
result[k] ^= matrix[n * (i * 32 + j) + k];
|
| 347 |
+
}
|
| 348 |
+
}
|
| 349 |
+
}
|
| 350 |
+
}
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
/* generate identity matrix */
|
| 354 |
+
QUALIFIERS void __curand_matidentity(unsigned int *matrix, int n)
|
| 355 |
+
{
|
| 356 |
+
int r;
|
| 357 |
+
for(int i = 0; i < n * 32; i++) {
|
| 358 |
+
for(int j = 0; j < n; j++) {
|
| 359 |
+
r = i & 31;
|
| 360 |
+
if(i / 32 == j) {
|
| 361 |
+
matrix[i * n + j] = (1 << r);
|
| 362 |
+
} else {
|
| 363 |
+
matrix[i * n + j] = 0;
|
| 364 |
+
}
|
| 365 |
+
}
|
| 366 |
+
}
|
| 367 |
+
}
|
| 368 |
+
|
| 369 |
+
/* multiply matrixA by matrixB, store back in matrixA
|
| 370 |
+
matrixA and matrixB must not be same matrix */
|
| 371 |
+
QUALIFIERS void __curand_matmat(unsigned int *matrixA, unsigned int *matrixB, int n)
|
| 372 |
+
{
|
| 373 |
+
unsigned int result[MAX_XOR_N];
|
| 374 |
+
for(int i = 0; i < n * 32; i++) {
|
| 375 |
+
__curand_matvec(matrixA + i * n, matrixB, result, n);
|
| 376 |
+
for(int j = 0; j < n; j++) {
|
| 377 |
+
matrixA[i * n + j] = result[j];
|
| 378 |
+
}
|
| 379 |
+
}
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
/* copy vectorA to vector */
|
| 383 |
+
QUALIFIERS void __curand_veccopy(unsigned int *vector, unsigned int *vectorA, int n)
|
| 384 |
+
{
|
| 385 |
+
for(int i = 0; i < n; i++) {
|
| 386 |
+
vector[i] = vectorA[i];
|
| 387 |
+
}
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
/* copy matrixA to matrix */
|
| 391 |
+
QUALIFIERS void __curand_matcopy(unsigned int *matrix, unsigned int *matrixA, int n)
|
| 392 |
+
{
|
| 393 |
+
for(int i = 0; i < n * n * 32; i++) {
|
| 394 |
+
matrix[i] = matrixA[i];
|
| 395 |
+
}
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
/* compute matrixA to power p, store result in matrix */
|
| 399 |
+
QUALIFIERS void __curand_matpow(unsigned int *matrix, unsigned int *matrixA,
|
| 400 |
+
unsigned long long p, int n)
|
| 401 |
+
{
|
| 402 |
+
unsigned int matrixR[MAX_XOR_N * MAX_XOR_N * 32];
|
| 403 |
+
unsigned int matrixS[MAX_XOR_N * MAX_XOR_N * 32];
|
| 404 |
+
__curand_matidentity(matrix, n);
|
| 405 |
+
__curand_matcopy(matrixR, matrixA, n);
|
| 406 |
+
while(p) {
|
| 407 |
+
if(p & 1) {
|
| 408 |
+
__curand_matmat(matrix, matrixR, n);
|
| 409 |
+
}
|
| 410 |
+
__curand_matcopy(matrixS, matrixR, n);
|
| 411 |
+
__curand_matmat(matrixR, matrixS, n);
|
| 412 |
+
p >>= 1;
|
| 413 |
+
}
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
/****************************************************************************/
|
| 417 |
+
/* Utility functions needed by MRG32k3a RNG */
|
| 418 |
+
/* Matrix operations modulo some integer less than 2**32, done in */
|
| 419 |
+
/* double precision floating point, with care not to overflow 53 bits */
|
| 420 |
+
/****************************************************************************/
|
| 421 |
+
|
| 422 |
+
/* return i mod m. */
|
| 423 |
+
/* assumes i and m are integers represented accurately in doubles */
|
| 424 |
+
|
| 425 |
+
QUALIFIERS double curand_MRGmod(double i, double m)
|
| 426 |
+
{
|
| 427 |
+
double quo;
|
| 428 |
+
double rem;
|
| 429 |
+
quo = floor(i/m);
|
| 430 |
+
rem = i - (quo*m);
|
| 431 |
+
if (rem < 0.0) rem += m;
|
| 432 |
+
return rem;
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
/* Multiplication modulo m. Inputs i and j less than 2**32 */
/* Ensure intermediate results do not exceed 2**53 */

QUALIFIERS double curand_MRGmodMul(double i, double j, double m)
{
    double tempHi;
    double tempLo;

    /* Split i at 2**17 (131072) so each partial product of a <2**32 factor
       stays below 2**53 and remains exact in a double. */
    tempHi = floor(i/131072.0);
    tempLo = i - (tempHi*131072.0);
    /* i*j mod m == ((hi*j mod m) * 2**17 + (lo*j mod m)) mod m */
    tempLo = curand_MRGmod( curand_MRGmod( (tempHi * j), m) * 131072.0 + curand_MRGmod(tempLo * j, m),m);

    if (tempLo < 0.0) tempLo += m;
    return tempLo;
}
/* multiply 3 by 3 matrices of doubles, modulo m */

QUALIFIERS void curand_MRGmatMul3x3(unsigned int i1[][3],unsigned int i2[][3],unsigned int o[][3],double m)
{
    int i,j;
    /* Accumulate into a temporary so that o may alias i1 and/or i2
       (callers in this file pass the same matrix for several arguments). */
    double temp[3][3];
    for (i=0; i<3; i++){
        for (j=0; j<3; j++){
            /* Each partial product is already reduced mod m, so the 3-term
               sum cannot overflow the exact double range. */
            temp[i][j] = ( curand_MRGmodMul(i1[i][0], i2[0][j], m) +
                           curand_MRGmodMul(i1[i][1], i2[1][j], m) +
                           curand_MRGmodMul(i1[i][2], i2[2][j], m));
            temp[i][j] = curand_MRGmod( temp[i][j], m );
        }
    }
    /* Write back only after the full product is computed. */
    for (i=0; i<3; i++){
        for (j=0; j<3; j++){
            o[i][j] = (unsigned int)temp[i][j];
        }
    }
}
/* multiply 3 by 3 matrix times 3 by 1 vector of doubles, modulo m */
/* The product overwrites v in place. */

QUALIFIERS void curand_MRGmatVecMul3x3( unsigned int i[][3], unsigned int v[], double m)
{
    int k;
    /* Compute into t[] first so all reads of v happen before any write. */
    double t[3];
    for (k = 0; k < 3; k++) {
        t[k] = ( curand_MRGmodMul(i[k][0], v[0], m) +
                 curand_MRGmodMul(i[k][1], v[1], m) +
                 curand_MRGmodMul(i[k][2], v[2], m) );
        t[k] = curand_MRGmod( t[k], m );
    }
    for (k = 0; k < 3; k++) {
        v[k] = (unsigned int)t[k];
    }

}
/* raise a 3 by 3 matrix of doubles to a 64 bit integer power pow, modulo m */
/* input is index zero of an array of 3 by 3 matrices m, */
/* each m = m[0]**(2**index) */

QUALIFIERS void curand_MRGmatPow3x3( unsigned int in[][3][3], unsigned int o[][3], double m, unsigned long long pow )
{
    int i,j;
    /* Start o as the 3x3 identity. */
    for ( i = 0; i < 3; i++ ) {
        for ( j = 0; j < 3; j++ ) {
            o[i][j] = 0;
            if ( i == j ) o[i][j] = 1;
        }
    }
    i = 0;
    /* NOTE(review): multiplies the identity by its own first row in place,
       which leaves o unchanged -- looks like a deliberate no-op; confirm. */
    curand_MRGmatVecMul3x3(o,o[0],m);
    /* Square-and-multiply using the caller-supplied table of squarings:
       in[i] holds in[0]**(2**i), per the comment above. */
    while (pow) {
        if ( pow & 1ll ) {
            curand_MRGmatMul3x3(in[i], o, o, m);
        }
        i++;
        pow >>= 1;
    }
}
/* raise a 3 by 3 matrix of doubles to the power */
/* 2 to the power (pow modulo 191), modulo m */
/* NOTE: the 'curnand' misspelling is preserved for source compatibility. */

QUALIFIERS void curnand_MRGmatPow2Pow3x3( double in[][3], double o[][3], double m, unsigned long pow )
{
    unsigned int temp[3][3];
    int i,j;
    /* Exponent is taken mod 191 per the contract stated above. */
    pow = pow % 191;
    for ( i = 0; i < 3; i++ ) {
        for ( j = 0; j < 3; j++ ) {
            temp[i][j] = (unsigned int)in[i][j];
        }
    }
    /* Repeated squaring: after the loop, temp = in**(2**pow) (mod m). */
    while (pow) {
        curand_MRGmatMul3x3(temp, temp, temp, m);
        pow--;
    }
    for ( i = 0; i < 3; i++ ) {
        for ( j = 0; j < 3; j++ ) {
            o[i][j] = temp[i][j];
        }
    }
}
/** \endcond */
|
| 539 |
+
|
| 540 |
+
/****************************************************************************/
|
| 541 |
+
/* Kernel implementations of RNGs */
|
| 542 |
+
/****************************************************************************/
|
| 543 |
+
|
| 544 |
+
/* Test RNG */
|
| 545 |
+
|
| 546 |
+
QUALIFIERS void curand_init(unsigned long long seed,
|
| 547 |
+
unsigned long long subsequence,
|
| 548 |
+
unsigned long long offset,
|
| 549 |
+
curandStateTest_t *state)
|
| 550 |
+
{
|
| 551 |
+
state->v = (unsigned int)(seed * 3) + (unsigned int)(subsequence * 31337) + \
|
| 552 |
+
(unsigned int)offset;
|
| 553 |
+
}
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
/* Return the current test-state counter, then advance it by one. */
QUALIFIERS unsigned int curand(curandStateTest_t *state)
{
    return state->v++;
}
/* Advance the test generator by n draws. */
QUALIFIERS void skipahead(unsigned long long n, curandStateTest_t *state)
{
    state->v = state->v + (unsigned int)n;
}
/* XORWOW RNG */

template <typename T, int n>
QUALIFIERS void __curand_generate_skipahead_matrix_xor(unsigned int matrix[])
{
    T state;
    // Generate matrix that advances one step
    // matrix has n * n * 32 32-bit elements
    // solve for matrix by stepping single bit states
    for(int i = 0; i < 32 * n; i++) {
        state.d = 0;
        for(int j = 0; j < n; j++) {
            state.v[j] = 0;
        }
        // Set exactly one bit of the n*32-bit state:
        // bit (i mod 32) of word i/32.
        state.v[i / 32] = (1 << (i & 31));
        curand(&state);
        // Row i of the step matrix is the stepped single-bit state.
        for(int j = 0; j < n; j++) {
            matrix[i * n + j] = state.v[j];
        }
    }
}
/* Advance an n-word XORWOW-style state by x draws, using caller-provided
   scratch memory laid out as: matrix | matrixA | vector | result. */
template <typename T, int n>
QUALIFIERS void _skipahead_scratch(unsigned long long x, T *state, unsigned int *scratch)
{
    // unsigned int matrix[n * n * 32];
    unsigned int *matrix = scratch;
    // unsigned int matrixA[n * n * 32];
    unsigned int *matrixA = scratch + (n * n * 32);
    // unsigned int vector[n];
    unsigned int *vector = scratch + (n * n * 32) + (n * n * 32);
    // unsigned int result[n];
    unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n;
    unsigned long long p = x;
    for(int i = 0; i < n; i++) {
        vector[i] = state->v[i];
    }
    int matrix_num = 0;
    /* Low bits of p: apply precomputed offset matrices, one per
       PRECALC_BLOCK_SIZE-bit group (device vs host table). */
    while(p && (matrix_num < PRECALC_NUM_MATRICES - 1)) {
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec(vector, precalc_xorwow_offset_matrix[matrix_num], result, n);
            ,
                __curand_matvec(vector, precalc_xorwow_offset_matrix_host[matrix_num], result, n);
            )
            __curand_veccopy(vector, result, n);
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* Remaining high bits: square-and-multiply explicitly, seeded from
       the last precomputed matrix. */
    if(p) {
        NV_IF_ELSE_TARGET(NV_IS_DEVICE,
            __curand_matcopy(matrix, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
        ,
            __curand_matcopy(matrix, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
        )
    }
    while(p) {
        for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
            __curand_matvec(vector, matrixA, result, n);
            __curand_veccopy(vector, result, n);
        }
        p >>= SKIPAHEAD_BLOCKSIZE;
        if(p) {
            /* Raise matrixA to the 2**SKIPAHEAD_BLOCKSIZE for the next group. */
            for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
                __curand_matmat(matrix, matrixA, n);
                __curand_matcopy(matrixA, matrix, n);
            }
        }
    }
    for(int i = 0; i < n; i++) {
        state->v[i] = vector[i];
    }
    /* Weyl counter advances by 362437 each draw (see curand()). */
    state->d += 362437 * (unsigned int)x;
}
/* Advance an n-word XORWOW-style state by x whole subsequences, using
   caller-provided scratch memory (same layout as _skipahead_scratch). */
template <typename T, int n>
QUALIFIERS void _skipahead_sequence_scratch(unsigned long long x, T *state, unsigned int *scratch)
{
    // unsigned int matrix[n * n * 32];
    unsigned int *matrix = scratch;
    // unsigned int matrixA[n * n * 32];
    unsigned int *matrixA = scratch + (n * n * 32);
    // unsigned int vector[n];
    unsigned int *vector = scratch + (n * n * 32) + (n * n * 32);
    // unsigned int result[n];
    unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n;
    unsigned long long p = x;
    for(int i = 0; i < n; i++) {
        vector[i] = state->v[i];
    }
    int matrix_num = 0;
    /* Low bits of p: apply the precomputed subsequence matrices
       (device vs host table). */
    while(p && matrix_num < PRECALC_NUM_MATRICES - 1) {
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec(vector, precalc_xorwow_matrix[matrix_num], result, n);
            ,
                __curand_matvec(vector, precalc_xorwow_matrix_host[matrix_num], result, n);
            )
            __curand_veccopy(vector, result, n);
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* Remaining high bits: explicit square-and-multiply from the last
       precomputed matrix. */
    if(p) {
        NV_IF_ELSE_TARGET(NV_IS_DEVICE,
            __curand_matcopy(matrix, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
        ,
            __curand_matcopy(matrix, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
        )
    }
    while(p) {
        for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
            __curand_matvec(vector, matrixA, result, n);
            __curand_veccopy(vector, result, n);
        }
        p >>= SKIPAHEAD_BLOCKSIZE;
        if(p) {
            for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
                __curand_matmat(matrix, matrixA, n);
                __curand_matcopy(matrixA, matrix, n);
            }
        }
    }
    for(int i = 0; i < n; i++) {
        state->v[i] = vector[i];
    }
    /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
}
/* Advance state by x draws using only the precomputed offset matrices;
   no scratch memory required. */
template <typename T, int N>
QUALIFIERS void _skipahead_inplace(const unsigned long long x, T *state)
{
    unsigned long long p = x;
    int matrix_num = 0;
    while(p) {
        /* Apply the matrix for this PRECALC_BLOCK_SIZE-bit group
           (p & PRECALC_BLOCK_MASK) times. */
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix[matrix_num]);
            ,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix_host[matrix_num]);
            )
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* Weyl counter advances by 362437 each draw (see curand()). */
    state->d += 362437 * (unsigned int)x;
}
/* Advance state by x whole subsequences using only the precomputed
   subsequence matrices; no scratch memory required. */
template <typename T, int N>
QUALIFIERS void _skipahead_sequence_inplace(unsigned long long x, T *state)
{
    int matrix_num = 0;
    while(x) {
        for(unsigned int t = 0; t < (x & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix[matrix_num]);
            ,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix_host[matrix_num]);
            )
        }
        x >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
}
/**
 * \brief Update XORWOW state to skip \p n elements.
 *
 * Update the XORWOW state in \p state to skip ahead \p n elements.
 *
 * All values of \p n are valid. Large values require more computation and so
 * will take more time to complete.
 *
 * \param n - Number of elements to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead(unsigned long long n, curandStateXORWOW_t *state)
{
    /* 5 = number of 32-bit words in the XORWOW state vector. */
    _skipahead_inplace<curandStateXORWOW_t, 5>(n, state);
}
/**
 * \brief Update XORWOW state to skip ahead \p n subsequences.
 *
 * Update the XORWOW state in \p state to skip ahead \p n subsequences. Each
 * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly * n elements.
 *
 * All values of \p n are valid. Large values require more computation and so
 * will take more time to complete.
 *
 * \param n - Number of subsequences to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateXORWOW_t *state)
{
    /* 5 = number of 32-bit words in the XORWOW state vector. */
    _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(n, state);
}
/* Initialize an XORWOW state (scratch-memory variant). Seeds the five
   state words and Weyl counter from the 64-bit seed, then positions the
   generator at the requested subsequence and offset. */
QUALIFIERS void _curand_init_scratch(unsigned long long seed,
                                     unsigned long long subsequence,
                                     unsigned long long offset,
                                     curandStateXORWOW_t *state,
                                     unsigned int *scratch)
{
    // Break up seed, apply salt
    // Constants are arbitrary nonzero values
    unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
    unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
    // Simple multiplication to mix up bits
    // Constants are arbitrary odd values
    unsigned int t0 = 1099087573UL * s0;
    unsigned int t1 = 2591861531UL * s1;
    state->d = 6615241 + t1 + t0;
    state->v[0] = 123456789UL + t0;
    state->v[1] = 362436069UL ^ t0;
    state->v[2] = 521288629UL + t1;
    state->v[3] = 88675123UL ^ t1;
    state->v[4] = 5783321UL + t0;
    /* Position the generator: subsequence jump first, then offset. */
    _skipahead_sequence_scratch<curandStateXORWOW_t, 5>(subsequence, state, scratch);
    _skipahead_scratch<curandStateXORWOW_t, 5>(offset, state, scratch);
    /* Clear any cached Box-Muller values. */
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
/* Initialize an XORWOW state without scratch memory; identical seeding to
   _curand_init_scratch but using the in-place skipahead helpers. */
QUALIFIERS void _curand_init_inplace(unsigned long long seed,
                                     unsigned long long subsequence,
                                     unsigned long long offset,
                                     curandStateXORWOW_t *state)
{
    // Break up seed, apply salt
    // Constants are arbitrary nonzero values
    unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
    unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
    // Simple multiplication to mix up bits
    // Constants are arbitrary odd values
    unsigned int t0 = 1099087573UL * s0;
    unsigned int t1 = 2591861531UL * s1;
    state->d = 6615241 + t1 + t0;
    state->v[0] = 123456789UL + t0;
    state->v[1] = 362436069UL ^ t0;
    state->v[2] = 521288629UL + t1;
    state->v[3] = 88675123UL ^ t1;
    state->v[4] = 5783321UL + t0;
    /* Position the generator: subsequence jump first, then offset. */
    _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(subsequence, state);
    _skipahead_inplace<curandStateXORWOW_t, 5>(offset, state);
    /* Clear any cached Box-Muller values. */
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
/**
 * \brief Initialize XORWOW state.
 *
 * Initialize XORWOW state in \p state with the given \p seed, \p subsequence,
 * and \p offset.
 *
 * All input values of \p seed, \p subsequence, and \p offset are legal. Large
 * values for \p subsequence and \p offset require more computation and so will
 * take more time to complete.
 *
 * A value of 0 for \p seed sets the state to the values of the original
 * published version of the \p xorwow algorithm.
 *
 * \param seed - Arbitrary bits to use as a seed
 * \param subsequence - Subsequence to start at
 * \param offset - Absolute offset into sequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateXORWOW_t *state)
{
    /* Thin public wrapper over the scratch-free initializer. */
    _curand_init_inplace(seed, subsequence, offset, state);
}
/**
 * \brief Return 32-bits of pseudorandomness from an XORWOW generator.
 *
 * Return 32-bits of pseudorandomness from the XORWOW generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand(curandStateXORWOW_t *state)
{
    unsigned int t;
    /* xorshift step over the 5-word state: shift words down and combine
       the oldest and newest words into a fresh v[4]. */
    t = (state->v[0] ^ (state->v[0] >> 2));
    state->v[0] = state->v[1];
    state->v[1] = state->v[2];
    state->v[2] = state->v[3];
    state->v[3] = state->v[4];
    state->v[4] = (state->v[4] ^ (state->v[4] <<4)) ^ (t ^ (t << 1));
    /* Weyl counter: adding a constant each step breaks up short cycles. */
    state->d += 362437;
    return state->v[4] + state->d;
}
/**
 * \brief Return 32-bits of pseudorandomness from an Philox4_32_10 generator.
 *
 * Return 32-bits of pseudorandomness from the Philox4_32_10 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */

QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
{
    // Maintain the invariant: output[STATE] is always "good" and
    // is the next value to be returned by curand.
    unsigned int ret;
    /* STATE (0..3) selects which word of the buffered 128-bit output to
       return; "default" covers STATE == 0. */
    switch(state->STATE++){
    default:
        ret = state->output.x;
        break;
    case 1:
        ret = state->output.y;
        break;
    case 2:
        ret = state->output.z;
        break;
    case 3:
        ret = state->output.w;
        break;
    }
    /* All four buffered words consumed: bump the counter and refill. */
    if(state->STATE == 4){
        Philox_State_Incr(state);
        state->output = curand_Philox4x32_10(state->ctr,state->key);
        state->STATE = 0;
    }
    return ret;
}
/**
 * \brief Return tuple of 4 32-bit pseudorandoms from a Philox4_32_10 generator.
 *
 * Return 128 bits of pseudorandomness from the Philox4_32_10 generator in \p state,
 * increment position of generator by four.
 *
 * \param state - Pointer to state to update
 *
 * \return 128-bits of pseudorandomness as a uint4, all bits valid to use.
 */

QUALIFIERS uint4 curand4(curandStatePhilox4_32_10_t *state)
{
    uint4 r;

    /* Consume the buffered block and generate the next one; STATE stays
       unchanged because exactly four values are consumed. */
    uint4 tmp = state->output;
    Philox_State_Incr(state);
    state->output= curand_Philox4x32_10(state->ctr,state->key);
    /* If the stream position is mid-block (STATE != 0), stitch the next
       four values together from the tail of tmp and the head of the
       freshly generated block. */
    switch(state->STATE){
    case 0:
        return tmp;
    case 1:
        r.x = tmp.y;
        r.y = tmp.z;
        r.z = tmp.w;
        r.w = state->output.x;
        break;
    case 2:
        r.x = tmp.z;
        r.y = tmp.w;
        r.z = state->output.x;
        r.w = state->output.y;
        break;
    case 3:
        r.x = tmp.w;
        r.y = state->output.x;
        r.z = state->output.y;
        r.w = state->output.z;
        break;
    default:
        // NOT possible but needed to avoid compiler warnings
        return tmp;
    }
    return r;
}
/**
 * \brief Update Philox4_32_10 state to skip \p n elements.
 *
 * Update the Philox4_32_10 state in \p state to skip ahead \p n elements.
 *
 * All values of \p n are valid.
 *
 * \param n - Number of elements to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead(unsigned long long n, curandStatePhilox4_32_10_t *state)
{
    /* STATE (0..3) is the sub-position within a 4-word output block:
       advance it by n mod 4 and the counter by n/4, with carry. */
    state->STATE += (n & 3);
    n /= 4;
    if( state->STATE > 3 ){
        n += 1;
        state->STATE -= 4;
    }
    Philox_State_Incr(state, n);
    /* Refresh the buffered output for the new counter value. */
    state->output = curand_Philox4x32_10(state->ctr,state->key);
}
/**
 * \brief Update Philox4_32_10 state to skip ahead \p n subsequences.
 *
 * Update the Philox4_32_10 state in \p state to skip ahead \p n subsequences. Each
 * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly * n elements.
 *
 * All values of \p n are valid.
 *
 * \param n - Number of subsequences to skip
 * \param state - Pointer to state to update
 */
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStatePhilox4_32_10_t *state)
{
    /* Add n to the high half of the 128-bit counter, then refresh the
       buffered output. */
    Philox_State_Incr_hi(state, n);
    state->output = curand_Philox4x32_10(state->ctr,state->key);
}
/**
 * \brief Initialize Philox4_32_10 state.
 *
 * Initialize Philox4_32_10 state in \p state with the given \p seed, \p subsequence,
 * and \p offset.
 *
 * All input values for \p seed, \p subsequence and \p offset are legal. Each of the
 * \xmlonly<ph outputclass="xmlonly">2<sup>64</sup></ph>\endxmlonly possible
 * values of seed selects an independent sequence of length
 * \xmlonly<ph outputclass="xmlonly">2<sup>130</sup></ph>\endxmlonly.
 * The first
 * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup> * subsequence + offset</ph>\endxmlonly.
 * values of the sequence are skipped.
 * I.e., subsequences are of length
 * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly.
 *
 * \param seed - Arbitrary bits to use as a seed
 * \param subsequence - Subsequence to start at
 * \param offset - Absolute offset into subsequence
 * \param state - Pointer to state to initialize
 */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStatePhilox4_32_10_t *state)
{
    /* Counter starts at zero; the 64-bit seed becomes the Philox key. */
    state->ctr = make_uint4(0, 0, 0, 0);
    state->key.x = (unsigned int)seed;
    state->key.y = (unsigned int)(seed>>32);
    state->STATE = 0;
    /* Clear cached Box-Muller values. */
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
    /* Position the generator: subsequence jump first, then offset. */
    skipahead_sequence(subsequence, state);
    skipahead(offset, state);
}
/* MRG32k3a RNG */

/* Base generator for MRG32k3a */

/* 64-bit a*b + c via the wide multiply-add PTX instruction. Only produces
   a result on SM >= 6.1; otherwise returns 0 (the caller in this file only
   uses it inside the same NV_PROVIDES_SM_61 guard). */
QUALIFIERS unsigned long long __curand_umad(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b, GCC_UNUSED_PARAMETER unsigned long long c)
{
    unsigned long long r = 0;
    NV_IF_TARGET(NV_PROVIDES_SM_61,
                 asm("mad.wide.u32 %0, %1, %2, %3;"
                     : "=l"(r) : "r"(a), "r"(b), "l"(c));
    )
    return r;
}
/* 64-bit a*b via the wide multiply PTX instruction. Only produces a result
   on SM >= 6.1; otherwise returns 0 (the caller in this file only uses it
   inside the same NV_PROVIDES_SM_61 guard). */
QUALIFIERS unsigned long long __curand_umul(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b)
{
    unsigned long long r = 0;
    NV_IF_TARGET(NV_PROVIDES_SM_61,
                 asm("mul.wide.u32 %0, %1, %2;"
                     : "=l"(r) : "r"(a), "r"(b));
    )
    return r;
}
/* One step of the MRG32k3a combined recursive generator. Three
   target-specific implementations of the same recurrence: wide integer
   PTX on SM >= 6.1, double-precision FMA on other devices, and a portable
   double fallback using the MRG32K3A_* constants on the host. */
QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state)
{
    NV_IF_TARGET(NV_PROVIDES_SM_61,
                 const unsigned int m1 = 4294967087u;
                 const unsigned int m2 = 4294944443u;
                 const unsigned int m1c = 209u;     /* 2**32 - m1 */
                 const unsigned int m2c = 22853u;   /* 2**32 - m2 */
                 const unsigned int a12 = 1403580u;
                 const unsigned int a13n = 810728u;
                 const unsigned int a21 = 527612u;
                 const unsigned int a23n = 1370589u;

                 unsigned long long p1;
                 unsigned long long p2;
                 /* Use m1 - s1[0] so the negated a13n term becomes an addition. */
                 const unsigned long long p3 = __curand_umul(a13n, m1 - state->s1[0]);
                 p1 = __curand_umad(a12, state->s1[1], p3);

                 // Putting addition inside and changing umul to umad
                 // slowed this function down on GV100
                 p1 = __curand_umul(p1 >> 32, m1c) + (p1 & 0xffffffff);
                 if (p1 >= m1) p1 -= m1;

                 state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = p1;
                 const unsigned long long p4 = __curand_umul(a23n, m2 - state->s2[0]);
                 p2 = __curand_umad(a21, state->s2[2], p4);

                 // Putting addition inside and changing umul to umad
                 // slowed this function down on GV100
                 p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
                 p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
                 if (p2 >= m2) p2 -= m2;

                 state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = p2;

                 /* Combine the two component generators. */
                 const unsigned int p5 = (unsigned int)p1 - (unsigned int)p2;
                 if(p1 <= p2) return p5 + m1;
                 return p5;
    )
    NV_IF_TARGET(NV_IS_DEVICE,
                 /* nj's implementation */
                 const double m1 = 4294967087.;
                 const double m2 = 4294944443.;
                 const double a12 = 1403580.;
                 const double a13n = 810728.;
                 const double a21 = 527612.;
                 const double a23n = 1370589.;

                 /* Double-double split of 1/m1 and 1/m2 for exact reduction. */
                 const double rh1 = 2.3283065498378290e-010; /* (1.0 / m1)__hi */
                 const double rl1 = -1.7354913086174288e-026; /* (1.0 / m1)__lo */
                 const double rh2 = 2.3283188252407387e-010; /* (1.0 / m2)__hi */
                 const double rl2 = 2.4081018096503646e-026; /* (1.0 / m2)__lo */

                 double q;
                 double p1;
                 double p2;
                 p1 = a12 * state->s1[1] - a13n * state->s1[0];
                 q = trunc (fma (p1, rh1, p1 * rl1));
                 p1 -= q * m1;
                 if (p1 < 0.0) p1 += m1;
                 state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = (unsigned int)p1;
                 p2 = a21 * state->s2[2] - a23n * state->s2[0];
                 q = trunc (fma (p2, rh2, p2 * rl2));
                 p2 -= q * m2;
                 if (p2 < 0.0) p2 += m2;
                 state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = (unsigned int)p2;
                 if (p1 <= p2) return (p1 - p2 + m1);
                 else return (p1 - p2);
    )
    /* end nj's implementation */
    /* Host fallback in portable double arithmetic. */
    double p1;
    double p2;
    double r;
    p1 = (MRG32K3A_A12 * state->s1[1]) - (MRG32K3A_A13N * state->s1[0]);
    p1 = curand_MRGmod(p1, MRG32K3A_MOD1);
    if (p1 < 0.0) p1 += MRG32K3A_MOD1;
    state->s1[0] = state->s1[1];
    state->s1[1] = state->s1[2];
    state->s1[2] = (unsigned int)p1;
    p2 = (MRG32K3A_A21 * state->s2[2]) - (MRG32K3A_A23N * state->s2[0]);
    p2 = curand_MRGmod(p2, MRG32K3A_MOD2);
    if (p2 < 0) p2 += MRG32K3A_MOD2;
    state->s2[0] = state->s2[1];
    state->s2[1] = state->s2[2];
    state->s2[2] = (unsigned int)p2;
    r = p1 - p2;
    if (r <= 0) r += MRG32K3A_MOD1;
    return r;
}
|
| 1150 |
+
|
| 1151 |
+
/**
|
| 1152 |
+
* \brief Return 32-bits of pseudorandomness from an MRG32k3a generator.
|
| 1153 |
+
*
|
| 1154 |
+
* Return 32-bits of pseudorandomness from the MRG32k3a generator in \p state,
|
| 1155 |
+
* increment position of generator by one.
|
| 1156 |
+
*
|
| 1157 |
+
* \param state - Pointer to state to update
|
| 1158 |
+
*
|
| 1159 |
+
* \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
|
| 1160 |
+
*/
|
| 1161 |
+
QUALIFIERS unsigned int curand(curandStateMRG32k3a_t *state)
|
| 1162 |
+
{
|
| 1163 |
+
double dRet;
|
| 1164 |
+
dRet = (double)curand_MRG32k3a(state)*(double)MRG32K3A_BITS_NORM;
|
| 1165 |
+
return (unsigned int)dRet;
|
| 1166 |
+
}
|
| 1167 |
+
|
| 1168 |
+
|
| 1169 |
+
|
| 1170 |
+
/**
|
| 1171 |
+
* \brief Update MRG32k3a state to skip \p n elements.
|
| 1172 |
+
*
|
| 1173 |
+
* Update the MRG32k3a state in \p state to skip ahead \p n elements.
|
| 1174 |
+
*
|
| 1175 |
+
* All values of \p n are valid. Large values require more computation and so
|
| 1176 |
+
* will take more time to complete.
|
| 1177 |
+
*
|
| 1178 |
+
* \param n - Number of elements to skip
|
| 1179 |
+
* \param state - Pointer to state to update
|
| 1180 |
+
*/
|
| 1181 |
+
QUALIFIERS void skipahead(unsigned long long n, curandStateMRG32k3a_t *state)
|
| 1182 |
+
{
|
| 1183 |
+
unsigned int t[3][3];
|
| 1184 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 1185 |
+
curand_MRGmatPow3x3( mrg32k3aM1, t, MRG32K3A_MOD1, n);
|
| 1186 |
+
curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
|
| 1187 |
+
curand_MRGmatPow3x3(mrg32k3aM2, t, MRG32K3A_MOD2, n);
|
| 1188 |
+
curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
|
| 1189 |
+
,
|
| 1190 |
+
curand_MRGmatPow3x3( mrg32k3aM1Host, t, MRG32K3A_MOD1, n);
|
| 1191 |
+
curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
|
| 1192 |
+
curand_MRGmatPow3x3(mrg32k3aM2Host, t, MRG32K3A_MOD2, n);
|
| 1193 |
+
curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
|
| 1194 |
+
)
|
| 1195 |
+
}
|
| 1196 |
+
|
| 1197 |
+
/**
|
| 1198 |
+
* \brief Update MRG32k3a state to skip ahead \p n subsequences.
|
| 1199 |
+
*
|
| 1200 |
+
* Update the MRG32k3a state in \p state to skip ahead \p n subsequences. Each
|
| 1201 |
+
* subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly
|
| 1202 |
+
*
|
| 1203 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>76</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
|
| 1204 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly * n elements.
|
| 1205 |
+
*
|
| 1206 |
+
* Valid values of \p n are 0 to \xmlonly<ph outputclass="xmlonly">2<sup>51</sup></ph>\endxmlonly. Note \p n will be masked to 51 bits
|
| 1207 |
+
*
|
| 1208 |
+
* \param n - Number of subsequences to skip
|
| 1209 |
+
* \param state - Pointer to state to update
|
| 1210 |
+
*/
|
| 1211 |
+
QUALIFIERS void skipahead_subsequence(unsigned long long n, curandStateMRG32k3a_t *state)
|
| 1212 |
+
{
|
| 1213 |
+
unsigned int t[3][3];
|
| 1214 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 1215 |
+
curand_MRGmatPow3x3( mrg32k3aM1SubSeq, t, MRG32K3A_MOD1, n);
|
| 1216 |
+
curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
|
| 1217 |
+
curand_MRGmatPow3x3( mrg32k3aM2SubSeq, t, MRG32K3A_MOD2, n);
|
| 1218 |
+
curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
|
| 1219 |
+
,
|
| 1220 |
+
curand_MRGmatPow3x3( mrg32k3aM1SubSeqHost, t, MRG32K3A_MOD1, n);
|
| 1221 |
+
curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
|
| 1222 |
+
curand_MRGmatPow3x3( mrg32k3aM2SubSeqHost, t, MRG32K3A_MOD2, n);
|
| 1223 |
+
curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
|
| 1224 |
+
)
|
| 1225 |
+
}
|
| 1226 |
+
|
| 1227 |
+
/**
|
| 1228 |
+
* \brief Update MRG32k3a state to skip ahead \p n sequences.
|
| 1229 |
+
*
|
| 1230 |
+
* Update the MRG32k3a state in \p state to skip ahead \p n sequences. Each
|
| 1231 |
+
* sequence is \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
|
| 1232 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly * n elements.
|
| 1233 |
+
*
|
| 1234 |
+
* All values of \p n are valid. Large values require more computation and so
|
| 1235 |
+
* will take more time to complete.
|
| 1236 |
+
*
|
| 1237 |
+
* \param n - Number of sequences to skip
|
| 1238 |
+
* \param state - Pointer to state to update
|
| 1239 |
+
*/
|
| 1240 |
+
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateMRG32k3a_t *state)
{
    unsigned int t[3][3];   /* scratch 3x3 matrix for the computed power */
    /* Same matrix-power scheme as skipahead(), but using the precomputed
       per-sequence matrices (device vs. host copies). */
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        curand_MRGmatPow3x3( mrg32k3aM1Seq, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3( mrg32k3aM2Seq, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    ,
        curand_MRGmatPow3x3( mrg32k3aM1SeqHost, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3( mrg32k3aM2SeqHost, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    )
}
|
| 1255 |
+
|
| 1256 |
+
|
| 1257 |
+
/**
|
| 1258 |
+
* \brief Initialize MRG32k3a state.
|
| 1259 |
+
*
|
| 1260 |
+
* Initialize MRG32k3a state in \p state with the given \p seed, \p subsequence,
|
| 1261 |
+
* and \p offset.
|
| 1262 |
+
*
|
| 1263 |
+
* All input values of \p seed, \p subsequence, and \p offset are legal.
|
| 1264 |
+
* \p subsequence will be truncated to 51 bits to avoid running into the next sequence
|
| 1265 |
+
*
|
| 1266 |
+
* A value of 0 for \p seed sets the state to the values of the original
|
| 1267 |
+
* published version of the \p MRG32k3a algorithm.
|
| 1268 |
+
*
|
| 1269 |
+
* \param seed - Arbitrary bits to use as a seed
|
| 1270 |
+
* \param subsequence - Subsequence to start at
|
| 1271 |
+
* \param offset - Absolute offset into sequence
|
| 1272 |
+
* \param state - Pointer to state to initialize
|
| 1273 |
+
*/
|
| 1274 |
+
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateMRG32k3a_t *state)
{
    /* Start from the canonical published MRG32k3a state: every component
       of both state vectors equals 12345. */
    for (int k = 0; k < 3; k++) {
        state->s1[k] = 12345u;
        state->s2[k] = 12345u;
    }
    /* A nonzero seed perturbs the canonical state by modular multiplication
       with values derived from the two seed halves. */
    if (seed) {
        const unsigned int lo = ((unsigned int)seed) ^ 0x55555555UL;
        const unsigned int hi = (unsigned int)((seed >> 32) ^ 0xAAAAAAAAUL);
        state->s1[0] = (unsigned int)curand_MRGmodMul(lo, state->s1[0], MRG32K3A_MOD1);
        state->s1[1] = (unsigned int)curand_MRGmodMul(hi, state->s1[1], MRG32K3A_MOD1);
        state->s1[2] = (unsigned int)curand_MRGmodMul(lo, state->s1[2], MRG32K3A_MOD1);
        state->s2[0] = (unsigned int)curand_MRGmodMul(hi, state->s2[0], MRG32K3A_MOD2);
        state->s2[1] = (unsigned int)curand_MRGmodMul(lo, state->s2[1], MRG32K3A_MOD2);
        state->s2[2] = (unsigned int)curand_MRGmodMul(hi, state->s2[2], MRG32K3A_MOD2);
    }
    /* Position the generator, then clear the cached Box-Muller values. */
    skipahead_subsequence(subsequence, state);
    skipahead(offset, state);
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
|
| 1301 |
+
|
| 1302 |
+
/**
|
| 1303 |
+
* \brief Update Sobol32 state to skip \p n elements.
|
| 1304 |
+
*
|
| 1305 |
+
* Update the Sobol32 state in \p state to skip ahead \p n elements.
|
| 1306 |
+
*
|
| 1307 |
+
* All values of \p n are valid.
|
| 1308 |
+
*
|
| 1309 |
+
* \param n - Number of elements to skip
|
| 1310 |
+
* \param state - Pointer to state to update
|
| 1311 |
+
*/
|
| 1312 |
+
template <typename T>
|
| 1313 |
+
QUALIFIERS
|
| 1314 |
+
typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol32_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol32_t*, T>::value>::type
|
| 1315 |
+
skipahead(unsigned int n, T state)
|
| 1316 |
+
{
|
| 1317 |
+
unsigned int i_gray;
|
| 1318 |
+
state->x = state->c;
|
| 1319 |
+
state->i += n;
|
| 1320 |
+
/* Convert state->i to gray code */
|
| 1321 |
+
i_gray = state->i ^ (state->i >> 1);
|
| 1322 |
+
for(unsigned int k = 0; k < 32; k++) {
|
| 1323 |
+
if(i_gray & (1 << k)) {
|
| 1324 |
+
state->x ^= state->direction_vectors[k];
|
| 1325 |
+
}
|
| 1326 |
+
}
|
| 1327 |
+
return;
|
| 1328 |
+
}
|
| 1329 |
+
|
| 1330 |
+
/**
|
| 1331 |
+
* \brief Update Sobol64 state to skip \p n elements.
|
| 1332 |
+
*
|
| 1333 |
+
* Update the Sobol64 state in \p state to skip ahead \p n elements.
|
| 1334 |
+
*
|
| 1335 |
+
* All values of \p n are valid.
|
| 1336 |
+
*
|
| 1337 |
+
* \param n - Number of elements to skip
|
| 1338 |
+
* \param state - Pointer to state to update
|
| 1339 |
+
*/
|
| 1340 |
+
template <typename T>
|
| 1341 |
+
QUALIFIERS
|
| 1342 |
+
typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol64_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol64_t*, T>::value>::type
|
| 1343 |
+
skipahead(unsigned long long n, T state)
|
| 1344 |
+
{
|
| 1345 |
+
unsigned long long i_gray;
|
| 1346 |
+
state->x = state->c;
|
| 1347 |
+
state->i += n;
|
| 1348 |
+
/* Convert state->i to gray code */
|
| 1349 |
+
i_gray = state->i ^ (state->i >> 1);
|
| 1350 |
+
for(unsigned k = 0; k < 64; k++) {
|
| 1351 |
+
if(i_gray & (1ULL << k)) {
|
| 1352 |
+
state->x ^= state->direction_vectors[k];
|
| 1353 |
+
}
|
| 1354 |
+
}
|
| 1355 |
+
return;
|
| 1356 |
+
}
|
| 1357 |
+
|
| 1358 |
+
/**
|
| 1359 |
+
* \brief Initialize Sobol32 state.
|
| 1360 |
+
*
|
| 1361 |
+
* Initialize Sobol32 state in \p state with the given \p direction \p vectors and
|
| 1362 |
+
* \p offset.
|
| 1363 |
+
*
|
| 1364 |
+
* The direction vector is a device pointer to an array of 32 unsigned ints.
|
| 1365 |
+
* All input values of \p offset are legal.
|
| 1366 |
+
*
|
| 1367 |
+
* \param direction_vectors - Pointer to array of 32 unsigned ints representing the
|
| 1368 |
+
* direction vectors for the desired dimension
|
| 1369 |
+
* \param offset - Absolute offset into sequence
|
| 1370 |
+
* \param state - Pointer to state to initialize
|
| 1371 |
+
*/
|
| 1372 |
+
QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
                            unsigned int offset,
                            curandStateSobol32_t *state)
{
    /* Unscrambled generator: the scramble constant is zero. */
    state->i = 0;
    state->c = 0;
    for (int k = 0; k < 32; ++k)
        state->direction_vectors[k] = direction_vectors[k];
    state->x = 0;
    /* Position the generator at the requested offset. */
    skipahead<curandStateSobol32_t *>(offset, state);
}
|
| 1384 |
+
/**
|
| 1385 |
+
* \brief Initialize Scrambled Sobol32 state.
|
| 1386 |
+
*
|
| 1387 |
+
* Initialize Sobol32 state in \p state with the given \p direction \p vectors and
|
| 1388 |
+
* \p offset.
|
| 1389 |
+
*
|
| 1390 |
+
* The direction vector is a device pointer to an array of 32 unsigned ints.
|
| 1391 |
+
* All input values of \p offset are legal.
|
| 1392 |
+
*
|
| 1393 |
+
* \param direction_vectors - Pointer to array of 32 unsigned ints representing the
|
| 1394 |
+
direction vectors for the desired dimension
|
| 1395 |
+
* \param scramble_c Scramble constant
|
| 1396 |
+
* \param offset - Absolute offset into sequence
|
| 1397 |
+
* \param state - Pointer to state to initialize
|
| 1398 |
+
*/
|
| 1399 |
+
QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
                            unsigned int scramble_c,
                            unsigned int offset,
                            curandStateScrambledSobol32_t *state)
{
    /* Scrambled generator: x starts from the scramble constant. */
    state->i = 0;
    state->c = scramble_c;
    for (int k = 0; k < 32; ++k)
        state->direction_vectors[k] = direction_vectors[k];
    state->x = state->c;
    /* Position the generator at the requested offset. */
    skipahead<curandStateScrambledSobol32_t *>(offset, state);
}
|
| 1412 |
+
|
| 1413 |
+
/* Return the bit position (0..31) of the lowest zero bit of x.
 * Device path: __ffs(~x) is the 1-based index of the lowest set bit of ~x,
 * i.e. the lowest zero bit of x; a result of 0 means x is all ones.
 * Host path counts trailing one bits one at a time.
 * Both paths clamp the all-ones case to 31. */
QUALIFIERS int __curand_find_trailing_zero(unsigned int x)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        int y = __ffs(~x);
        if(y)
            return y - 1;
        return 31;
    ,
        int i = 1;
        while(x & 1) {
            i++;
            x >>= 1;
        }
        i = i - 1;
        return i == 32 ? 31 : i;
    )
}
|
| 1430 |
+
|
| 1431 |
+
/* Return the bit position (0..63) of the lowest zero bit of x.
 * 64-bit counterpart of the unsigned int overload: the device path uses
 * __ffsll(~x), the host path counts trailing one bits; both clamp the
 * all-ones case to 63. */
QUALIFIERS int __curand_find_trailing_zero(unsigned long long x)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        int y = __ffsll(~x);
        if(y)
            return y - 1;
        return 63;
    ,
        int i = 1;
        while(x & 1) {
            i++;
            x >>= 1;
        }
        i = i - 1;
        return i == 64 ? 63 : i;
    )
}
|
| 1448 |
+
|
| 1449 |
+
/**
|
| 1450 |
+
* \brief Initialize Sobol64 state.
|
| 1451 |
+
*
|
| 1452 |
+
* Initialize Sobol64 state in \p state with the given \p direction \p vectors and
|
| 1453 |
+
* \p offset.
|
| 1454 |
+
*
|
| 1455 |
+
* The direction vector is a device pointer to an array of 64 unsigned long longs.
|
| 1456 |
+
* All input values of \p offset are legal.
|
| 1457 |
+
*
|
| 1458 |
+
* \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
|
| 1459 |
+
direction vectors for the desired dimension
|
| 1460 |
+
* \param offset - Absolute offset into sequence
|
| 1461 |
+
* \param state - Pointer to state to initialize
|
| 1462 |
+
*/
|
| 1463 |
+
QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
                            unsigned long long offset,
                            curandStateSobol64_t *state)
{
    /* Unscrambled generator: the scramble constant is zero. */
    state->i = 0;
    state->c = 0;
    for (int k = 0; k < 64; ++k)
        state->direction_vectors[k] = direction_vectors[k];
    state->x = 0;
    /* Position the generator at the requested offset. */
    skipahead<curandStateSobol64_t *>(offset, state);
}
|
| 1475 |
+
|
| 1476 |
+
/**
|
| 1477 |
+
* \brief Initialize Scrambled Sobol64 state.
|
| 1478 |
+
*
|
| 1479 |
+
* Initialize Sobol64 state in \p state with the given \p direction \p vectors and
|
| 1480 |
+
* \p offset.
|
| 1481 |
+
*
|
| 1482 |
+
* The direction vector is a device pointer to an array of 64 unsigned long longs.
|
| 1483 |
+
* All input values of \p offset are legal.
|
| 1484 |
+
*
|
| 1485 |
+
* \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
|
| 1486 |
+
direction vectors for the desired dimension
|
| 1487 |
+
* \param scramble_c Scramble constant
|
| 1488 |
+
* \param offset - Absolute offset into sequence
|
| 1489 |
+
* \param state - Pointer to state to initialize
|
| 1490 |
+
*/
|
| 1491 |
+
QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
                            unsigned long long scramble_c,
                            unsigned long long offset,
                            curandStateScrambledSobol64_t *state)
{
    /* Scrambled generator: x starts from the scramble constant. */
    state->i = 0;
    state->c = scramble_c;
    for (int k = 0; k < 64; ++k)
        state->direction_vectors[k] = direction_vectors[k];
    state->x = state->c;
    /* Position the generator at the requested offset. */
    skipahead<curandStateScrambledSobol64_t *>(offset, state);
}
|
| 1504 |
+
|
| 1505 |
+
/**
|
| 1506 |
+
* \brief Return 32-bits of quasirandomness from a Sobol32 generator.
|
| 1507 |
+
*
|
| 1508 |
+
* Return 32-bits of quasirandomness from the Sobol32 generator in \p state,
|
| 1509 |
+
* increment position of generator by one.
|
| 1510 |
+
*
|
| 1511 |
+
* \param state - Pointer to state to update
|
| 1512 |
+
*
|
| 1513 |
+
* \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
|
| 1514 |
+
*/
|
| 1515 |
+
|
| 1516 |
+
QUALIFIERS unsigned int curand(curandStateSobol32_t * state)
{
    /* Consecutive Gray codes differ in exactly one bit — the position of
       the lowest zero bit of i — so advancing by one element means XORing
       in that single direction vector. */
    const unsigned int out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    ++state->i;
    return out;
}
|
| 1526 |
+
|
| 1527 |
+
/**
|
| 1528 |
+
* \brief Return 32-bits of quasirandomness from a scrambled Sobol32 generator.
|
| 1529 |
+
*
|
| 1530 |
+
* Return 32-bits of quasirandomness from the scrambled Sobol32 generator in \p state,
|
| 1531 |
+
* increment position of generator by one.
|
| 1532 |
+
*
|
| 1533 |
+
* \param state - Pointer to state to update
|
| 1534 |
+
*
|
| 1535 |
+
* \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
|
| 1536 |
+
*/
|
| 1537 |
+
|
| 1538 |
+
QUALIFIERS unsigned int curand(curandStateScrambledSobol32_t * state)
{
    /* Consecutive Gray codes differ in exactly one bit — the position of
       the lowest zero bit of i — so advancing by one element means XORing
       in that single direction vector. */
    const unsigned int out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    ++state->i;
    return out;
}
|
| 1548 |
+
|
| 1549 |
+
/**
|
| 1550 |
+
* \brief Return 64-bits of quasirandomness from a Sobol64 generator.
|
| 1551 |
+
*
|
| 1552 |
+
* Return 64-bits of quasirandomness from the Sobol64 generator in \p state,
|
| 1553 |
+
* increment position of generator by one.
|
| 1554 |
+
*
|
| 1555 |
+
* \param state - Pointer to state to update
|
| 1556 |
+
*
|
| 1557 |
+
* \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
|
| 1558 |
+
*/
|
| 1559 |
+
|
| 1560 |
+
QUALIFIERS unsigned long long curand(curandStateSobol64_t * state)
{
    /* Consecutive Gray codes differ in exactly one bit — the position of
       the lowest zero bit of i — so advancing by one element means XORing
       in that single direction vector. */
    const unsigned long long out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    ++state->i;
    return out;
}
|
| 1570 |
+
|
| 1571 |
+
/**
|
| 1572 |
+
* \brief Return 64-bits of quasirandomness from a scrambled Sobol64 generator.
|
| 1573 |
+
*
|
| 1574 |
+
 * Return 64-bits of quasirandomness from the scrambled Sobol64 generator in \p state,
|
| 1575 |
+
* increment position of generator by one.
|
| 1576 |
+
*
|
| 1577 |
+
* \param state - Pointer to state to update
|
| 1578 |
+
*
|
| 1579 |
+
* \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
|
| 1580 |
+
*/
|
| 1581 |
+
|
| 1582 |
+
QUALIFIERS unsigned long long curand(curandStateScrambledSobol64_t * state)
{
    /* Consecutive Gray codes differ in exactly one bit — the position of
       the lowest zero bit of i — so advancing by one element means XORing
       in that single direction vector. */
    const unsigned long long out = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    ++state->i;
    return out;
}
|
| 1592 |
+
|
| 1593 |
+
#include "curand_uniform.h"
|
| 1594 |
+
#include "curand_normal.h"
|
| 1595 |
+
#include "curand_lognormal.h"
|
| 1596 |
+
#include "curand_poisson.h"
|
| 1597 |
+
#include "curand_discrete2.h"
|
| 1598 |
+
|
| 1599 |
+
__device__ static inline unsigned int *__get_precalculated_matrix(int n)
{
    /* NOTE(review): n both selects the table and serves as the row index;
       presumably this accessor exists to keep the precomputed XORWOW tables
       referenced from device code — confirm against callers.
       (The original n == 0 branch returned the same value as the fallback,
       so only the n == 2 case is distinguished here.) */
    switch (n) {
    case 2:
        return precalc_xorwow_offset_matrix[n];
    default:
        return precalc_xorwow_matrix[n];
    }
}
|
| 1609 |
+
|
| 1610 |
+
#ifndef __CUDACC_RTC__
|
| 1611 |
+
__host__ static inline unsigned int *__get_precalculated_matrix_host(int n)
{
    /* NOTE(review): host-side counterpart of __get_precalculated_matrix;
       n both selects the table and serves as the row index — confirm
       against callers.  (The original n == 1 branch returned the same
       value as the fallback, so only n == 3 is distinguished here.) */
    switch (n) {
    case 3:
        return precalc_xorwow_offset_matrix_host[n];
    default:
        return precalc_xorwow_matrix_host[n];
    }
}
|
| 1621 |
+
#endif // #ifndef __CUDACC_RTC__
|
| 1622 |
+
|
| 1623 |
+
__device__ static inline unsigned int *__get_mrg32k3a_matrix(int n)
{
    /* NOTE(review): n both selects an MRG32k3a matrix table and serves as
       the row index; presumably this keeps the device tables referenced —
       confirm against callers.  (The original n == 0 branch returned the
       same value as the fallback, so it is folded into default.) */
    switch (n) {
    case 2:  return mrg32k3aM2[n][0];
    case 4:  return mrg32k3aM1SubSeq[n][0];
    case 6:  return mrg32k3aM2SubSeq[n][0];
    case 8:  return mrg32k3aM1Seq[n][0];
    case 10: return mrg32k3aM2Seq[n][0];
    default: return mrg32k3aM1[n][0];
    }
}
|
| 1645 |
+
|
| 1646 |
+
#ifndef __CUDACC_RTC__
|
| 1647 |
+
__host__ static inline unsigned int *__get_mrg32k3a_matrix_host(int n)
{
    /* NOTE(review): host-side counterpart of __get_mrg32k3a_matrix; n both
       selects a table and serves as the row index — confirm against
       callers.  (The original n == 1 branch returned the same value as the
       fallback, so it is folded into default.) */
    switch (n) {
    case 3:  return mrg32k3aM2Host[n][0];
    case 5:  return mrg32k3aM1SubSeqHost[n][0];
    case 7:  return mrg32k3aM2SubSeqHost[n][0];
    case 9:  return mrg32k3aM1SeqHost[n][0];
    case 11: return mrg32k3aM2SeqHost[n][0];
    default: return mrg32k3aM1Host[n][0];
    }
}
|
| 1669 |
+
|
| 1670 |
+
/* Host-side accessor for the precomputed __cr_lgamma_table. */
__host__ static inline double *__get__cr_lgamma_table_host(void) {
    return __cr_lgamma_table;
}
|
| 1673 |
+
#endif // #ifndef __CUDACC_RTC__
|
| 1674 |
+
|
| 1675 |
+
/** @} */
|
| 1676 |
+
|
| 1677 |
+
#endif // !defined(CURAND_KERNEL_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_lognormal.h
ADDED
|
@@ -0,0 +1,697 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_LOGNORMAL_H_)
|
| 52 |
+
#define CURAND_LOGNORMAL_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include "curand_mrg32k3a.h"
|
| 65 |
+
#include "curand_mtgp32_kernel.h"
|
| 66 |
+
#include "curand_philox4x32_x.h"
|
| 67 |
+
|
| 68 |
+
/**
|
| 69 |
+
* \brief Return a log-normally distributed float from an XORWOW generator.
|
| 70 |
+
*
|
| 71 |
+
* Return a single log-normally distributed float derived from a normal
|
| 72 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 73 |
+
* from the XORWOW generator in \p state,
|
| 74 |
+
* increment position of generator by one.
|
| 75 |
+
*
|
| 76 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 77 |
+
* normally distributed results, transforms them to log-normal distribution,
|
| 78 |
+
* then returns them one at a time.
|
| 79 |
+
* See ::curand_log_normal2() for a more efficient version that returns
|
| 80 |
+
* both results at once.
|
| 81 |
+
*
|
| 82 |
+
* \param state - Pointer to state to update
|
| 83 |
+
* \param mean - Mean of the related normal distribution
|
| 84 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 85 |
+
*
|
| 86 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 87 |
+
*/
|
| 88 |
+
QUALIFIERS float curand_log_normal(curandStateXORWOW_t *state, float mean, float stddev)
|
| 89 |
+
{
|
| 90 |
+
if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
|
| 91 |
+
unsigned int x, y;
|
| 92 |
+
x = curand(state);
|
| 93 |
+
y = curand(state);
|
| 94 |
+
float2 v = _curand_box_muller(x, y);
|
| 95 |
+
state->boxmuller_extra = expf(mean + (stddev * v.y));
|
| 96 |
+
state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
|
| 97 |
+
return expf(mean + (stddev * v.x));
|
| 98 |
+
}
|
| 99 |
+
state->boxmuller_flag = 0;
|
| 100 |
+
return state->boxmuller_extra;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
/**
|
| 104 |
+
* \brief Return a log-normally distributed float from an Philox4_32_10 generator.
|
| 105 |
+
*
|
| 106 |
+
* Return a single log-normally distributed float derived from a normal
|
| 107 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 108 |
+
* from the Philox4_32_10 generator in \p state,
|
| 109 |
+
* increment position of generator by one.
|
| 110 |
+
*
|
| 111 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 112 |
+
* normally distributed results, transforms them to log-normal distribution,
|
| 113 |
+
* then returns them one at a time.
|
| 114 |
+
* See ::curand_log_normal2() for a more efficient version that returns
|
| 115 |
+
* both results at once.
|
| 116 |
+
*
|
| 117 |
+
* \param state - Pointer to state to update
|
| 118 |
+
* \param mean - Mean of the related normal distribution
|
| 119 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 120 |
+
*
|
| 121 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 122 |
+
*/
|
| 123 |
+
|
| 124 |
+
QUALIFIERS float curand_log_normal(curandStatePhilox4_32_10_t *state, float mean, float stddev)
{
    // Box-Muller produces two normals per transform; the second one is cached in
    // the generator state so that alternate calls can return it without drawing
    // fresh randomness. The flag records that the cached value is log-normal.
    if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
        unsigned int x, y;
        // Consume two 32-bit draws from the Philox generator.
        x = curand(state);
        y = curand(state);
        float2 v = _curand_box_muller(x, y);
        // Map the second normal to log-normal and stash it for the next call.
        state->boxmuller_extra = expf(mean + (stddev * v.y));
        state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
        return expf(mean + (stddev * v.x));
    }
    // Cached value available from the previous call: consume it and clear the flag.
    state->boxmuller_flag = 0;
    return state->boxmuller_extra;
}
|
| 138 |
+
|
| 139 |
+
/**
|
| 140 |
+
* \brief Return two log-normally distributed floats from an XORWOW generator.
|
| 141 |
+
*
|
| 142 |
+
* Return two log-normally distributed floats derived from a normal
|
| 143 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 144 |
+
* from the XORWOW generator in \p state,
|
| 145 |
+
* increment position of generator by two.
|
| 146 |
+
*
|
| 147 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 148 |
+
* normally distributed results, then transforms them to log-normal.
|
| 149 |
+
*
|
| 150 |
+
* \param state - Pointer to state to update
|
| 151 |
+
* \param mean - Mean of the related normal distribution
|
| 152 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 153 |
+
*
|
| 154 |
+
* \return Log-normally distributed float2 where each element is from a
|
| 155 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 156 |
+
*/
|
| 157 |
+
QUALIFIERS float2 curand_log_normal2(curandStateXORWOW_t *state, float mean, float stddev)
{
    // Draw a normal pair via Box-Muller, then exponentiate each affine-scaled
    // component to obtain the log-normal pair.
    float2 result = curand_box_muller(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    return result;
}
|
| 164 |
+
|
| 165 |
+
/**
|
| 166 |
+
* \brief Return two log-normally distributed floats from a Philox4_32_10 generator.
|
| 167 |
+
*
|
| 168 |
+
* Return two log-normally distributed floats derived from a normal
|
| 169 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 170 |
+
* from the Philox4_32_10 generator in \p state,
|
| 171 |
+
* increment position of generator by two.
|
| 172 |
+
*
|
| 173 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 174 |
+
* normally distributed results, then transforms them to log-normal.
|
| 175 |
+
*
|
| 176 |
+
* \param state - Pointer to state to update
|
| 177 |
+
* \param mean - Mean of the related normal distribution
|
| 178 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 179 |
+
*
|
| 180 |
+
* \return Log-normally distributed float2 where each element is from a
|
| 181 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 182 |
+
*/
|
| 183 |
+
QUALIFIERS float2 curand_log_normal2(curandStatePhilox4_32_10_t *state, float mean, float stddev)
{
    // Draw a normal pair via Box-Muller, then exponentiate each affine-scaled
    // component to obtain the log-normal pair.
    float2 result = curand_box_muller(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    return result;
}
|
| 190 |
+
/**
|
| 191 |
+
* \brief Return four log-normally distributed floats from a Philox4_32_10 generator.
|
| 192 |
+
*
|
| 193 |
+
* Return four log-normally distributed floats derived from a normal
|
| 194 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 195 |
+
* from the Philox4_32_10 generator in \p state,
|
| 196 |
+
* increment position of generator by four.
|
| 197 |
+
*
|
| 198 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 199 |
+
* normally distributed results, then transforms them to log-normal.
|
| 200 |
+
*
|
| 201 |
+
* \param state - Pointer to state to update
|
| 202 |
+
* \param mean - Mean of the related normal distribution
|
| 203 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 204 |
+
*
|
| 205 |
+
* \return Log-normally distributed float4 where each element is from a
|
| 206 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 207 |
+
*/
|
| 208 |
+
QUALIFIERS float4 curand_log_normal4(curandStatePhilox4_32_10_t *state, float mean, float stddev)
{
    // Draw four normals at once, then exponentiate each affine-scaled component
    // to obtain four log-normal variates.
    float4 result = curand_box_muller4(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    result.z = expf(mean + stddev * result.z);
    result.w = expf(mean + stddev * result.w);
    return result;
}
|
| 217 |
+
|
| 218 |
+
/**
|
| 219 |
+
* \brief Return a log-normally distributed float from an MRG32k3a generator.
|
| 220 |
+
*
|
| 221 |
+
* Return a single log-normally distributed float derived from a normal
|
| 222 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 223 |
+
* from the MRG32k3a generator in \p state,
|
| 224 |
+
* increment position of generator by one.
|
| 225 |
+
*
|
| 226 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 227 |
+
* normally distributed results, transforms them to log-normal distribution,
|
| 228 |
+
* then returns them one at a time.
|
| 229 |
+
* See ::curand_log_normal2() for a more efficient version that returns
|
| 230 |
+
* both results at once.
|
| 231 |
+
*
|
| 232 |
+
* \param state - Pointer to state to update
|
| 233 |
+
* \param mean - Mean of the related normal distribution
|
| 234 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 235 |
+
*
|
| 236 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 237 |
+
*/
|
| 238 |
+
QUALIFIERS float curand_log_normal(curandStateMRG32k3a_t *state, float mean, float stddev)
{
    // Box-Muller produces two normals per transform; the second one is cached in
    // the generator state so that alternate calls can return it without drawing
    // fresh randomness. The flag records that the cached value is log-normal.
    if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
        float2 v = curand_box_muller_mrg(state);
        // Map the second normal to log-normal and stash it for the next call.
        state->boxmuller_extra = expf(mean + (stddev * v.y));
        state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
        return expf(mean + (stddev * v.x));
    }
    // Cached value available from the previous call: consume it and clear the flag.
    state->boxmuller_flag = 0;
    return state->boxmuller_extra;
}
|
| 249 |
+
|
| 250 |
+
/**
|
| 251 |
+
* \brief Return two log-normally distributed floats from an MRG32k3a generator.
|
| 252 |
+
*
|
| 253 |
+
* Return two log-normally distributed floats derived from a normal
|
| 254 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 255 |
+
* from the MRG32k3a generator in \p state,
|
| 256 |
+
* increment position of generator by two.
|
| 257 |
+
*
|
| 258 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 259 |
+
* normally distributed results, then transforms them to log-normal.
|
| 260 |
+
*
|
| 261 |
+
* \param state - Pointer to state to update
|
| 262 |
+
* \param mean - Mean of the related normal distribution
|
| 263 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 264 |
+
*
|
| 265 |
+
* \return Log-normally distributed float2 where each element is from a
|
| 266 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 267 |
+
*/
|
| 268 |
+
QUALIFIERS float2 curand_log_normal2(curandStateMRG32k3a_t *state, float mean, float stddev)
{
    // Draw a normal pair via the MRG Box-Muller helper, then exponentiate each
    // affine-scaled component to obtain the log-normal pair.
    float2 result = curand_box_muller_mrg(state);
    result.x = expf(mean + stddev * result.x);
    result.y = expf(mean + stddev * result.y);
    return result;
}
|
| 275 |
+
|
| 276 |
+
/**
|
| 277 |
+
* \brief Return a log-normally distributed float from an MTGP32 generator.
|
| 278 |
+
*
|
| 279 |
+
* Return a single log-normally distributed float derived from a normal
|
| 280 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 281 |
+
* from the MTGP32 generator in \p state,
|
| 282 |
+
* increment position of generator.
|
| 283 |
+
*
|
| 284 |
+
* The implementation uses the inverse cumulative distribution function
|
| 285 |
+
* to generate a normally distributed result, then transforms the result
|
| 286 |
+
* to log-normal.
|
| 287 |
+
*
|
| 288 |
+
* \param state - Pointer to state to update
|
| 289 |
+
* \param mean - Mean of the related normal distribution
|
| 290 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 291 |
+
*
|
| 292 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 293 |
+
*/
|
| 294 |
+
QUALIFIERS float curand_log_normal(curandStateMtgp32_t *state, float mean, float stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const float normal = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * normal);
}
|
| 298 |
+
|
| 299 |
+
/**
|
| 300 |
+
* \brief Return a log-normally distributed float from a Sobol32 generator.
|
| 301 |
+
*
|
| 302 |
+
* Return a single log-normally distributed float derived from a normal
|
| 303 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 304 |
+
* from the Sobol32 generator in \p state,
|
| 305 |
+
* increment position of generator by one.
|
| 306 |
+
*
|
| 307 |
+
* The implementation uses the inverse cumulative distribution function
|
| 308 |
+
* to generate a normally distributed result, then transforms the result
|
| 309 |
+
* to log-normal.
|
| 310 |
+
*
|
| 311 |
+
* \param state - Pointer to state to update
|
| 312 |
+
* \param mean - Mean of the related normal distribution
|
| 313 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 314 |
+
*
|
| 315 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 316 |
+
*/
|
| 317 |
+
QUALIFIERS float curand_log_normal(curandStateSobol32_t *state, float mean, float stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const float normal = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * normal);
}
|
| 321 |
+
/**
|
| 322 |
+
* \brief Return a log-normally distributed float from a scrambled Sobol32 generator.
|
| 323 |
+
*
|
| 324 |
+
* Return a single log-normally distributed float derived from a normal
|
| 325 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 326 |
+
* from the scrambled Sobol32 generator in \p state,
|
| 327 |
+
* increment position of generator by one.
|
| 328 |
+
*
|
| 329 |
+
* The implementation uses the inverse cumulative distribution function
|
| 330 |
+
* to generate a normally distributed result, then transforms the result
|
| 331 |
+
* to log-normal.
|
| 332 |
+
*
|
| 333 |
+
* \param state - Pointer to state to update
|
| 334 |
+
* \param mean - Mean of the related normal distribution
|
| 335 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 336 |
+
*
|
| 337 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 338 |
+
*/
|
| 339 |
+
QUALIFIERS float curand_log_normal(curandStateScrambledSobol32_t *state, float mean, float stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const float normal = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * normal);
}
|
| 343 |
+
|
| 344 |
+
/**
|
| 345 |
+
* \brief Return a log-normally distributed float from a Sobol64 generator.
|
| 346 |
+
*
|
| 347 |
+
* Return a single log-normally distributed float derived from a normal
|
| 348 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 349 |
+
* from the Sobol64 generator in \p state,
|
| 350 |
+
* increment position of generator by one.
|
| 351 |
+
*
|
| 352 |
+
* The implementation uses the inverse cumulative distribution function
|
| 353 |
+
* to generate normally distributed results, then converts to log-normal
|
| 354 |
+
* distribution.
|
| 355 |
+
*
|
| 356 |
+
* \param state - Pointer to state to update
|
| 357 |
+
* \param mean - Mean of the related normal distribution
|
| 358 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 359 |
+
*
|
| 360 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 361 |
+
*/
|
| 362 |
+
QUALIFIERS float curand_log_normal(curandStateSobol64_t *state, float mean, float stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const float normal = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * normal);
}
|
| 366 |
+
|
| 367 |
+
/**
|
| 368 |
+
* \brief Return a log-normally distributed float from a scrambled Sobol64 generator.
|
| 369 |
+
*
|
| 370 |
+
* Return a single log-normally distributed float derived from a normal
|
| 371 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 372 |
+
* from the scrambled Sobol64 generator in \p state,
|
| 373 |
+
* increment position of generator by one.
|
| 374 |
+
*
|
| 375 |
+
* The implementation uses the inverse cumulative distribution function
|
| 376 |
+
* to generate normally distributed results, then converts to log-normal
|
| 377 |
+
* distribution.
|
| 378 |
+
*
|
| 379 |
+
* \param state - Pointer to state to update
|
| 380 |
+
* \param mean - Mean of the related normal distribution
|
| 381 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 382 |
+
*
|
| 383 |
+
* \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
|
| 384 |
+
*/
|
| 385 |
+
QUALIFIERS float curand_log_normal(curandStateScrambledSobol64_t *state, float mean, float stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const float normal = _curand_normal_icdf(curand(state));
    return expf(mean + stddev * normal);
}
|
| 389 |
+
|
| 390 |
+
/**
|
| 391 |
+
* \brief Return a log-normally distributed double from an XORWOW generator.
|
| 392 |
+
*
|
| 393 |
+
* Return a single log-normally distributed double derived from a normal
|
| 394 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 395 |
+
* from the XORWOW generator in \p state,
|
| 396 |
+
* increment position of generator.
|
| 397 |
+
*
|
| 398 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 399 |
+
* normally distributed results, transforms them to log-normal distribution,
|
| 400 |
+
* then returns them one at a time.
|
| 401 |
+
* See ::curand_log_normal2_double() for a more efficient version that returns
|
| 402 |
+
* both results at once.
|
| 403 |
+
*
|
| 404 |
+
* \param state - Pointer to state to update
|
| 405 |
+
* \param mean - Mean of the related normal distribution
|
| 406 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 407 |
+
*
|
| 408 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 409 |
+
*/
|
| 410 |
+
|
| 411 |
+
QUALIFIERS double curand_log_normal_double(curandStateXORWOW_t *state, double mean, double stddev)
{
    // Box-Muller produces two normals per transform; the second one is cached in
    // the generator state so that alternate calls can return it without drawing
    // fresh randomness. The flag records that the cached value is log-normal.
    if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
        // Four 32-bit draws: each double-precision normal needs two 32-bit inputs.
        unsigned int x0, x1, y0, y1;
        x0 = curand(state);
        x1 = curand(state);
        y0 = curand(state);
        y1 = curand(state);
        double2 v = _curand_box_muller_double(x0, x1, y0, y1);
        // Map the second normal to log-normal and stash it for the next call.
        state->boxmuller_extra_double = exp(mean + (stddev * v.y));
        state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
        return exp(mean + (stddev * v.x));
    }
    // Cached value available from the previous call: consume it and clear the flag.
    state->boxmuller_flag_double = 0;
    return state->boxmuller_extra_double;
}
|
| 427 |
+
|
| 428 |
+
/**
|
| 429 |
+
* \brief Return a log-normally distributed double from an Philox4_32_10 generator.
|
| 430 |
+
*
|
| 431 |
+
* Return a single log-normally distributed double derived from a normal
|
| 432 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 433 |
+
* from the Philox4_32_10 generator in \p state,
|
| 434 |
+
* increment position of generator.
|
| 435 |
+
*
|
| 436 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 437 |
+
* normally distributed results, transforms them to log-normal distribution,
|
| 438 |
+
* then returns them one at a time.
|
| 439 |
+
* See ::curand_log_normal2_double() for a more efficient version that returns
|
| 440 |
+
* both results at once.
|
| 441 |
+
*
|
| 442 |
+
* \param state - Pointer to state to update
|
| 443 |
+
* \param mean - Mean of the related normal distribution
|
| 444 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 445 |
+
*
|
| 446 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 447 |
+
*/
|
| 448 |
+
|
| 449 |
+
QUALIFIERS double curand_log_normal_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
{
    // Box-Muller produces two normals per transform; the second one is cached in
    // the generator state so that alternate calls can return it without drawing
    // fresh randomness. The flag records that the cached value is log-normal.
    if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
        // A single Philox step yields four 32-bit words, enough for two doubles.
        uint4 _x;
        _x = curand4(state);
        double2 v = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
        // Map the second normal to log-normal and stash it for the next call.
        state->boxmuller_extra_double = exp(mean + (stddev * v.y));
        state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
        return exp(mean + (stddev * v.x));
    }
    // Cached value available from the previous call: consume it and clear the flag.
    state->boxmuller_flag_double = 0;
    return state->boxmuller_extra_double;
}
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
/**
|
| 465 |
+
* \brief Return two log-normally distributed doubles from an XORWOW generator.
|
| 466 |
+
*
|
| 467 |
+
* Return two log-normally distributed doubles derived from a normal
|
| 468 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 469 |
+
* from the XORWOW generator in \p state,
|
| 470 |
+
* increment position of generator by two.
|
| 471 |
+
*
|
| 472 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 473 |
+
* normally distributed results, and transforms them to a log-normal distribution.
|
| 474 |
+
*
|
| 475 |
+
* \param state - Pointer to state to update
|
| 476 |
+
* \param mean - Mean of the related normal distribution
|
| 477 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 478 |
+
*
|
| 479 |
+
* \return Log-normally distributed double2 where each element is from a
|
| 480 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 481 |
+
*/
|
| 482 |
+
QUALIFIERS double2 curand_log_normal2_double(curandStateXORWOW_t *state, double mean, double stddev)
{
    // Draw a double-precision normal pair via Box-Muller, then exponentiate
    // each affine-scaled component to obtain the log-normal pair.
    double2 result = curand_box_muller_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    return result;
}
|
| 489 |
+
|
| 490 |
+
/**
|
| 491 |
+
* \brief Return two log-normally distributed doubles from an Philox4_32_10 generator.
|
| 492 |
+
*
|
| 493 |
+
* Return two log-normally distributed doubles derived from a normal
|
| 494 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 495 |
+
* from the Philox4_32_10 generator in \p state,
|
| 496 |
+
* increment position of generator by four.
|
| 497 |
+
*
|
| 498 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 499 |
+
* normally distributed results, and transforms them to a log-normal distribution.
|
| 500 |
+
*
|
| 501 |
+
* \param state - Pointer to state to update
|
| 502 |
+
* \param mean - Mean of the related normal distribution
|
| 503 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 504 |
+
*
|
| 505 |
+
* \return Log-normally distributed double4 where each element is from a
|
| 506 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 507 |
+
*/
|
| 508 |
+
QUALIFIERS double2 curand_log_normal2_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
{
    // Draw a double-precision normal pair via Box-Muller, then exponentiate
    // each affine-scaled component to obtain the log-normal pair.
    double2 result = curand_box_muller2_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    return result;
}
|
| 515 |
+
// not part of API
|
| 516 |
+
QUALIFIERS double4 curand_log_normal4_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
{
    // Draw four double-precision normals at once, then exponentiate each
    // affine-scaled component to obtain four log-normal variates.
    double4 result = curand_box_muller4_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    result.z = exp(mean + stddev * result.z);
    result.w = exp(mean + stddev * result.w);
    return result;
}
|
| 525 |
+
|
| 526 |
+
/**
|
| 527 |
+
* \brief Return a log-normally distributed double from an MRG32k3a generator.
|
| 528 |
+
*
|
| 529 |
+
* Return a single log-normally distributed double derived from a normal
|
| 530 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 531 |
+
* from the MRG32k3a generator in \p state,
|
| 532 |
+
* increment position of generator.
|
| 533 |
+
*
|
| 534 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 535 |
+
* normally distributed results, transforms them to log-normal distribution,
|
| 536 |
+
* then returns them one at a time.
|
| 537 |
+
* See ::curand_log_normal2_double() for a more efficient version that returns
|
| 538 |
+
* both results at once.
|
| 539 |
+
*
|
| 540 |
+
* \param state - Pointer to state to update
|
| 541 |
+
* \param mean - Mean of the related normal distribution
|
| 542 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 543 |
+
*
|
| 544 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 545 |
+
*/
|
| 546 |
+
QUALIFIERS double curand_log_normal_double(curandStateMRG32k3a_t *state, double mean, double stddev)
{
    // Box-Muller produces two normals per transform; the second one is cached in
    // the generator state so that alternate calls can return it without drawing
    // fresh randomness. The flag records that the cached value is log-normal.
    if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
        double2 v = curand_box_muller_mrg_double(state);
        // Map the second normal to log-normal and stash it for the next call.
        state->boxmuller_extra_double = exp(mean + (stddev * v.y));
        state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
        return exp(mean + (stddev * v.x));
    }
    // Cached value available from the previous call: consume it and clear the flag.
    state->boxmuller_flag_double = 0;
    return state->boxmuller_extra_double;
}
|
| 557 |
+
|
| 558 |
+
/**
|
| 559 |
+
* \brief Return two log-normally distributed doubles from an MRG32k3a generator.
|
| 560 |
+
*
|
| 561 |
+
* Return two log-normally distributed doubles derived from a normal
|
| 562 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 563 |
+
* from the MRG32k3a generator in \p state,
|
| 564 |
+
* increment position of generator by two.
|
| 565 |
+
*
|
| 566 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 567 |
+
* normally distributed results, and transforms them to a log-normal distribution.
|
| 568 |
+
*
|
| 569 |
+
* \param state - Pointer to state to update
|
| 570 |
+
* \param mean - Mean of the related normal distribution
|
| 571 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 572 |
+
*
|
| 573 |
+
* \return Log-normally distributed double2 where each element is from a
|
| 574 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 575 |
+
*/
|
| 576 |
+
QUALIFIERS double2 curand_log_normal2_double(curandStateMRG32k3a_t *state, double mean, double stddev)
{
    // Draw a double-precision normal pair via the MRG Box-Muller helper, then
    // exponentiate each affine-scaled component to obtain the log-normal pair.
    double2 result = curand_box_muller_mrg_double(state);
    result.x = exp(mean + stddev * result.x);
    result.y = exp(mean + stddev * result.y);
    return result;
}
|
| 583 |
+
|
| 584 |
+
/**
|
| 585 |
+
* \brief Return a log-normally distributed double from an MTGP32 generator.
|
| 586 |
+
*
|
| 587 |
+
* Return a single log-normally distributed double derived from a normal
|
| 588 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 589 |
+
* from the MTGP32 generator in \p state,
|
| 590 |
+
* increment position of generator.
|
| 591 |
+
*
|
| 592 |
+
* The implementation uses the inverse cumulative distribution function
|
| 593 |
+
* to generate normally distributed results, and transforms them into
|
| 594 |
+
* log-normal distribution.
|
| 595 |
+
*
|
| 596 |
+
* \param state - Pointer to state to update
|
| 597 |
+
* \param mean - Mean of the related normal distribution
|
| 598 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 599 |
+
*
|
| 600 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 601 |
+
*/
|
| 602 |
+
QUALIFIERS double curand_log_normal_double(curandStateMtgp32_t *state, double mean, double stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const double normal = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * normal);
}
|
| 606 |
+
|
| 607 |
+
/**
|
| 608 |
+
* \brief Return a log-normally distributed double from a Sobol32 generator.
|
| 609 |
+
*
|
| 610 |
+
* Return a single log-normally distributed double derived from a normal
|
| 611 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 612 |
+
* from the Sobol32 generator in \p state,
|
| 613 |
+
* increment position of generator by one.
|
| 614 |
+
*
|
| 615 |
+
* The implementation uses the inverse cumulative distribution function
|
| 616 |
+
* to generate normally distributed results, and transforms them into
|
| 617 |
+
* log-normal distribution.
|
| 618 |
+
*
|
| 619 |
+
* \param state - Pointer to state to update
|
| 620 |
+
* \param mean - Mean of the related normal distribution
|
| 621 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 622 |
+
*
|
| 623 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 624 |
+
*/
|
| 625 |
+
QUALIFIERS double curand_log_normal_double(curandStateSobol32_t *state, double mean, double stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const double normal = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * normal);
}
|
| 629 |
+
|
| 630 |
+
/**
|
| 631 |
+
* \brief Return a log-normally distributed double from a scrambled Sobol32 generator.
|
| 632 |
+
*
|
| 633 |
+
* Return a single log-normally distributed double derived from a normal
|
| 634 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 635 |
+
* from the scrambled Sobol32 generator in \p state,
|
| 636 |
+
* increment position of generator by one.
|
| 637 |
+
*
|
| 638 |
+
* The implementation uses the inverse cumulative distribution function
|
| 639 |
+
* to generate normally distributed results, and transforms them into
|
| 640 |
+
* log-normal distribution.
|
| 641 |
+
*
|
| 642 |
+
* \param state - Pointer to state to update
|
| 643 |
+
* \param mean - Mean of the related normal distribution
|
| 644 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 645 |
+
*
|
| 646 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 647 |
+
*/
|
| 648 |
+
QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol32_t *state, double mean, double stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const double normal = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * normal);
}
|
| 652 |
+
|
| 653 |
+
/**
|
| 654 |
+
* \brief Return a log-normally distributed double from a Sobol64 generator.
|
| 655 |
+
*
|
| 656 |
+
* Return a single log-normally distributed double derived from a normal
|
| 657 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 658 |
+
* from the Sobol64 generator in \p state,
|
| 659 |
+
* increment position of generator by one.
|
| 660 |
+
*
|
| 661 |
+
* The implementation uses the inverse cumulative distribution function
|
| 662 |
+
* to generate normally distributed results.
|
| 663 |
+
*
|
| 664 |
+
* \param state - Pointer to state to update
|
| 665 |
+
* \param mean - Mean of the related normal distribution
|
| 666 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 667 |
+
*
|
| 668 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 669 |
+
*/
|
| 670 |
+
QUALIFIERS double curand_log_normal_double(curandStateSobol64_t *state, double mean, double stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const double normal = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * normal);
}
|
| 674 |
+
|
| 675 |
+
/**
|
| 676 |
+
* \brief Return a log-normally distributed double from a scrambled Sobol64 generator.
|
| 677 |
+
*
|
| 678 |
+
* Return a single log-normally distributed double derived from a normal
|
| 679 |
+
* distribution with mean \p mean and standard deviation \p stddev
|
| 680 |
+
* from the scrambled Sobol64 generator in \p state,
|
| 681 |
+
* increment position of generator by one.
|
| 682 |
+
*
|
| 683 |
+
* The implementation uses the inverse cumulative distribution function
|
| 684 |
+
* to generate normally distributed results.
|
| 685 |
+
*
|
| 686 |
+
* \param state - Pointer to state to update
|
| 687 |
+
* \param mean - Mean of the related normal distribution
|
| 688 |
+
* \param stddev - Standard deviation of the related normal distribution
|
| 689 |
+
*
|
| 690 |
+
* \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
|
| 691 |
+
*/
|
| 692 |
+
QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol64_t *state, double mean, double stddev)
{
    // One normal variate via the inverse CDF, exponentiated to log-normal.
    const double normal = _curand_normal_icdf_double(curand(state));
    return exp(mean + stddev * normal);
}
|
| 696 |
+
|
| 697 |
+
#endif // !defined(CURAND_LOGNORMAL_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mrg32k3a.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32.h
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef CURAND_MTGP32_H
|
| 51 |
+
#define CURAND_MTGP32_H
|
| 52 |
+
/*
|
| 53 |
+
* @file curand_mtgp32.h
|
| 54 |
+
*
|
| 55 |
+
* @brief Mersenne Twister for Graphic Processors (mtgp32), which
|
| 56 |
+
* generates 32-bit unsigned integers and single precision floating
|
| 57 |
+
* point numbers based on IEEE 754 format.
|
| 58 |
+
*
|
| 59 |
+
* @author Mutsuo Saito (Hiroshima University)
|
| 60 |
+
* @author Makoto Matsumoto (Hiroshima University)
|
| 61 |
+
*
|
| 62 |
+
*/
|
| 63 |
+
/*
|
| 64 |
+
* Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
| 65 |
+
* University. All rights reserved.
|
| 66 |
+
* Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
| 67 |
+
* University and University of Tokyo. All rights reserved.
|
| 68 |
+
*
|
| 69 |
+
* Redistribution and use in source and binary forms, with or without
|
| 70 |
+
* modification, are permitted provided that the following conditions are
|
| 71 |
+
* met:
|
| 72 |
+
*
|
| 73 |
+
* * Redistributions of source code must retain the above copyright
|
| 74 |
+
* notice, this list of conditions and the following disclaimer.
|
| 75 |
+
* * Redistributions in binary form must reproduce the above
|
| 76 |
+
* copyright notice, this list of conditions and the following
|
| 77 |
+
* disclaimer in the documentation and/or other materials provided
|
| 78 |
+
* with the distribution.
|
| 79 |
+
* * Neither the name of the Hiroshima University nor the names of
|
| 80 |
+
* its contributors may be used to endorse or promote products
|
| 81 |
+
* derived from this software without specific prior written
|
| 82 |
+
* permission.
|
| 83 |
+
*
|
| 84 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 85 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 86 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 87 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 88 |
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 89 |
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 90 |
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 91 |
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 92 |
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 93 |
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 94 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 95 |
+
*/
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
/* Compile-time parameters of the MTGP32-11213 generator. */
#define MTGPDC_MEXP 11213                /* Mersenne exponent (period is 2^11213 - 1). */
#define MTGPDC_N 351                     /* 32-bit words per state: 11213 / 32 + 1. */
#define MTGPDC_FLOOR_2P 256              /* Largest power of two <= MTGPDC_N. */
#define MTGPDC_CEIL_2P 512               /* Smallest power of two >= MTGPDC_N. */
#define MTGPDC_PARAM_TABLE mtgp32dc_params_fast_11213  /* Identifier of the parameter table. */
#define MTGP32_STATE_SIZE 1024           /* Length of the per-generator state array (power of two). */
#define MTGP32_STATE_MASK 1023           /* STATE_SIZE - 1: mask for cheap modular indexing. */
#define CURAND_NUM_MTGP32_PARAMS 200     /* Number of independent parameter sets shipped with CURAND. */
#define MEXP 11213                       /* Alias of MTGPDC_MEXP. */
#define THREAD_NUM MTGPDC_FLOOR_2P       /* Threads cooperating on one generator state. */
#define LARGE_SIZE (THREAD_NUM * 3)
#define TBL_SIZE 16                      /* Entries in each recursion/tempering lookup table. */
|
| 110 |
+
|
| 111 |
+
/**
|
| 112 |
+
* \addtogroup DEVICE Device API
|
| 113 |
+
*
|
| 114 |
+
* @{
|
| 115 |
+
*/
|
| 116 |
+
|
| 117 |
+
/*
|
| 118 |
+
* \struct MTGP32_PARAMS_FAST_T
|
| 119 |
+
* MTGP32 parameters.
|
| 120 |
+
* Some element is redundant to keep structure simple.
|
| 121 |
+
*
|
| 122 |
+
* \b pos is a pick up position which is selected to have good
|
| 123 |
+
* performance on graphic processors. 3 < \b pos < Q, where Q is a
|
| 124 |
+
* maximum number such that the size of status array - Q is a power of
|
| 125 |
+
* 2. For example, when \b mexp is 44497, size of 32-bit status array
|
| 126 |
+
* is 696, and Q is 184, then \b pos is between 4 and 183. This means
|
| 127 |
+
* 512 parallel calculations is allowed when \b mexp is 44497.
|
| 128 |
+
*
|
| 129 |
+
* \b poly_sha1 is SHA1 digest of the characteristic polynomial of
|
| 130 |
+
* state transition function. SHA1 is calculated based on printing
|
| 131 |
+
* form of the polynomial. This is important when we use parameters
|
| 132 |
+
* generated by the dynamic creator which
|
| 133 |
+
*
|
| 134 |
+
* \b mask This is a mask to make the dimension of state space have
|
| 135 |
+
* just Mersenne Prime. This is redundant.
|
| 136 |
+
*/
|
| 137 |
+
|
| 138 |
+
struct mtgp32_params_fast;

/*
 * One MTGP32 parameter set (see MTGP32_PARAMS_FAST_T notes above).
 * Some elements are redundant, kept to make the structure simple.
 */
struct mtgp32_params_fast {
    int mexp;                     /* Mersenne exponent. This is redundant. */
    int pos;                      /* Pick-up position. */
    int sh1;                      /* Shift value 1. 0 < sh1 < 32. */
    int sh2;                      /* Shift value 2. 0 < sh2 < 32. */
    unsigned int tbl[16];         /* A small matrix (recursion lookup table). */
    unsigned int tmp_tbl[16];     /* A small matrix for tempering. */
    unsigned int flt_tmp_tbl[16]; /* A small matrix for tempering and
                                     converting to float. */
    unsigned int mask;            /* Mask restricting the state space. */
    unsigned char poly_sha1[21];  /* SHA1 digest of the characteristic polynomial. */
};

/** \cond UNHIDE_TYPEDEFS */
typedef struct mtgp32_params_fast mtgp32_params_fast_t;
/** \endcond */
|
| 156 |
+
|
| 157 |
+
/*
 * Generator parameters in device-friendly layout: the host-side
 * array-of-structures (mtgp32_params_fast_t) is transposed into a
 * structure of arrays, one slot per parameter set.
 */
struct mtgp32_kernel_params;
struct mtgp32_kernel_params {
    unsigned int pos_tbl[CURAND_NUM_MTGP32_PARAMS];                     /* pick-up positions */
    unsigned int param_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE];         /* recursion tables */
    unsigned int temper_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE];        /* integer tempering tables */
    unsigned int single_temper_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE]; /* float-output tempering tables */
    unsigned int sh1_tbl[CURAND_NUM_MTGP32_PARAMS];                     /* shift value 1 per set */
    unsigned int sh2_tbl[CURAND_NUM_MTGP32_PARAMS];                     /* shift value 2 per set */
    unsigned int mask[1];                                               /* single shared state-space mask */
};

/** \cond UNHIDE_TYPEDEFS */
typedef struct mtgp32_kernel_params mtgp32_kernel_params_t;
/** \endcond */
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
/*
 * Kernel I/O state.
 * This structure must be initialized before first use
 * (see curandMakeMTGP32KernelState in curand_mtgp32_host.h).
 */

/* MTGP (Mersenne Twister) RNG */
/* This generator uses the Mersenne Twister algorithm of
 * http://arxiv.org/abs/1005.4973v2
 * Has period 2^11213.
 */

/**
 * CURAND MTGP32 state
 */
struct curandStateMtgp32;

struct curandStateMtgp32 {
    unsigned int s[MTGP32_STATE_SIZE]; /* generator state words */
    int offset;                        /* current position within s (initialized to 0) */
    int pIdx;                          /* index of this state's parameter set in *k */
    mtgp32_kernel_params_t * k;        /* pointer to the shared device parameter tables */
};

/*
 * CURAND MTGP32 state
 */
/** \cond UNHIDE_TYPEDEFS */
typedef struct curandStateMtgp32 curandStateMtgp32_t;
/** \endcond */
|
| 206 |
+
|
| 207 |
+
/** @} */
|
| 208 |
+
|
| 209 |
+
#endif
|
| 210 |
+
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32_host.h
ADDED
|
@@ -0,0 +1,516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
/*
|
| 51 |
+
* curand_mtgp32_host.h
|
| 52 |
+
*
|
| 53 |
+
*
|
| 54 |
+
* MTGP32-11213
|
| 55 |
+
*
|
| 56 |
+
* Mersenne Twister RNG for the GPU
|
| 57 |
+
*
|
| 58 |
+
* The period of generated integers is 2<sup>11213</sup>-1.
|
| 59 |
+
*
|
| 60 |
+
* This code generates 32-bit unsigned integers, and
|
| 61 |
+
* single precision floating point numbers uniformly distributed
|
| 62 |
+
* in the range [1, 2). (float r; 1.0 <= r < 2.0)
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
/*
|
| 66 |
+
* Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
| 67 |
+
* University. All rights reserved.
|
| 68 |
+
* Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
| 69 |
+
* University and University of Tokyo. All rights reserved.
|
| 70 |
+
*
|
| 71 |
+
* Redistribution and use in source and binary forms, with or without
|
| 72 |
+
* modification, are permitted provided that the following conditions are
|
| 73 |
+
* met:
|
| 74 |
+
*
|
| 75 |
+
* * Redistributions of source code must retain the above copyright
|
| 76 |
+
* notice, this list of conditions and the following disclaimer.
|
| 77 |
+
* * Redistributions in binary form must reproduce the above
|
| 78 |
+
* copyright notice, this list of conditions and the following
|
| 79 |
+
* disclaimer in the documentation and/or other materials provided
|
| 80 |
+
* with the distribution.
|
| 81 |
+
* * Neither the name of the Hiroshima University nor the names of
|
| 82 |
+
* its contributors may be used to endorse or promote products
|
| 83 |
+
* derived from this software without specific prior written
|
| 84 |
+
* permission.
|
| 85 |
+
*
|
| 86 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 87 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 88 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 89 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 90 |
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 91 |
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 92 |
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 93 |
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 94 |
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 95 |
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 96 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 97 |
+
*/
|
| 98 |
+
#if !defined CURAND_MTGP32_HOST_H
|
| 99 |
+
#define CURAND_MTGP32_HOST_H
|
| 100 |
+
|
| 101 |
+
/* Default qualifiers for device-side generator functions; an includer
 * may pre-define QUALIFIERS to override (guard makes this overridable). */
#if !defined(QUALIFIERS)
#define QUALIFIERS static inline __device__
#endif
|
| 104 |
+
|
| 105 |
+
#include <cuda_runtime.h>
|
| 106 |
+
#include <stdlib.h>
|
| 107 |
+
#include <memory.h>
|
| 108 |
+
#include <string.h>
|
| 109 |
+
#include "curand.h"
|
| 110 |
+
#include "curand_mtgp32.h"
|
| 111 |
+
#include "curand_mtgp32dc_p_11213.h"
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
/**
|
| 115 |
+
* \addtogroup DEVICE Device API
|
| 116 |
+
*
|
| 117 |
+
* @{
|
| 118 |
+
*/
|
| 119 |
+
|
| 120 |
+
/* Substituted for the last state word when it would otherwise be zero,
 * keeping the state from collapsing to all-zero ("MTGP" in ASCII). */
static const unsigned int non_zero = 0x4d544750;
|
| 121 |
+
|
| 122 |
+
/*
|
| 123 |
+
* This function represents a function used in the initialization
|
| 124 |
+
* by mtgp32_init_by_array() and mtgp32_init_by_str().
|
| 125 |
+
* @param[in] x 32-bit integer
|
| 126 |
+
* @return 32-bit integer
|
| 127 |
+
*/
|
| 128 |
+
static __forceinline__ unsigned int ini_func1(unsigned int x) {
|
| 129 |
+
return (x ^ (x >> 27)) * (1664525);
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
/*
|
| 133 |
+
* This function represents a function used in the initialization
|
| 134 |
+
* by mtgp32_init_by_array() and mtgp32_init_by_str().
|
| 135 |
+
* @param[in] x 32-bit integer
|
| 136 |
+
* @return 32-bit integer
|
| 137 |
+
*/
|
| 138 |
+
static __forceinline__ unsigned int ini_func2(unsigned int x) {
|
| 139 |
+
return (x ^ (x >> 27)) * (1566083941);
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
/*
 * Initialize an MTGP32 internal state array with a 32-bit integer seed.
 * \b para should be one of the elements in the parameter table
 * (mtgp32-param-ref.c).
 *
 * This variant is used by the CUDA host code, because the CUDA path
 * uses a different structure and allocation method than the reference
 * implementation.
 *
 * @param[out] state MTGP internal state vector.
 * @param[in] para parameter structure.
 * @param[in] seed a 32-bit integer used as the seed.
 */
static __forceinline__ __host__
void mtgp32_init_state(unsigned int state[],
                       const mtgp32_params_fast_t *para, unsigned int seed) {
    int i;
    int size = para->mexp / 32 + 1;  /* number of 32-bit words actually seeded */
    unsigned int hidden_seed;
    unsigned int tmp;
    /* Derive a value from this parameter set's tables, so the fill below
       also depends on which parameter set is in use. */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    tmp = hidden_seed;
    tmp += tmp >> 16;
    tmp += tmp >> 8;
    /* Pre-fill every byte of the state with a byte folded from hidden_seed. */
    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
    state[0] = seed;
    state[1] = hidden_seed;
    /* MT-style initialization recursion; note it XORs into (rather than
       overwrites) the pre-filled words, starting at index 1. */
    for (i = 1; i < size; i++) {
        state[i] ^= (1812433253) * (state[i - 1] ^ (state[i - 1] >> 30)) + i;
    }
}
|
| 173 |
+
|
| 174 |
+
/*
 * Initialize an MTGP32 internal state array with a 32-bit integer array
 * as the seed. \b para should be one of the elements in the parameter
 * table (mtgp32-param-ref.c).
 *
 * @param[out] state MTGP internal state vector.
 * @param[in] para parameter structure.
 * @param[in] array a 32-bit integer array used as a seed.
 * @param[in] length length of the array.
 * @return always 0 (kept for API symmetry with mtgp32_init_by_str()).
 */
static __forceinline__ __host__
int mtgp32_init_by_array(unsigned int state[],
                         const mtgp32_params_fast_t *para,
                         unsigned int *array, int length) {
    int i, j, count;
    unsigned int r;
    int lag;                         /* offset of the second word updated per step */
    int mid;
    int size = para->mexp / 32 + 1;  /* number of 32-bit words in the state */
    unsigned int hidden_seed;
    unsigned int tmp;

    /* lag/mid depend only on the state size. */
    if (size >= 623) {
        lag = 11;
    } else if (size >= 68) {
        lag = 7;
    } else if (size >= 39) {
        lag = 5;
    } else {
        lag = 3;
    }
    mid = (size - lag) / 2;

    /* Parameter-set-specific pre-fill (same scheme as mtgp32_init_state). */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    tmp = hidden_seed;
    tmp += tmp >> 16;
    tmp += tmp >> 8;
    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
    state[0] = hidden_seed;

    /* Enough iterations to absorb the whole key and touch every word. */
    if (length + 1 > size) {
        count = length + 1;
    } else {
        count = size;
    }
    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
    state[mid] += r;
    r += length;
    state[(mid + lag) % size] += r;
    state[0] = r;
    i = 1;
    count--;
    /* Pass 1a: mix the key material into the state. */
    for (i = 1, j = 0; (j < count) && (j < length); j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += array[j] + i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Pass 1b: continue mixing after the key is exhausted. */
    for (; j < count; j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Pass 2: one full sweep with the second mixing function. */
    for (j = 0; j < size; j++) {
        r = ini_func2(state[i] + state[(i + mid) % size]
                      + state[(i + size - 1) % size]);
        state[(i + mid) % size] ^= r;
        r -= i;
        state[(i + mid + lag) % size] ^= r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Ensure the state is not entirely zero. */
    if (state[size - 1] == 0) {
        state[size - 1] = non_zero;
    }
    return 0;
}
|
| 259 |
+
|
| 260 |
+
/*
 * Initialize an MTGP32 internal state array with a character string as
 * the seed. \b para should be one of the elements in the parameter
 * table (mtgp32-param-ref.c). Same algorithm as mtgp32_init_by_array(),
 * with the string bytes as the key.
 *
 * @param[out] state MTGP internal state vector.
 * @param[in] para parameter structure.
 * @param[in] array a character array used as a seed (zero-terminated).
 * @return always 0.
 */
static __forceinline__ __host__
int mtgp32_init_by_str(unsigned int state[],
                       const mtgp32_params_fast_t *para, unsigned char *array) {
    int i, j, count;
    unsigned int r;
    int lag;                         /* offset of the second word updated per step */
    int mid;
    int size = para->mexp / 32 + 1;  /* number of 32-bit words in the state */
    int length = (unsigned int)strlen((char *)array);
    unsigned int hidden_seed;
    unsigned int tmp;

    /* lag/mid depend only on the state size. */
    if (size >= 623) {
        lag = 11;
    } else if (size >= 68) {
        lag = 7;
    } else if (size >= 39) {
        lag = 5;
    } else {
        lag = 3;
    }
    mid = (size - lag) / 2;

    /* Parameter-set-specific pre-fill (same scheme as mtgp32_init_state). */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    tmp = hidden_seed;
    tmp += tmp >> 16;
    tmp += tmp >> 8;
    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
    state[0] = hidden_seed;

    /* Enough iterations to absorb the whole key and touch every word. */
    if (length + 1 > size) {
        count = length + 1;
    } else {
        count = size;
    }
    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
    state[mid] += r;
    r += length;
    state[(mid + lag) % size] += r;
    state[0] = r;
    i = 1;
    count--;
    /* Pass 1a: mix the string bytes into the state. */
    for (i = 1, j = 0; (j < count) && (j < length); j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += array[j] + i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Pass 1b: continue mixing after the key is exhausted. */
    for (; j < count; j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Pass 2: one full sweep with the second mixing function. */
    for (j = 0; j < size; j++) {
        r = ini_func2(state[i] + state[(i + mid) % size]
                      + state[(i + size - 1) % size]);
        state[(i + mid) % size] ^= r;
        r -= i;
        state[(i + mid + lag) % size] ^= r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Ensure the state is not entirely zero. */
    if (state[size - 1] == 0) {
        state[size - 1] = non_zero;
    }
    return 0;
}
|
| 346 |
+
|
| 347 |
+
template<typename ParamsType>
|
| 348 |
+
static __forceinline__ __host__
|
| 349 |
+
curandStatus_t curandMakeMTGP32ConstantsImpl(const mtgp32_params_fast_t params[], ParamsType * p, const int block_num)
|
| 350 |
+
{
|
| 351 |
+
const int size1 = sizeof(unsigned int) * block_num;
|
| 352 |
+
const int size2 = sizeof(unsigned int) * block_num * TBL_SIZE;
|
| 353 |
+
unsigned int *h_pos_tbl;
|
| 354 |
+
unsigned int *h_sh1_tbl;
|
| 355 |
+
unsigned int *h_sh2_tbl;
|
| 356 |
+
unsigned int *h_param_tbl;
|
| 357 |
+
unsigned int *h_temper_tbl;
|
| 358 |
+
unsigned int *h_single_temper_tbl;
|
| 359 |
+
unsigned int *h_mask;
|
| 360 |
+
curandStatus_t status = CURAND_STATUS_SUCCESS;
|
| 361 |
+
|
| 362 |
+
h_pos_tbl = (unsigned int *)malloc(size1);
|
| 363 |
+
h_sh1_tbl = (unsigned int *)malloc(size1);
|
| 364 |
+
h_sh2_tbl = (unsigned int *)malloc(size1);
|
| 365 |
+
h_param_tbl = (unsigned int *)malloc(size2);
|
| 366 |
+
h_temper_tbl = (unsigned int *)malloc(size2);
|
| 367 |
+
h_single_temper_tbl = (unsigned int *)malloc(size2);
|
| 368 |
+
h_mask = (unsigned int *)malloc(sizeof(unsigned int));
|
| 369 |
+
if (h_pos_tbl == NULL
|
| 370 |
+
|| h_sh1_tbl == NULL
|
| 371 |
+
|| h_sh2_tbl == NULL
|
| 372 |
+
|| h_param_tbl == NULL
|
| 373 |
+
|| h_temper_tbl == NULL
|
| 374 |
+
|| h_single_temper_tbl == NULL
|
| 375 |
+
|| h_mask == NULL) {
|
| 376 |
+
if (h_pos_tbl != NULL) free(h_pos_tbl);
|
| 377 |
+
if (h_sh1_tbl != NULL) free(h_sh1_tbl);
|
| 378 |
+
if (h_sh2_tbl != NULL) free(h_sh2_tbl);
|
| 379 |
+
if (h_param_tbl != NULL) free(h_param_tbl);
|
| 380 |
+
if (h_temper_tbl != NULL) free(h_temper_tbl);
|
| 381 |
+
if (h_single_temper_tbl != NULL) free(h_single_temper_tbl);
|
| 382 |
+
if (h_mask != NULL) free(h_mask);
|
| 383 |
+
status = CURAND_STATUS_ALLOCATION_FAILED;
|
| 384 |
+
} else {
|
| 385 |
+
|
| 386 |
+
h_mask[0] = params[0].mask;
|
| 387 |
+
for (int i = 0; i < block_num; i++) {
|
| 388 |
+
h_pos_tbl[i] = params[i].pos;
|
| 389 |
+
h_sh1_tbl[i] = params[i].sh1;
|
| 390 |
+
h_sh2_tbl[i] = params[i].sh2;
|
| 391 |
+
for (int j = 0; j < TBL_SIZE; j++) {
|
| 392 |
+
h_param_tbl[i * TBL_SIZE + j] = params[i].tbl[j];
|
| 393 |
+
h_temper_tbl[i * TBL_SIZE + j] = params[i].tmp_tbl[j];
|
| 394 |
+
h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
|
| 395 |
+
}
|
| 396 |
+
}
|
| 397 |
+
if (cudaMemcpy( p->pos_tbl,
|
| 398 |
+
h_pos_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
|
| 399 |
+
{
|
| 400 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 401 |
+
} else
|
| 402 |
+
if (cudaMemcpy( p->sh1_tbl,
|
| 403 |
+
h_sh1_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
|
| 404 |
+
{
|
| 405 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 406 |
+
} else
|
| 407 |
+
if (cudaMemcpy( p->sh2_tbl,
|
| 408 |
+
h_sh2_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
|
| 409 |
+
{
|
| 410 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 411 |
+
} else
|
| 412 |
+
if (cudaMemcpy( p->param_tbl,
|
| 413 |
+
h_param_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
|
| 414 |
+
{
|
| 415 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 416 |
+
} else
|
| 417 |
+
if (cudaMemcpy( p->temper_tbl,
|
| 418 |
+
h_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
|
| 419 |
+
{
|
| 420 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 421 |
+
} else
|
| 422 |
+
if (cudaMemcpy( p->single_temper_tbl,
|
| 423 |
+
h_single_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
|
| 424 |
+
{
|
| 425 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 426 |
+
} else
|
| 427 |
+
if (cudaMemcpy( p->mask,
|
| 428 |
+
h_mask, sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess)
|
| 429 |
+
{
|
| 430 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 431 |
+
}
|
| 432 |
+
}
|
| 433 |
+
if (h_pos_tbl != NULL) free(h_pos_tbl);
|
| 434 |
+
if (h_sh1_tbl != NULL) free(h_sh1_tbl);
|
| 435 |
+
if (h_sh2_tbl != NULL) free(h_sh2_tbl);
|
| 436 |
+
if (h_param_tbl != NULL) free(h_param_tbl);
|
| 437 |
+
if (h_temper_tbl != NULL) free(h_temper_tbl);
|
| 438 |
+
if (h_single_temper_tbl != NULL)free(h_single_temper_tbl);
|
| 439 |
+
if (h_mask != NULL) free(h_mask);
|
| 440 |
+
return status;
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
/**
|
| 444 |
+
* \brief Set up constant parameters for the mtgp32 generator
|
| 445 |
+
*
|
| 446 |
+
* This host-side helper function re-organizes CURAND_NUM_MTGP32_PARAMS sets of
|
| 447 |
+
* generator parameters for use by kernel functions and copies the
|
| 448 |
+
* result to the specified location in device memory.
|
| 449 |
+
*
|
| 450 |
+
* \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
|
| 451 |
+
* \param p - pointer to a structure of type mtgp32_kernel_params_t in device memory.
|
| 452 |
+
*
|
| 453 |
+
* \return
|
| 454 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if host memory could not be allocated
|
| 455 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
|
| 456 |
+
* - CURAND_STATUS_SUCCESS otherwise
|
| 457 |
+
*/
|
| 458 |
+
static __forceinline__ __host__
|
| 459 |
+
curandStatus_t curandMakeMTGP32Constants(const mtgp32_params_fast_t params[], mtgp32_kernel_params_t * p)
|
| 460 |
+
{
|
| 461 |
+
return curandMakeMTGP32ConstantsImpl(params, p, CURAND_NUM_MTGP32_PARAMS);
|
| 462 |
+
}
|
| 463 |
+
|
| 464 |
+
/**
|
| 465 |
+
* \brief Set up initial states for the mtgp32 generator
|
| 466 |
+
*
|
| 467 |
+
* This host-side helper function initializes a number of states (one parameter set per state) for
|
| 468 |
+
* an mtgp32 generator. To accomplish this it allocates a state array in host memory,
|
| 469 |
+
* initializes that array, and copies the result to device memory.
|
| 470 |
+
*
|
| 471 |
+
* \param s - pointer to an array of states in device memory
|
| 472 |
+
* \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
|
| 473 |
+
* \param k - pointer to a structure of type mtgp32_kernel_params_t in device memory
|
| 474 |
+
* \param n - number of parameter sets/states to initialize
|
| 475 |
+
* \param seed - seed value
|
| 476 |
+
*
|
| 477 |
+
* \return
|
| 478 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if host memory state could not be allocated
|
| 479 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
|
| 480 |
+
* - CURAND_STATUS_SUCCESS otherwise
|
| 481 |
+
*/
|
| 482 |
+
static __forceinline__ __host__
|
| 483 |
+
curandStatus_t CURANDAPI curandMakeMTGP32KernelState(curandStateMtgp32_t *s,
|
| 484 |
+
mtgp32_params_fast_t params[],
|
| 485 |
+
mtgp32_kernel_params_t *k,
|
| 486 |
+
int n,
|
| 487 |
+
unsigned long long seed)
|
| 488 |
+
{
|
| 489 |
+
int i;
|
| 490 |
+
curandStatus_t status = CURAND_STATUS_SUCCESS;
|
| 491 |
+
curandStateMtgp32_t *h_status =(curandStateMtgp32_t *) malloc(sizeof(curandStateMtgp32_t) * n);
|
| 492 |
+
if (h_status == NULL) {
|
| 493 |
+
status = CURAND_STATUS_ALLOCATION_FAILED;
|
| 494 |
+
} else {
|
| 495 |
+
seed = seed ^ (seed >> 32);
|
| 496 |
+
for (i = 0; i < n; i++) {
|
| 497 |
+
mtgp32_init_state(&(h_status[i].s[0]), ¶ms[i],(unsigned int)seed + i + 1);
|
| 498 |
+
h_status[i].offset = 0;
|
| 499 |
+
h_status[i].pIdx = i;
|
| 500 |
+
h_status[i].k = k;
|
| 501 |
+
}
|
| 502 |
+
if (cudaMemcpy(s, h_status,
|
| 503 |
+
sizeof(curandStateMtgp32_t) * n,
|
| 504 |
+
cudaMemcpyHostToDevice) != cudaSuccess) {
|
| 505 |
+
status = CURAND_STATUS_INITIALIZATION_FAILED;
|
| 506 |
+
}
|
| 507 |
+
}
|
| 508 |
+
free(h_status);
|
| 509 |
+
return status;
|
| 510 |
+
}
|
| 511 |
+
|
| 512 |
+
/** @} */
|
| 513 |
+
|
| 514 |
+
#endif
|
| 515 |
+
|
| 516 |
+
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h
ADDED
|
@@ -0,0 +1,386 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
/*
|
| 51 |
+
* curand_mtgp32_kernel.h
|
| 52 |
+
*
|
| 53 |
+
*
|
| 54 |
+
* MTGP32-11213
|
| 55 |
+
*
|
| 56 |
+
* Mersenne Twister RNG for the GPU
|
| 57 |
+
*
|
| 58 |
+
* The period of generated integers is 2<sup>11213</sup>-1.
|
| 59 |
+
*
|
| 60 |
+
* This code generates 32-bit unsigned integers, and
|
| 61 |
+
* single precision floating point numbers uniformly distributed
|
| 62 |
+
* in the range [1, 2). (float r; 1.0 <= r < 2.0)
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
/*
|
| 66 |
+
* Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
| 67 |
+
* University. All rights reserved.
|
| 68 |
+
* Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
| 69 |
+
* University and University of Tokyo. All rights reserved.
|
| 70 |
+
*
|
| 71 |
+
* Redistribution and use in source and binary forms, with or without
|
| 72 |
+
* modification, are permitted provided that the following conditions are
|
| 73 |
+
* met:
|
| 74 |
+
*
|
| 75 |
+
* * Redistributions of source code must retain the above copyright
|
| 76 |
+
* notice, this list of conditions and the following disclaimer.
|
| 77 |
+
* * Redistributions in binary form must reproduce the above
|
| 78 |
+
* copyright notice, this list of conditions and the following
|
| 79 |
+
* disclaimer in the documentation and/or other materials provided
|
| 80 |
+
* with the distribution.
|
| 81 |
+
* * Neither the name of the Hiroshima University nor the names of
|
| 82 |
+
* its contributors may be used to endorse or promote products
|
| 83 |
+
* derived from this software without specific prior written
|
| 84 |
+
* permission.
|
| 85 |
+
*
|
| 86 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 87 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 88 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 89 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 90 |
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 91 |
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 92 |
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 93 |
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 94 |
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 95 |
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 96 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 97 |
+
*/
|
| 98 |
+
#if !defined CURAND_MTGP32_KERNEL_H
|
| 99 |
+
#define CURAND_MTGP32_KERNEL_H
|
| 100 |
+
|
| 101 |
+
#if !defined(QUALIFIERS)
|
| 102 |
+
#define QUALIFIERS static __forceinline__ __device__
|
| 103 |
+
#endif
|
| 104 |
+
|
| 105 |
+
#ifndef __CUDACC_RTC__
|
| 106 |
+
#include <cuda_runtime.h>
|
| 107 |
+
#include <stdlib.h>
|
| 108 |
+
#include <memory.h>
|
| 109 |
+
#include <string.h>
|
| 110 |
+
#endif // ifndef __CUDACC_RTC__
|
| 111 |
+
#include <nv/target>
|
| 112 |
+
#include "curand.h"
|
| 113 |
+
#include "curand_mtgp32.h"
|
| 114 |
+
|
| 115 |
+
/**
|
| 116 |
+
* \addtogroup DEVICE Device API
|
| 117 |
+
*
|
| 118 |
+
* @{
|
| 119 |
+
*/
|
| 120 |
+
|
| 121 |
+
#ifndef __CUDA_ARCH__
|
| 122 |
+
// define blockDim and threadIdx for host compatibility call
|
| 123 |
+
extern const dim3 blockDim;
|
| 124 |
+
extern const uint3 threadIdx;
|
| 125 |
+
#endif
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
/*
|
| 129 |
+
* The function of the recursion formula calculation.
|
| 130 |
+
*
|
| 131 |
+
* @param[in] X1 the farthest part of state array.
|
| 132 |
+
* @param[in] X2 the second farthest part of state array.
|
| 133 |
+
* @param[in] Y a part of state array.
|
| 134 |
+
* @param[in] bid block id.
|
| 135 |
+
* @return output
|
| 136 |
+
*/
|
| 137 |
+
QUALIFIERS unsigned int para_rec(mtgp32_kernel_params_t * k,unsigned int X1, unsigned int X2, unsigned int Y, int bid) {
|
| 138 |
+
unsigned int X = (X1 & k->mask[0]) ^ X2;
|
| 139 |
+
unsigned int MAT;
|
| 140 |
+
|
| 141 |
+
X ^= X << k->sh1_tbl[bid];
|
| 142 |
+
Y = X ^ (Y >> k->sh2_tbl[bid]);
|
| 143 |
+
MAT = k->param_tbl[bid][Y & 0x0f];
|
| 144 |
+
return Y ^ MAT;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
/*
|
| 148 |
+
* The tempering function.
|
| 149 |
+
*
|
| 150 |
+
* @param[in] V the output value should be tempered.
|
| 151 |
+
* @param[in] T the tempering helper value.
|
| 152 |
+
* @param[in] bid block id.
|
| 153 |
+
* @return the tempered value.
|
| 154 |
+
*/
|
| 155 |
+
QUALIFIERS unsigned int temper(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
|
| 156 |
+
unsigned int MAT;
|
| 157 |
+
|
| 158 |
+
T ^= T >> 16;
|
| 159 |
+
T ^= T >> 8;
|
| 160 |
+
MAT = k->temper_tbl[bid][T & 0x0f];
|
| 161 |
+
return V ^ MAT;
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
/*
|
| 165 |
+
* The tempering and converting function.
|
| 166 |
+
* By using the preset table, converting to IEEE format
|
| 167 |
+
* and tempering are done simultaneously.
|
| 168 |
+
*
|
| 169 |
+
* @param[in] V the output value should be tempered.
|
| 170 |
+
* @param[in] T the tempering helper value.
|
| 171 |
+
* @param[in] bid block id.
|
| 172 |
+
* @return the tempered and converted value.
|
| 173 |
+
*/
|
| 174 |
+
QUALIFIERS unsigned int temper_single(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
|
| 175 |
+
unsigned int MAT;
|
| 176 |
+
unsigned int r;
|
| 177 |
+
|
| 178 |
+
T ^= T >> 16;
|
| 179 |
+
T ^= T >> 8;
|
| 180 |
+
MAT = k->single_temper_tbl[bid][T & 0x0f];
|
| 181 |
+
r = (V >> 9) ^ MAT;
|
| 182 |
+
return r;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/**
|
| 186 |
+
* \brief Return 32-bits of pseudorandomness from a mtgp32 generator.
|
| 187 |
+
*
|
| 188 |
+
* Return 32-bits of pseudorandomness from the mtgp32 generator in \p state,
|
| 189 |
+
* increment position of generator by the number of threads in the block.
|
| 190 |
+
* Note the number of threads in the block can not exceed 256.
|
| 191 |
+
*
|
| 192 |
+
* \param state - Pointer to state to update
|
| 193 |
+
*
|
| 194 |
+
* \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
|
| 195 |
+
*/
|
| 196 |
+
QUALIFIERS unsigned int curand(curandStateMtgp32_t *state)
|
| 197 |
+
{
|
| 198 |
+
unsigned int t;
|
| 199 |
+
unsigned int d;
|
| 200 |
+
int pos = state->k->pos_tbl[state->pIdx];
|
| 201 |
+
unsigned int r;
|
| 202 |
+
unsigned int o;
|
| 203 |
+
|
| 204 |
+
d = blockDim.z * blockDim.y * blockDim.x;
|
| 205 |
+
//assert( d <= 256 );
|
| 206 |
+
t = (blockDim.z * blockDim.y * threadIdx.z) + (blockDim.x * threadIdx.y) + threadIdx.x;
|
| 207 |
+
r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
|
| 208 |
+
state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
|
| 209 |
+
state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
|
| 210 |
+
state->pIdx);
|
| 211 |
+
|
| 212 |
+
state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;
|
| 213 |
+
o = temper(state->k, r,
|
| 214 |
+
state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
|
| 215 |
+
state->pIdx);
|
| 216 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 217 |
+
__syncthreads();
|
| 218 |
+
)
|
| 219 |
+
if (t == 0)
|
| 220 |
+
{
|
| 221 |
+
state->offset = (state->offset + d) & MTGP32_STATE_MASK;
|
| 222 |
+
}
|
| 223 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 224 |
+
__syncthreads();
|
| 225 |
+
)
|
| 226 |
+
return o;
|
| 227 |
+
|
| 228 |
+
}
|
| 229 |
+
/**
|
| 230 |
+
* \brief Return 32-bits of pseudorandomness from a specific position in a mtgp32 generator.
|
| 231 |
+
*
|
| 232 |
+
* Return 32-bits of pseudorandomness from position \p index of the mtgp32 generator in \p state,
|
| 233 |
+
* increment position of generator by \p n positions, which must be the total number of positions
|
| 234 |
+
* upddated in the state by the thread block, for this invocation.
|
| 235 |
+
*
|
| 236 |
+
* Note :
|
| 237 |
+
* Thread indices must range from 0...\ n - 1.
|
| 238 |
+
* The number of positions updated may not exceed 256.
|
| 239 |
+
* A thread block may update more than one state, but a given state may not be updated by more than one thread block.
|
| 240 |
+
*
|
| 241 |
+
* \param state - Pointer to state to update
|
| 242 |
+
* \param index - Index (0..255) of the position within the state to draw from and update
|
| 243 |
+
* \param n - The total number of postions in this state that are being updated by this invocation
|
| 244 |
+
*
|
| 245 |
+
* \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
|
| 246 |
+
*/
|
| 247 |
+
QUALIFIERS unsigned int curand_mtgp32_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
|
| 248 |
+
{
|
| 249 |
+
unsigned int t;
|
| 250 |
+
int pos = state->k->pos_tbl[state->pIdx];
|
| 251 |
+
unsigned int r;
|
| 252 |
+
unsigned int o;
|
| 253 |
+
|
| 254 |
+
t = index;
|
| 255 |
+
r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
|
| 256 |
+
state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
|
| 257 |
+
state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
|
| 258 |
+
state->pIdx);
|
| 259 |
+
|
| 260 |
+
state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;
|
| 261 |
+
o = temper(state->k, r,
|
| 262 |
+
state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
|
| 263 |
+
state->pIdx);
|
| 264 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 265 |
+
__syncthreads();
|
| 266 |
+
)
|
| 267 |
+
if (index == 0)
|
| 268 |
+
{
|
| 269 |
+
state->offset = (state->offset + n) & MTGP32_STATE_MASK;
|
| 270 |
+
}
|
| 271 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 272 |
+
__syncthreads();
|
| 273 |
+
)
|
| 274 |
+
return o;
|
| 275 |
+
}
|
| 276 |
+
/**
|
| 277 |
+
* \brief Return a uniformly distributed float from a mtgp32 generator.
|
| 278 |
+
*
|
| 279 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 280 |
+
* from the mtgp32 generator in \p state, increment position of generator.
|
| 281 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 282 |
+
* point outputs are never returned.
|
| 283 |
+
*
|
| 284 |
+
* Note: This alternate derivation of a uniform float is provided for completeness
|
| 285 |
+
* with the original source
|
| 286 |
+
*
|
| 287 |
+
* \param state - Pointer to state to update
|
| 288 |
+
*
|
| 289 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 290 |
+
*/
|
| 291 |
+
QUALIFIERS float curand_mtgp32_single(curandStateMtgp32_t *state)
|
| 292 |
+
{
|
| 293 |
+
unsigned int t;
|
| 294 |
+
unsigned int d;
|
| 295 |
+
int pos = state->k->pos_tbl[state->pIdx];
|
| 296 |
+
unsigned int r;
|
| 297 |
+
unsigned int o_u;
|
| 298 |
+
float o_f;
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
t = blockDim.z * blockDim.y;
|
| 302 |
+
d = t * blockDim.x;
|
| 303 |
+
//assert( d <= 256 );
|
| 304 |
+
t += threadIdx.x;
|
| 305 |
+
r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
|
| 306 |
+
state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
|
| 307 |
+
state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
|
| 308 |
+
state->pIdx);
|
| 309 |
+
|
| 310 |
+
state->s[t] = r;
|
| 311 |
+
o_u = temper_single(state->k, r,
|
| 312 |
+
state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
|
| 313 |
+
state->pIdx);
|
| 314 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 315 |
+
__syncthreads();
|
| 316 |
+
)
|
| 317 |
+
if (threadIdx.x == 0)
|
| 318 |
+
{
|
| 319 |
+
state->offset = (state->offset + d) & MTGP32_STATE_MASK;
|
| 320 |
+
}
|
| 321 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 322 |
+
__syncthreads();
|
| 323 |
+
)
|
| 324 |
+
memcpy(&o_f, &o_u, sizeof(o_u));
|
| 325 |
+
return o_f;
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
/**
|
| 329 |
+
* \brief Return a uniformly distributed float from a specific position in a mtgp32 generator.
|
| 330 |
+
*
|
| 331 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 332 |
+
* from position \p index of the mtgp32 generator in \p state, and
|
| 333 |
+
* increment position of generator by \p n positions, which must be the total number of positions
|
| 334 |
+
* upddated in the state by the thread block, for this invocation.
|
| 335 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 336 |
+
* point outputs are never returned.
|
| 337 |
+
*
|
| 338 |
+
* Note 1:
|
| 339 |
+
* Thread indices must range from 0...\p n - 1.
|
| 340 |
+
* The number of positions updated may not exceed 256.
|
| 341 |
+
* A thread block may update more than one state, but a given state may not be updated by more than one thread block.
|
| 342 |
+
*
|
| 343 |
+
* Note 2: This alternate derivation of a uniform float is provided for completeness
|
| 344 |
+
* with the original source
|
| 345 |
+
*
|
| 346 |
+
* \param state - Pointer to state to update
|
| 347 |
+
* \param index - Index (0..255) of the position within the state to draw from and update
|
| 348 |
+
* \param n - The total number of postions in this state that are being updated by this invocation
|
| 349 |
+
*
|
| 350 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 351 |
+
*/
|
| 352 |
+
QUALIFIERS float curand_mtgp32_single_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
|
| 353 |
+
{
|
| 354 |
+
unsigned int t;
|
| 355 |
+
int pos = state->k->pos_tbl[state->pIdx];
|
| 356 |
+
unsigned int r;
|
| 357 |
+
unsigned int o_u;
|
| 358 |
+
float o_f;
|
| 359 |
+
|
| 360 |
+
t = index;
|
| 361 |
+
r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
|
| 362 |
+
state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
|
| 363 |
+
state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
|
| 364 |
+
state->pIdx);
|
| 365 |
+
|
| 366 |
+
state->s[t] = r;
|
| 367 |
+
o_u = temper_single(state->k, r,
|
| 368 |
+
state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
|
| 369 |
+
state->pIdx);
|
| 370 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 371 |
+
__syncthreads();
|
| 372 |
+
)
|
| 373 |
+
if (threadIdx.x == 0)
|
| 374 |
+
{
|
| 375 |
+
state->offset = (state->offset + n) & MTGP32_STATE_MASK;
|
| 376 |
+
}
|
| 377 |
+
NV_IF_TARGET(NV_IS_DEVICE,
|
| 378 |
+
__syncthreads();
|
| 379 |
+
)
|
| 380 |
+
memcpy(&o_f, &o_u, sizeof(o_u));
|
| 381 |
+
return o_f;
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
/** @} */
|
| 385 |
+
|
| 386 |
+
#endif
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_normal.h
ADDED
|
@@ -0,0 +1,840 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_NORMAL_H_)
|
| 52 |
+
#define CURAND_NORMAL_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
#include <nv/target>
|
| 64 |
+
|
| 65 |
+
#include "curand_mrg32k3a.h"
|
| 66 |
+
#include "curand_mtgp32_kernel.h"
|
| 67 |
+
#include "curand_philox4x32_x.h"
|
| 68 |
+
#include "curand_normal_static.h"
|
| 69 |
+
|
| 70 |
+
QUALIFIERS float2 _curand_box_muller(unsigned int x, unsigned int y)
|
| 71 |
+
{
|
| 72 |
+
float2 result;
|
| 73 |
+
float u = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2);
|
| 74 |
+
float v = y * CURAND_2POW32_INV_2PI + (CURAND_2POW32_INV_2PI/2);
|
| 75 |
+
float s;
|
| 76 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 77 |
+
s = sqrtf(-2.0f * logf(u));
|
| 78 |
+
__sincosf(v, &result.x, &result.y);
|
| 79 |
+
,
|
| 80 |
+
s = sqrtf(-2.0f * logf(u));
|
| 81 |
+
result.x = sinf(v);
|
| 82 |
+
result.y = cosf(v);
|
| 83 |
+
)
|
| 84 |
+
result.x *= s;
|
| 85 |
+
result.y *= s;
|
| 86 |
+
return result;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
QUALIFIERS float2 curand_box_muller_mrg(curandStateMRG32k3a_t * state)
|
| 90 |
+
{
|
| 91 |
+
float x, y;
|
| 92 |
+
x = curand_uniform(state);
|
| 93 |
+
y = curand_uniform(state) * CURAND_2PI;
|
| 94 |
+
float2 result;
|
| 95 |
+
float s;
|
| 96 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 97 |
+
s = sqrtf(-2.0f * logf(x));
|
| 98 |
+
__sincosf(y, &result.x, &result.y);
|
| 99 |
+
,
|
| 100 |
+
s = sqrtf(-2.0f * logf(x));
|
| 101 |
+
result.x = sinf(y);
|
| 102 |
+
result.y = cosf(y);
|
| 103 |
+
)
|
| 104 |
+
result.x *= s;
|
| 105 |
+
result.y *= s;
|
| 106 |
+
return result;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
QUALIFIERS double2
|
| 110 |
+
_curand_box_muller_double(unsigned int x0, unsigned int x1,
|
| 111 |
+
unsigned int y0, unsigned int y1)
|
| 112 |
+
{
|
| 113 |
+
double2 result;
|
| 114 |
+
unsigned long long zx = (unsigned long long)x0 ^
|
| 115 |
+
((unsigned long long)x1 << (53 - 32));
|
| 116 |
+
double u = zx * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
|
| 117 |
+
unsigned long long zy = (unsigned long long)y0 ^
|
| 118 |
+
((unsigned long long)y1 << (53 - 32));
|
| 119 |
+
double v = zy * (CURAND_2POW53_INV_DOUBLE*2.0) + CURAND_2POW53_INV_DOUBLE;
|
| 120 |
+
double s = sqrt(-2.0 * log(u));
|
| 121 |
+
|
| 122 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 123 |
+
sincospi(v, &result.x, &result.y);
|
| 124 |
+
,
|
| 125 |
+
result.x = sin(v*CURAND_PI_DOUBLE);
|
| 126 |
+
result.y = cos(v*CURAND_PI_DOUBLE);
|
| 127 |
+
)
|
| 128 |
+
result.x *= s;
|
| 129 |
+
result.y *= s;
|
| 130 |
+
|
| 131 |
+
return result;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
QUALIFIERS double2
|
| 135 |
+
curand_box_muller_mrg_double(curandStateMRG32k3a_t * state)
|
| 136 |
+
{
|
| 137 |
+
double x, y;
|
| 138 |
+
double2 result;
|
| 139 |
+
x = curand_uniform_double(state);
|
| 140 |
+
y = curand_uniform_double(state) * 2.0;
|
| 141 |
+
|
| 142 |
+
double s = sqrt(-2.0 * log(x));
|
| 143 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 144 |
+
sincospi(y, &result.x, &result.y);
|
| 145 |
+
,
|
| 146 |
+
result.x = sin(y*CURAND_PI_DOUBLE);
|
| 147 |
+
result.y = cos(y*CURAND_PI_DOUBLE);
|
| 148 |
+
)
|
| 149 |
+
result.x *= s;
|
| 150 |
+
result.y *= s;
|
| 151 |
+
return result;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
template <typename R>
|
| 155 |
+
QUALIFIERS float2 curand_box_muller(R *state)
|
| 156 |
+
{
|
| 157 |
+
float2 result;
|
| 158 |
+
unsigned int x = curand(state);
|
| 159 |
+
unsigned int y = curand(state);
|
| 160 |
+
result = _curand_box_muller(x, y);
|
| 161 |
+
return result;
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
// Produce four standard-normal floats from one curand4() draw by applying
// the Box-Muller transform to the two 32-bit word pairs.
template <typename R>
QUALIFIERS float4 curand_box_muller4(R *state)
{
    const uint4 words = curand4(state);
    const float2 lo = _curand_box_muller(words.x, words.y);
    const float2 hi = _curand_box_muller(words.z, words.w);
    float4 out;
    out.x = lo.x;
    out.y = lo.y;
    out.z = hi.x;
    out.w = hi.y;
    return out;
}
|
| 179 |
+
|
| 180 |
+
// Draw four 32-bit words (forming two 53-bit uniforms) and convert them
// into a pair of standard-normal doubles via the Box-Muller transform.
template <typename R>
QUALIFIERS double2 curand_box_muller_double(R *state)
{
    const unsigned int a = curand(state);
    const unsigned int b = curand(state);
    const unsigned int c = curand(state);
    const unsigned int d = curand(state);
    return _curand_box_muller_double(a, b, c, d);
}
|
| 191 |
+
|
| 192 |
+
// Two standard-normal doubles from a single curand4() draw: all four
// 32-bit words feed one double-precision Box-Muller transform.
template <typename R>
QUALIFIERS double2 curand_box_muller2_double(R *state)
{
    const uint4 words = curand4(state);
    return _curand_box_muller_double(words.x, words.y, words.z, words.w);
}
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
// Four standard-normal doubles built from two curand4() draws, each draw
// feeding one double-precision Box-Muller transform.
template <typename R>
QUALIFIERS double4 curand_box_muller4_double(R *state)
{
    const uint4 words0 = curand4(state);
    const uint4 words1 = curand4(state);
    const double2 lo = _curand_box_muller_double(words0.x, words0.y, words0.z, words0.w);
    const double2 hi = _curand_box_muller_double(words1.x, words1.y, words1.z, words1.w);
    double4 out;
    out.x = lo.x;
    out.y = lo.y;
    out.z = hi.x;
    out.w = hi.y;
    return out;
}
|
| 221 |
+
|
| 222 |
+
//QUALIFIERS float _curand_normal_icdf(unsigned int x)
|
| 223 |
+
//{
|
| 224 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 225 |
+
// float s = CURAND_SQRT2;
|
| 226 |
+
// // Mirror to avoid loss of precision
|
| 227 |
+
// if(x > 0x80000000UL) {
|
| 228 |
+
// x = 0xffffffffUL - x;
|
| 229 |
+
// s = -s;
|
| 230 |
+
// }
|
| 231 |
+
// float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 232 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 233 |
+
// return s * erfcinvf(2.0f * p);
|
| 234 |
+
//#else
|
| 235 |
+
// x++; //suppress warnings
|
| 236 |
+
// return 0.0f;
|
| 237 |
+
//#endif
|
| 238 |
+
//}
|
| 239 |
+
//
|
| 240 |
+
//QUALIFIERS float _curand_normal_icdf(unsigned long long x)
|
| 241 |
+
//{
|
| 242 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 243 |
+
// unsigned int t = (unsigned int)(x >> 32);
|
| 244 |
+
// float s = CURAND_SQRT2;
|
| 245 |
+
// // Mirror to avoid loss of precision
|
| 246 |
+
// if(t > 0x80000000UL) {
|
| 247 |
+
// t = 0xffffffffUL - t;
|
| 248 |
+
// s = -s;
|
| 249 |
+
// }
|
| 250 |
+
// float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 251 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 252 |
+
// return s * erfcinvf(2.0f * p);
|
| 253 |
+
//#else
|
| 254 |
+
// x++;
|
| 255 |
+
// return 0.0f;
|
| 256 |
+
//#endif
|
| 257 |
+
//}
|
| 258 |
+
//
|
| 259 |
+
//QUALIFIERS double _curand_normal_icdf_double(unsigned int x)
|
| 260 |
+
//{
|
| 261 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 262 |
+
// double s = CURAND_SQRT2_DOUBLE;
|
| 263 |
+
// // Mirror to avoid loss of precision
|
| 264 |
+
// if(x > 0x80000000UL) {
|
| 265 |
+
// x = 0xffffffffUL - x;
|
| 266 |
+
// s = -s;
|
| 267 |
+
// }
|
| 268 |
+
// double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
|
| 269 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 270 |
+
// return s * erfcinv(2.0 * p);
|
| 271 |
+
//#else
|
| 272 |
+
// x++;
|
| 273 |
+
// return 0.0;
|
| 274 |
+
//#endif
|
| 275 |
+
//}
|
| 276 |
+
//
|
| 277 |
+
//QUALIFIERS double _curand_normal_icdf_double(unsigned long long x)
|
| 278 |
+
//{
|
| 279 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 280 |
+
// double s = CURAND_SQRT2_DOUBLE;
|
| 281 |
+
// x >>= 11;
|
| 282 |
+
// // Mirror to avoid loss of precision
|
| 283 |
+
// if(x > 0x10000000000000UL) {
|
| 284 |
+
// x = 0x1fffffffffffffUL - x;
|
| 285 |
+
// s = -s;
|
| 286 |
+
// }
|
| 287 |
+
// double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
|
| 288 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 289 |
+
// return s * erfcinv(2.0 * p);
|
| 290 |
+
//#else
|
| 291 |
+
// x++;
|
| 292 |
+
// return 0.0;
|
| 293 |
+
//#endif
|
| 294 |
+
//}
|
| 295 |
+
//
|
| 296 |
+
|
| 297 |
+
/**
|
| 298 |
+
* \brief Return a normally distributed float from an XORWOW generator.
|
| 299 |
+
*
|
| 300 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 301 |
+
* standard deviation \p 1.0f from the XORWOW generator in \p state,
|
| 302 |
+
* increment position of generator by one.
|
| 303 |
+
*
|
| 304 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 305 |
+
* normally distributed results, then returns them one at a time.
|
| 306 |
+
* See ::curand_normal2() for a more efficient version that returns
|
| 307 |
+
* both results at once.
|
| 308 |
+
*
|
| 309 |
+
* \param state - Pointer to state to update
|
| 310 |
+
*
|
| 311 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 312 |
+
*/
|
| 313 |
+
QUALIFIERS float curand_normal(curandStateXORWOW_t *state)
{
    // Box-Muller yields two normals per transform; return the cached
    // spare first if one is pending, otherwise generate a fresh pair.
    if(state->boxmuller_flag == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    unsigned int w0 = curand(state);
    unsigned int w1 = curand(state);
    float2 pair = _curand_box_muller(w0, w1);
    state->boxmuller_extra = pair.y;        // stash the second normal
    state->boxmuller_flag = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 327 |
+
|
| 328 |
+
/**
|
| 329 |
+
* \brief Return a normally distributed float from an Philox4_32_10 generator.
|
| 330 |
+
*
|
| 331 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 332 |
+
* standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
|
| 333 |
+
* increment position of generator by one.
|
| 334 |
+
*
|
| 335 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 336 |
+
* normally distributed results, then returns them one at a time.
|
| 337 |
+
* See ::curand_normal2() for a more efficient version that returns
|
| 338 |
+
* both results at once.
|
| 339 |
+
*
|
| 340 |
+
* \param state - Pointer to state to update
|
| 341 |
+
*
|
| 342 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 343 |
+
*/
|
| 344 |
+
|
| 345 |
+
QUALIFIERS float curand_normal(curandStatePhilox4_32_10_t *state)
{
    // Return the cached second Box-Muller output when available;
    // otherwise draw two words and compute a fresh pair.
    if(state->boxmuller_flag == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    unsigned int w0 = curand(state);
    unsigned int w1 = curand(state);
    float2 pair = _curand_box_muller(w0, w1);
    state->boxmuller_extra = pair.y;        // stash the second normal
    state->boxmuller_flag = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
/**
|
| 363 |
+
* \brief Return a normally distributed float from an MRG32k3a generator.
|
| 364 |
+
*
|
| 365 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 366 |
+
* standard deviation \p 1.0f from the MRG32k3a generator in \p state,
|
| 367 |
+
* increment position of generator by one.
|
| 368 |
+
*
|
| 369 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 370 |
+
* normally distributed results, then returns them one at a time.
|
| 371 |
+
* See ::curand_normal2() for a more efficient version that returns
|
| 372 |
+
* both results at once.
|
| 373 |
+
*
|
| 374 |
+
* \param state - Pointer to state to update
|
| 375 |
+
*
|
| 376 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 377 |
+
*/
|
| 378 |
+
QUALIFIERS float curand_normal(curandStateMRG32k3a_t *state)
{
    // Serve the cached spare normal first; regenerate a pair only when
    // no spare is pending.
    if(state->boxmuller_flag == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    float2 pair = curand_box_muller_mrg(state);
    state->boxmuller_extra = pair.y;        // stash the second normal
    state->boxmuller_flag = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 389 |
+
|
| 390 |
+
/**
|
| 391 |
+
* \brief Return two normally distributed floats from an XORWOW generator.
|
| 392 |
+
*
|
| 393 |
+
* Return two normally distributed floats with mean \p 0.0f and
|
| 394 |
+
* standard deviation \p 1.0f from the XORWOW generator in \p state,
|
| 395 |
+
* increment position of generator by two.
|
| 396 |
+
*
|
| 397 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 398 |
+
* normally distributed results.
|
| 399 |
+
*
|
| 400 |
+
* \param state - Pointer to state to update
|
| 401 |
+
*
|
| 402 |
+
* \return Normally distributed float2 where each element is from a
|
| 403 |
+
* distribution with mean \p 0.0f and standard deviation \p 1.0f
|
| 404 |
+
*/
|
| 405 |
+
QUALIFIERS float2 curand_normal2(curandStateXORWOW_t *state)
{
    // Delegate directly to the generic Box-Muller pair generator.
    float2 pair = curand_box_muller(state);
    return pair;
}
|
| 409 |
+
/**
|
| 410 |
+
* \brief Return two normally distributed floats from an Philox4_32_10 generator.
|
| 411 |
+
*
|
| 412 |
+
* Return two normally distributed floats with mean \p 0.0f and
|
| 413 |
+
* standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
|
| 414 |
+
* increment position of generator by two.
|
| 415 |
+
*
|
| 416 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 417 |
+
* normally distributed results.
|
| 418 |
+
*
|
| 419 |
+
* \param state - Pointer to state to update
|
| 420 |
+
*
|
| 421 |
+
* \return Normally distributed float2 where each element is from a
|
| 422 |
+
* distribution with mean \p 0.0f and standard deviation \p 1.0f
|
| 423 |
+
*/
|
| 424 |
+
QUALIFIERS float2 curand_normal2(curandStatePhilox4_32_10_t *state)
{
    // Delegate directly to the generic Box-Muller pair generator.
    float2 pair = curand_box_muller(state);
    return pair;
}
|
| 428 |
+
|
| 429 |
+
/**
|
| 430 |
+
* \brief Return four normally distributed floats from an Philox4_32_10 generator.
|
| 431 |
+
*
|
| 432 |
+
* Return four normally distributed floats with mean \p 0.0f and
|
| 433 |
+
* standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
|
| 434 |
+
* increment position of generator by four.
|
| 435 |
+
*
|
| 436 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 437 |
+
* normally distributed results.
|
| 438 |
+
*
|
| 439 |
+
* \param state - Pointer to state to update
|
| 440 |
+
*
|
| 441 |
+
* \return Normally distributed float2 where each element is from a
|
| 442 |
+
* distribution with mean \p 0.0f and standard deviation \p 1.0f
|
| 443 |
+
*/
|
| 444 |
+
QUALIFIERS float4 curand_normal4(curandStatePhilox4_32_10_t *state)
{
    // Delegate to the four-output Box-Muller helper.
    float4 quad = curand_box_muller4(state);
    return quad;
}
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
/**
|
| 452 |
+
* \brief Return two normally distributed floats from an MRG32k3a generator.
|
| 453 |
+
*
|
| 454 |
+
* Return two normally distributed floats with mean \p 0.0f and
|
| 455 |
+
* standard deviation \p 1.0f from the MRG32k3a generator in \p state,
|
| 456 |
+
* increment position of generator by two.
|
| 457 |
+
*
|
| 458 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 459 |
+
* normally distributed results.
|
| 460 |
+
*
|
| 461 |
+
* \param state - Pointer to state to update
|
| 462 |
+
*
|
| 463 |
+
* \return Normally distributed float2 where each element is from a
|
| 464 |
+
* distribution with mean \p 0.0f and standard deviation \p 1.0f
|
| 465 |
+
*/
|
| 466 |
+
QUALIFIERS float2 curand_normal2(curandStateMRG32k3a_t *state)
{
    // Delegate to the MRG-specific Box-Muller pair generator.
    float2 pair = curand_box_muller_mrg(state);
    return pair;
}
|
| 470 |
+
|
| 471 |
+
/**
|
| 472 |
+
* \brief Return a normally distributed float from a MTGP32 generator.
|
| 473 |
+
*
|
| 474 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 475 |
+
* standard deviation \p 1.0f from the MTGP32 generator in \p state,
|
| 476 |
+
* increment position of generator.
|
| 477 |
+
*
|
| 478 |
+
* The implementation uses the inverse cumulative distribution function
|
| 479 |
+
* to generate normally distributed results.
|
| 480 |
+
*
|
| 481 |
+
* \param state - Pointer to state to update
|
| 482 |
+
*
|
| 483 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 484 |
+
*/
|
| 485 |
+
QUALIFIERS float curand_normal(curandStateMtgp32_t *state)
{
    // One raw draw mapped through the inverse normal CDF.
    unsigned int draw = curand(state);
    return _curand_normal_icdf(draw);
}
|
| 489 |
+
/**
|
| 490 |
+
* \brief Return a normally distributed float from a Sobol32 generator.
|
| 491 |
+
*
|
| 492 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 493 |
+
* standard deviation \p 1.0f from the Sobol32 generator in \p state,
|
| 494 |
+
* increment position of generator by one.
|
| 495 |
+
*
|
| 496 |
+
* The implementation uses the inverse cumulative distribution function
|
| 497 |
+
* to generate normally distributed results.
|
| 498 |
+
*
|
| 499 |
+
* \param state - Pointer to state to update
|
| 500 |
+
*
|
| 501 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 502 |
+
*/
|
| 503 |
+
QUALIFIERS float curand_normal(curandStateSobol32_t *state)
{
    // One raw draw mapped through the inverse normal CDF.
    unsigned int draw = curand(state);
    return _curand_normal_icdf(draw);
}
|
| 507 |
+
|
| 508 |
+
/**
|
| 509 |
+
* \brief Return a normally distributed float from a scrambled Sobol32 generator.
|
| 510 |
+
*
|
| 511 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 512 |
+
* standard deviation \p 1.0f from the scrambled Sobol32 generator in \p state,
|
| 513 |
+
* increment position of generator by one.
|
| 514 |
+
*
|
| 515 |
+
* The implementation uses the inverse cumulative distribution function
|
| 516 |
+
* to generate normally distributed results.
|
| 517 |
+
*
|
| 518 |
+
* \param state - Pointer to state to update
|
| 519 |
+
*
|
| 520 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 521 |
+
*/
|
| 522 |
+
QUALIFIERS float curand_normal(curandStateScrambledSobol32_t *state)
{
    // One raw draw mapped through the inverse normal CDF.
    unsigned int draw = curand(state);
    return _curand_normal_icdf(draw);
}
|
| 526 |
+
|
| 527 |
+
/**
|
| 528 |
+
* \brief Return a normally distributed float from a Sobol64 generator.
|
| 529 |
+
*
|
| 530 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 531 |
+
* standard deviation \p 1.0f from the Sobol64 generator in \p state,
|
| 532 |
+
* increment position of generator by one.
|
| 533 |
+
*
|
| 534 |
+
* The implementation uses the inverse cumulative distribution function
|
| 535 |
+
* to generate normally distributed results.
|
| 536 |
+
*
|
| 537 |
+
* \param state - Pointer to state to update
|
| 538 |
+
*
|
| 539 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 540 |
+
*/
|
| 541 |
+
QUALIFIERS float curand_normal(curandStateSobol64_t *state)
{
    // 64-bit draw mapped through the (unsigned long long) ICDF overload.
    unsigned long long draw = curand(state);
    return _curand_normal_icdf(draw);
}
|
| 545 |
+
|
| 546 |
+
/**
|
| 547 |
+
* \brief Return a normally distributed float from a scrambled Sobol64 generator.
|
| 548 |
+
*
|
| 549 |
+
* Return a single normally distributed float with mean \p 0.0f and
|
| 550 |
+
* standard deviation \p 1.0f from the scrambled Sobol64 generator in \p state,
|
| 551 |
+
* increment position of generator by one.
|
| 552 |
+
*
|
| 553 |
+
* The implementation uses the inverse cumulative distribution function
|
| 554 |
+
* to generate normally distributed results.
|
| 555 |
+
*
|
| 556 |
+
* \param state - Pointer to state to update
|
| 557 |
+
*
|
| 558 |
+
* \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
|
| 559 |
+
*/
|
| 560 |
+
QUALIFIERS float curand_normal(curandStateScrambledSobol64_t *state)
{
    // 64-bit draw mapped through the (unsigned long long) ICDF overload.
    unsigned long long draw = curand(state);
    return _curand_normal_icdf(draw);
}
|
| 564 |
+
|
| 565 |
+
/**
|
| 566 |
+
* \brief Return a normally distributed double from an XORWOW generator.
|
| 567 |
+
*
|
| 568 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 569 |
+
* standard deviation \p 1.0 from the XORWOW generator in \p state,
|
| 570 |
+
* increment position of generator.
|
| 571 |
+
*
|
| 572 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 573 |
+
* normally distributed results, then returns them one at a time.
|
| 574 |
+
* See ::curand_normal2_double() for a more efficient version that returns
|
| 575 |
+
* both results at once.
|
| 576 |
+
*
|
| 577 |
+
* \param state - Pointer to state to update
|
| 578 |
+
*
|
| 579 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 580 |
+
*/
|
| 581 |
+
QUALIFIERS double curand_normal_double(curandStateXORWOW_t *state)
{
    // Serve the cached spare double normal first if one is pending.
    if(state->boxmuller_flag_double == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    // Four 32-bit words form two 53-bit uniforms for the transform.
    unsigned int w0 = curand(state);
    unsigned int w1 = curand(state);
    unsigned int w2 = curand(state);
    unsigned int w3 = curand(state);
    double2 pair = _curand_box_muller_double(w0, w1, w2, w3);
    state->boxmuller_extra_double = pair.y;   // stash the second normal
    state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 597 |
+
|
| 598 |
+
/**
|
| 599 |
+
* \brief Return a normally distributed double from an Philox4_32_10 generator.
|
| 600 |
+
*
|
| 601 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 602 |
+
* standard deviation \p 1.0 from the Philox4_32_10 generator in \p state,
|
| 603 |
+
* increment position of generator.
|
| 604 |
+
*
|
| 605 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 606 |
+
* normally distributed results, then returns them one at a time.
|
| 607 |
+
* See ::curand_normal2_double() for a more efficient version that returns
|
| 608 |
+
* both results at once.
|
| 609 |
+
*
|
| 610 |
+
* \param state - Pointer to state to update
|
| 611 |
+
*
|
| 612 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 613 |
+
*/
|
| 614 |
+
|
| 615 |
+
QUALIFIERS double curand_normal_double(curandStatePhilox4_32_10_t *state)
{
    // Serve the cached spare double normal first if one is pending.
    if(state->boxmuller_flag_double == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    const uint4 words = curand4(state);
    double2 pair = _curand_box_muller_double(words.x, words.y, words.z, words.w);
    state->boxmuller_extra_double = pair.y;   // stash the second normal
    state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
/**
|
| 631 |
+
* \brief Return a normally distributed double from an MRG32k3a generator.
|
| 632 |
+
*
|
| 633 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 634 |
+
* standard deviation \p 1.0 from the XORWOW generator in \p state,
|
| 635 |
+
* increment position of generator.
|
| 636 |
+
*
|
| 637 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 638 |
+
* normally distributed results, then returns them one at a time.
|
| 639 |
+
* See ::curand_normal2_double() for a more efficient version that returns
|
| 640 |
+
* both results at once.
|
| 641 |
+
*
|
| 642 |
+
* \param state - Pointer to state to update
|
| 643 |
+
*
|
| 644 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 645 |
+
*/
|
| 646 |
+
QUALIFIERS double curand_normal_double(curandStateMRG32k3a_t *state)
{
    // Serve the cached spare double normal first if one is pending.
    if(state->boxmuller_flag_double == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    double2 pair = curand_box_muller_mrg_double(state);
    state->boxmuller_extra_double = pair.y;   // stash the second normal
    state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 657 |
+
|
| 658 |
+
/**
|
| 659 |
+
* \brief Return two normally distributed doubles from an XORWOW generator.
|
| 660 |
+
*
|
| 661 |
+
* Return two normally distributed doubles with mean \p 0.0 and
|
| 662 |
+
* standard deviation \p 1.0 from the XORWOW generator in \p state,
|
| 663 |
+
* increment position of generator by 2.
|
| 664 |
+
*
|
| 665 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 666 |
+
* normally distributed results.
|
| 667 |
+
*
|
| 668 |
+
* \param state - Pointer to state to update
|
| 669 |
+
*
|
| 670 |
+
* \return Normally distributed double2 where each element is from a
|
| 671 |
+
* distribution with mean \p 0.0 and standard deviation \p 1.0
|
| 672 |
+
*/
|
| 673 |
+
QUALIFIERS double2 curand_normal2_double(curandStateXORWOW_t *state)
{
    // Delegate directly to the generic double-precision Box-Muller helper.
    double2 pair = curand_box_muller_double(state);
    return pair;
}
|
| 677 |
+
|
| 678 |
+
/**
|
| 679 |
+
* \brief Return two normally distributed doubles from an Philox4_32_10 generator.
|
| 680 |
+
*
|
| 681 |
+
* Return two normally distributed doubles with mean \p 0.0 and
|
| 682 |
+
* standard deviation \p 1.0 from the Philox4_32_10 generator in \p state,
|
| 683 |
+
* increment position of generator by 2.
|
| 684 |
+
*
|
| 685 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 686 |
+
* normally distributed results.
|
| 687 |
+
*
|
| 688 |
+
* \param state - Pointer to state to update
|
| 689 |
+
*
|
| 690 |
+
* \return Normally distributed double2 where each element is from a
|
| 691 |
+
* distribution with mean \p 0.0 and standard deviation \p 1.0
|
| 692 |
+
*/
|
| 693 |
+
QUALIFIERS double2 curand_normal2_double(curandStatePhilox4_32_10_t *state)
{
    // One curand4() draw supplies all four 32-bit words for a single
    // double-precision Box-Muller pair.
    const uint4 words = curand4(state);
    return _curand_box_muller_double(words.x, words.y, words.z, words.w);
}
|
| 705 |
+
|
| 706 |
+
// not a part of API
|
| 707 |
+
QUALIFIERS double4 curand_normal4_double(curandStatePhilox4_32_10_t *state)
|
| 708 |
+
{
|
| 709 |
+
uint4 _x;
|
| 710 |
+
uint4 _y;
|
| 711 |
+
double4 result;
|
| 712 |
+
|
| 713 |
+
_x = curand4(state);
|
| 714 |
+
_y = curand4(state);
|
| 715 |
+
double2 v1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
|
| 716 |
+
double2 v2 = _curand_box_muller_double(_y.x, _y.y, _y.z, _y.w);
|
| 717 |
+
result.x = v1.x;
|
| 718 |
+
result.y = v1.y;
|
| 719 |
+
result.z = v2.x;
|
| 720 |
+
result.w = v2.y;
|
| 721 |
+
|
| 722 |
+
return result;
|
| 723 |
+
}
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
/**
|
| 727 |
+
* \brief Return two normally distributed doubles from an MRG32k3a generator.
|
| 728 |
+
*
|
| 729 |
+
* Return two normally distributed doubles with mean \p 0.0 and
|
| 730 |
+
* standard deviation \p 1.0 from the MRG32k3a generator in \p state,
|
| 731 |
+
* increment position of generator.
|
| 732 |
+
*
|
| 733 |
+
* The implementation uses a Box-Muller transform to generate two
|
| 734 |
+
* normally distributed results.
|
| 735 |
+
*
|
| 736 |
+
* \param state - Pointer to state to update
|
| 737 |
+
*
|
| 738 |
+
* \return Normally distributed double2 where each element is from a
|
| 739 |
+
* distribution with mean \p 0.0 and standard deviation \p 1.0
|
| 740 |
+
*/
|
| 741 |
+
QUALIFIERS double2 curand_normal2_double(curandStateMRG32k3a_t *state)
{
    // Delegate to the MRG-specific double-precision Box-Muller helper.
    double2 pair = curand_box_muller_mrg_double(state);
    return pair;
}
|
| 745 |
+
|
| 746 |
+
/**
|
| 747 |
+
* \brief Return a normally distributed double from an MTGP32 generator.
|
| 748 |
+
*
|
| 749 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 750 |
+
* standard deviation \p 1.0 from the MTGP32 generator in \p state,
|
| 751 |
+
* increment position of generator.
|
| 752 |
+
*
|
| 753 |
+
* The implementation uses the inverse cumulative distribution function
|
| 754 |
+
* to generate normally distributed results.
|
| 755 |
+
*
|
| 756 |
+
* \param state - Pointer to state to update
|
| 757 |
+
*
|
| 758 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 759 |
+
*/
|
| 760 |
+
QUALIFIERS double curand_normal_double(curandStateMtgp32_t *state)
{
    // One raw draw mapped through the double-precision inverse normal CDF.
    unsigned int draw = curand(state);
    return _curand_normal_icdf_double(draw);
}
|
| 764 |
+
|
| 765 |
+
/**
|
| 766 |
+
* \brief Return a normally distributed double from an Sobol32 generator.
|
| 767 |
+
*
|
| 768 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 769 |
+
* standard deviation \p 1.0 from the Sobol32 generator in \p state,
|
| 770 |
+
* increment position of generator by one.
|
| 771 |
+
*
|
| 772 |
+
* The implementation uses the inverse cumulative distribution function
|
| 773 |
+
* to generate normally distributed results.
|
| 774 |
+
*
|
| 775 |
+
* \param state - Pointer to state to update
|
| 776 |
+
*
|
| 777 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 778 |
+
*/
|
| 779 |
+
QUALIFIERS double curand_normal_double(curandStateSobol32_t *state)
{
    // One raw draw mapped through the double-precision inverse normal CDF.
    unsigned int draw = curand(state);
    return _curand_normal_icdf_double(draw);
}
|
| 783 |
+
|
| 784 |
+
/**
|
| 785 |
+
* \brief Return a normally distributed double from a scrambled Sobol32 generator.
|
| 786 |
+
*
|
| 787 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 788 |
+
* standard deviation \p 1.0 from the scrambled Sobol32 generator in \p state,
|
| 789 |
+
* increment position of generator by one.
|
| 790 |
+
*
|
| 791 |
+
* The implementation uses the inverse cumulative distribution function
|
| 792 |
+
* to generate normally distributed results.
|
| 793 |
+
*
|
| 794 |
+
* \param state - Pointer to state to update
|
| 795 |
+
*
|
| 796 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 797 |
+
*/
|
| 798 |
+
QUALIFIERS double curand_normal_double(curandStateScrambledSobol32_t *state)
{
    // One raw draw mapped through the double-precision inverse normal CDF.
    unsigned int draw = curand(state);
    return _curand_normal_icdf_double(draw);
}
|
| 802 |
+
|
| 803 |
+
/**
|
| 804 |
+
* \brief Return a normally distributed double from a Sobol64 generator.
|
| 805 |
+
*
|
| 806 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 807 |
+
* standard deviation \p 1.0 from the Sobol64 generator in \p state,
|
| 808 |
+
* increment position of generator by one.
|
| 809 |
+
*
|
| 810 |
+
* The implementation uses the inverse cumulative distribution function
|
| 811 |
+
* to generate normally distributed results.
|
| 812 |
+
*
|
| 813 |
+
* \param state - Pointer to state to update
|
| 814 |
+
*
|
| 815 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 816 |
+
*/
|
| 817 |
+
QUALIFIERS double curand_normal_double(curandStateSobol64_t *state)
{
    // 64-bit draw mapped through the (unsigned long long) ICDF overload.
    unsigned long long draw = curand(state);
    return _curand_normal_icdf_double(draw);
}
|
| 821 |
+
|
| 822 |
+
/**
|
| 823 |
+
* \brief Return a normally distributed double from a scrambled Sobol64 generator.
|
| 824 |
+
*
|
| 825 |
+
* Return a single normally distributed double with mean \p 0.0 and
|
| 826 |
+
* standard deviation \p 1.0 from the scrambled Sobol64 generator in \p state,
|
| 827 |
+
* increment position of generator by one.
|
| 828 |
+
*
|
| 829 |
+
* The implementation uses the inverse cumulative distribution function
|
| 830 |
+
* to generate normally distributed results.
|
| 831 |
+
*
|
| 832 |
+
* \param state - Pointer to state to update
|
| 833 |
+
*
|
| 834 |
+
* \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
|
| 835 |
+
*/
|
| 836 |
+
QUALIFIERS double curand_normal_double(curandStateScrambledSobol64_t *state)
{
    // 64-bit draw mapped through the (unsigned long long) ICDF overload.
    unsigned long long draw = curand(state);
    return _curand_normal_icdf_double(draw);
}
|
| 840 |
+
#endif // !defined(CURAND_NORMAL_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_normal_static.h
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
#ifndef CURAND_NORMAL_STATIC_H
|
| 49 |
+
#define CURAND_NORMAL_STATIC_H
|
| 50 |
+
|
| 51 |
+
#define QUALIFIERS_STATIC __host__ __device__ __forceinline__
|
| 52 |
+
|
| 53 |
+
#include <nv/target>
|
| 54 |
+
#if defined(HOST_HAVE_ERFCINVF)
|
| 55 |
+
#define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) _NV_BLOCK_EXPAND(t)
|
| 56 |
+
#else
|
| 57 |
+
#define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, t, f)
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
/*
 * Map a uniformly distributed 32-bit integer to a standard-normal sample
 * via the inverse CDF: normcdfinv(p) = -sqrt(2) * erfcinv(2p).
 *
 * The first macro branch is used on device (or when the host provides
 * erfcinvf); the second branch is host-only dead code that returns 0.0f.
 */
QUALIFIERS_STATIC float _curand_normal_icdf(unsigned int x)
{
    IF_DEVICE_OR_HAVE_ERFCINVF(
    float s = CURAND_SQRT2;
    // Mirror to avoid loss of precision: fold the upper half of the input
    // range onto the lower half and flip the sign of the result instead.
    if(x > 0x80000000UL) {
        x = 0xffffffffUL - x;
        s = -s;
    }
    // Scale to (0, 0.5]; the half-ulp offset keeps p strictly positive.
    float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
    // p is in (0, 0.5], 2p is in (0, 1]
    return s * erfcinvf(2.0f * p);
    ,
    x++; //suppress warnings
    return 0.0f;
    )
}
|
| 77 |
+
|
| 78 |
+
/*
 * 64-bit overload of _curand_normal_icdf.  Only the top 32 bits of the
 * input are used, since single precision cannot resolve more than that.
 */
QUALIFIERS_STATIC float _curand_normal_icdf(unsigned long long x)
{
    IF_DEVICE_OR_HAVE_ERFCINVF(
    // Keep the high 32 bits; the low bits are below float resolution.
    unsigned int t = (unsigned int)(x >> 32);
    float s = CURAND_SQRT2;
    // Mirror to avoid loss of precision
    if(t > 0x80000000UL) {
        t = 0xffffffffUL - t;
        s = -s;
    }
    float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
    // p is in (0 - 0.5] 2p is in (0 - 1]
    return s * erfcinvf(2.0f * p);
    ,
    x++;
    return 0.0f;
    )
}
|
| 96 |
+
|
| 97 |
+
/*
 * Double-precision inverse-CDF normal transform of a 32-bit uniform
 * integer: returns sign * sqrt(2) * erfcinv(2p) with p in (0, 0.5].
 */
QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned int x)
{
    IF_DEVICE_OR_HAVE_ERFCINVF(
    double s = CURAND_SQRT2_DOUBLE;
    // Mirror to avoid loss of precision
    if(x > 0x80000000UL) {
        x = 0xffffffffUL - x;
        s = -s;
    }
    // Half-ulp offset keeps p strictly positive so erfcinv never sees 0.
    double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
    // p is in (0 - 0.5] 2p is in (0 - 1]
    return s * erfcinv(2.0 * p);
    ,
    x++;
    return 0.0;
    )
}
|
| 114 |
+
|
| 115 |
+
/*
 * 64-bit overload of _curand_normal_icdf_double.  The input is reduced to
 * 53 bits (x >>= 11), matching the double-precision mantissa width.
 */
QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned long long x)
{
    IF_DEVICE_OR_HAVE_ERFCINVF(
    double s = CURAND_SQRT2_DOUBLE;
    // Keep 53 significant bits; more cannot be represented in a double.
    x >>= 11;
    // Mirror to avoid loss of precision (fold around the 2^52 midpoint).
    if(x > 0x10000000000000UL) {
        x = 0x1fffffffffffffUL - x;
        s = -s;
    }
    double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
    // p is in (0 - 0.5] 2p is in (0 - 1]
    return s * erfcinv(2.0 * p);
    ,
    x++;
    return 0.0;
    )
}
|
| 133 |
+
#undef QUALIFIERS_STATIC
|
| 134 |
+
#endif
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_philox4x32_x.h
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
/*
|
| 49 |
+
Copyright 2010-2011, D. E. Shaw Research.
|
| 50 |
+
All rights reserved.
|
| 51 |
+
|
| 52 |
+
Redistribution and use in source and binary forms, with or without
|
| 53 |
+
modification, are permitted provided that the following conditions are
|
| 54 |
+
met:
|
| 55 |
+
|
| 56 |
+
* Redistributions of source code must retain the above copyright
|
| 57 |
+
notice, this list of conditions, and the following disclaimer.
|
| 58 |
+
|
| 59 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 60 |
+
notice, this list of conditions, and the following disclaimer in the
|
| 61 |
+
documentation and/or other materials provided with the distribution.
|
| 62 |
+
|
| 63 |
+
* Neither the name of D. E. Shaw Research nor the names of its
|
| 64 |
+
contributors may be used to endorse or promote products derived from
|
| 65 |
+
this software without specific prior written permission.
|
| 66 |
+
|
| 67 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 68 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 69 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 70 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 71 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 72 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 73 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 74 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 75 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 76 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 77 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 78 |
+
*/
|
| 79 |
+
|
| 80 |
+
#ifndef CURAND_PHILOX4X32_X__H_
|
| 81 |
+
#define CURAND_PHILOX4X32_X__H_
|
| 82 |
+
#include <nv/target>
|
| 83 |
+
|
| 84 |
+
#if !defined(QUALIFIERS)
|
| 85 |
+
#define QUALIFIERS static __forceinline__ __device__
|
| 86 |
+
#endif
|
| 87 |
+
|
| 88 |
+
#define PHILOX_W32_0 (0x9E3779B9)
|
| 89 |
+
#define PHILOX_W32_1 (0xBB67AE85)
|
| 90 |
+
#define PHILOX_M4x32_0 (0xD2511F53)
|
| 91 |
+
#define PHILOX_M4x32_1 (0xCD9E8D57)
|
| 92 |
+
|
| 93 |
+
/*
 * State for the Philox4x32-10 counter-based generator.
 * ctr is the 128-bit counter (x = least significant word), key is the
 * 64-bit key; output caches the last generated 4x32-bit block and STATE
 * indexes the next unconsumed word within it.  The boxmuller_* fields
 * buffer the spare value produced by the Box-Muller normal transform.
 */
struct curandStatePhilox4_32_10 {
    uint4 ctr;                      // 128-bit counter, little-endian words
    uint4 output;                   // cached output block of the current counter
    uint2 key;                      // 64-bit Philox key
    unsigned int STATE;             // index of next unused word in `output`
    int boxmuller_flag;             // nonzero: a spare float normal is buffered
    int boxmuller_flag_double;      // nonzero: a spare double normal is buffered
    float boxmuller_extra;          // the buffered spare float normal
    double boxmuller_extra_double;  // the buffered spare double normal
};

typedef struct curandStatePhilox4_32_10 curandStatePhilox4_32_10_t;
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
/*
 * Advance the 128-bit Philox counter by n (modulo 2^128).
 *
 * Bug fix: the previous version propagated the carry from the low word by
 * doing `nhi++`.  When the low-word addition carried AND nhi was already
 * 0xffffffff, that increment wrapped to 0 and the carry into ctr.z was
 * silently lost.  The high-word addition is now done in 64-bit arithmetic
 * so the carry always propagates correctly.
 */
QUALIFIERS void Philox_State_Incr(curandStatePhilox4_32_10_t* s, unsigned long long n)
{
   unsigned int nlo = (unsigned int)(n);
   unsigned int nhi = (unsigned int)(n>>32);

   // Add the low word; unsigned wrap-around means a carry occurred.
   s->ctr.x += nlo;
   unsigned int carry = (s->ctr.x < nlo) ? 1u : 0u;

   // Add high word plus carry in 64 bits so nhi == 0xffffffff with
   // carry == 1 still produces a carry-out into ctr.z.
   unsigned long long sum = (unsigned long long)s->ctr.y + nhi + carry;
   s->ctr.y = (unsigned int)sum;
   if ((sum >> 32) == 0)
      return;                    // no carry out of the low 64 bits
   if (++s->ctr.z) return;       // ripple carry through the upper words
   ++s->ctr.w;                   // carry out of ctr.w wraps (mod 2^128)
}
|
| 122 |
+
|
| 123 |
+
/*
 * Add n to the upper 64-bit half (ctr.w:ctr.z) of the 128-bit counter.
 * Overflow out of ctr.w wraps, i.e. the addition is modulo 2^64 on the
 * high half only.
 */
QUALIFIERS void Philox_State_Incr_hi(curandStatePhilox4_32_10_t* s, unsigned long long n)
{
    // Assemble the high half as one 64-bit value, add, and split back.
    unsigned long long hi = ((unsigned long long)s->ctr.w << 32)
                          | (unsigned long long)s->ctr.z;
    hi += n;
    s->ctr.z = (unsigned int)hi;
    s->ctr.w = (unsigned int)(hi >> 32);
}
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
/*
 * Advance the 128-bit counter by exactly one, rippling the carry upward
 * word by word; the carry out of the top word is discarded (mod 2^128).
 */
QUALIFIERS void Philox_State_Incr(curandStatePhilox4_32_10_t* s)
{
    ++s->ctr.x;
    if (s->ctr.x != 0) return;   // no wrap, done
    ++s->ctr.y;
    if (s->ctr.y != 0) return;
    ++s->ctr.z;
    if (s->ctr.z != 0) return;
    ++s->ctr.w;                  // final carry wraps silently
}
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
/*
 * 32x32 -> 64-bit unsigned multiply: returns the low 32 bits of a*b and
 * stores the high 32 bits through hip.  On device it uses the __umulhi
 * intrinsic; on host it widens to 64-bit and shifts.
 */
QUALIFIERS unsigned int mulhilo32(unsigned int a, unsigned int b, unsigned int* hip)
{
    NV_IF_ELSE_TARGET(NV_IS_HOST,
    // host code
    unsigned long long product = ((unsigned long long)a) * ((unsigned long long)b);
    *hip = product >> 32;
    return (unsigned int)product;
    ,
    // device code
    *hip = __umulhi(a,b);
    return a*b;
    )
}
|
| 159 |
+
|
| 160 |
+
/*
 * One Philox-4x32 round: two 32x32->64 multiplies against the fixed
 * multipliers, then the high halves are XOR-mixed with the other counter
 * words and the round key, and the low halves are passed through.
 */
QUALIFIERS uint4 _philox4x32round(uint4 ctr, uint2 key)
{
    unsigned int high0;
    unsigned int high1;
    const unsigned int low0 = mulhilo32(PHILOX_M4x32_0, ctr.x, &high0);
    const unsigned int low1 = mulhilo32(PHILOX_M4x32_1, ctr.z, &high1);

    uint4 out;
    out.x = high1 ^ ctr.y ^ key.x;
    out.y = low1;
    out.z = high0 ^ ctr.w ^ key.y;
    out.w = low0;
    return out;
}
|
| 170 |
+
|
| 171 |
+
/*
 * Full Philox4x32-10 block function: ten rounds over counter c with key
 * k, bumping the key by the Weyl constants after each of the first nine
 * rounds.  The constant-trip loop is equivalent to the hand-unrolled
 * original (and is fully unrollable by the compiler).
 */
QUALIFIERS uint4 curand_Philox4x32_10( uint4 c, uint2 k)
{
    for (int round = 0; round < 9; ++round) {
        c = _philox4x32round(c, k);
        k.x += PHILOX_W32_0;
        k.y += PHILOX_W32_1;
    }
    return _philox4x32round(c, k);  // round 10, no key bump afterwards
}
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
#endif
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_poisson.h
ADDED
|
@@ -0,0 +1,763 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_POISSON_H_)
|
| 52 |
+
#define CURAND_POISSON_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include <nv/target>
|
| 65 |
+
|
| 66 |
+
#include "curand_mrg32k3a.h"
|
| 67 |
+
#include "curand_mtgp32_kernel.h"
|
| 68 |
+
#include "curand_philox4x32_x.h"
|
| 69 |
+
|
| 70 |
+
#define CR_CUDART_PI 3.1415926535897931e+0
|
| 71 |
+
#define CR_CUDART_TWO_TO_52 4503599627370496.0
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
/*
 * Fast reciprocal square root 1/sqrt(a).  On device this uses the
 * approximate flush-to-zero PTX instruction; on host it falls back to
 * 1.0f / sqrtf(a), so results differ slightly between the two targets.
 */
QUALIFIERS float __cr_rsqrt(float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
    asm ("rsqrt.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
    ,
    a = 1.0f / sqrtf (a);
    )
    return a;
}
|
| 83 |
+
|
| 84 |
+
/*
 * Fast exp(a).  On device: exp(a) = 2^(a * log2(e)) via the approximate
 * ex2 PTX instruction (1.4426950408889634074 is log2(e)); on host it
 * falls back to expf.
 */
QUALIFIERS float __cr_exp (float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
    a = a * 1.4426950408889634074;
    asm ("ex2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
    ,
    a = expf (a);
    )
    return a;
}
|
| 94 |
+
|
| 95 |
+
/*
 * Fast natural log(a).  On device: ln(a) = log2(a) * ln(2) via the
 * approximate lg2 PTX instruction (0.69314718055994530942 is ln(2));
 * on host it falls back to logf.
 */
QUALIFIERS float __cr_log (float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
    asm ("lg2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
    a = a * 0.69314718055994530942;
    ,
    a = logf (a);
    )
    return a;
}
|
| 105 |
+
|
| 106 |
+
/*
 * Fast reciprocal 1/a.  On device this uses the approximate flush-to-zero
 * rcp PTX instruction; on host it computes an exact division.
 */
QUALIFIERS float __cr_rcp (float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
    asm ("rcp.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
    ,
    a = 1.0f / a;
    )
    return a;
}
|
| 115 |
+
|
| 116 |
+
/* Computes regularized gamma function: gammainc(a,x)/gamma(a) */
|
| 117 |
+
QUALIFIERS float __cr_pgammainc (float a, float x)
|
| 118 |
+
{
|
| 119 |
+
float t, alpha, beta;
|
| 120 |
+
|
| 121 |
+
/* First level parametrization constants */
|
| 122 |
+
float ma1 = 1.43248035075540910f,
|
| 123 |
+
ma2 = 0.12400979329415655f,
|
| 124 |
+
ma3 = 0.00025361074907033f,
|
| 125 |
+
mb1 = 0.21096734870196546f,
|
| 126 |
+
mb2 = 1.97381164089999420f,
|
| 127 |
+
mb3 = 0.94201734077887530f;
|
| 128 |
+
|
| 129 |
+
/* Second level parametrization constants (depends only on a) */
|
| 130 |
+
|
| 131 |
+
alpha = __cr_rsqrt (a - ma2);
|
| 132 |
+
alpha = ma1 * alpha + ma3;
|
| 133 |
+
beta = __cr_rsqrt (a - mb2);
|
| 134 |
+
beta = mb1 * beta + mb3;
|
| 135 |
+
|
| 136 |
+
/* Final approximation (depends on a and x) */
|
| 137 |
+
|
| 138 |
+
t = a - x;
|
| 139 |
+
t = alpha * t - beta;
|
| 140 |
+
t = 1.0f + __cr_exp (t);
|
| 141 |
+
t = t * t;
|
| 142 |
+
t = __cr_rcp (t);
|
| 143 |
+
|
| 144 |
+
/* Negative a,x or a,x=NAN requires special handling */
|
| 145 |
+
//t = !(x > 0 && a >= 0) ? 0.0 : t;
|
| 146 |
+
|
| 147 |
+
return t;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
/* Computes inverse of pgammainc */
|
| 151 |
+
QUALIFIERS float __cr_pgammaincinv (float a, float y)
|
| 152 |
+
{
|
| 153 |
+
float t, alpha, beta;
|
| 154 |
+
|
| 155 |
+
/* First level parametrization constants */
|
| 156 |
+
|
| 157 |
+
float ma1 = 1.43248035075540910f,
|
| 158 |
+
ma2 = 0.12400979329415655f,
|
| 159 |
+
ma3 = 0.00025361074907033f,
|
| 160 |
+
mb1 = 0.21096734870196546f,
|
| 161 |
+
mb2 = 1.97381164089999420f,
|
| 162 |
+
mb3 = 0.94201734077887530f;
|
| 163 |
+
|
| 164 |
+
/* Second level parametrization constants (depends only on a) */
|
| 165 |
+
|
| 166 |
+
alpha = __cr_rsqrt (a - ma2);
|
| 167 |
+
alpha = ma1 * alpha + ma3;
|
| 168 |
+
beta = __cr_rsqrt (a - mb2);
|
| 169 |
+
beta = mb1 * beta + mb3;
|
| 170 |
+
|
| 171 |
+
/* Final approximation (depends on a and y) */
|
| 172 |
+
|
| 173 |
+
t = __cr_rsqrt (y) - 1.0f;
|
| 174 |
+
t = __cr_log (t);
|
| 175 |
+
t = beta + t;
|
| 176 |
+
t = - t * __cr_rcp (alpha) + a;
|
| 177 |
+
/* Negative a,x or a,x=NAN requires special handling */
|
| 178 |
+
//t = !(y > 0 && a >= 0) ? 0.0 : t;
|
| 179 |
+
return t;
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
/*
 * Table of lgamma(n) for n = 1..9 (entry i holds lgamma(i+1)), used by
 * __cr_lgamma_integer for small arguments.  Declared `inline` under RDC
 * with C++17 inline variables to avoid duplicate __constant__ symbols
 * across translation units; otherwise `static` per-TU.
 */
#if defined(__CUDACC_RDC__) && (__cplusplus >= 201703L) && defined(__cpp_inline_variables)
inline __constant__ double __cr_lgamma_table [] = {
#else
static __constant__ double __cr_lgamma_table [] = {
#endif
    0.000000000000000000e-1,   // lgamma(1)
    0.000000000000000000e-1,   // lgamma(2)
    6.931471805599453094e-1,   // lgamma(3)
    1.791759469228055001e0,    // lgamma(4)
    3.178053830347945620e0,    // lgamma(5)
    4.787491742782045994e0,    // lgamma(6)
    6.579251212010100995e0,    // lgamma(7)
    8.525161361065414300e0,    // lgamma(8)
    1.060460290274525023e1     // lgamma(9)
};
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
/*
 * lgamma(a) for a positive integer argument.  For a <= 8 the value comes
 * from the precomputed table (constant memory on device, a switch on
 * host); for a > 8 it uses a Stirling-series approximation.
 *
 * NOTE(review): fa is computed as fabs((float)a), so for |a| > 2^24 the
 * float cast loses integer precision before the Stirling evaluation —
 * presumably callers only pass small a; confirm against call sites.
 */
QUALIFIERS double __cr_lgamma_integer(int a)
{
    double s;
    double t;
    double fa = fabs((float)a);
    double sum;

    if (a > 8) {
        /* Stirling approximation; coefficients from Hart et al, "Computer
         * Approximations", Wiley 1968. Approximation 5404.
         */
        s = 1.0 / fa;
        t = s * s;
        // Horner evaluation of the asymptotic series in 1/fa^2.
        sum = -0.1633436431e-2;
        sum = sum * t + 0.83645878922e-3;
        sum = sum * t - 0.5951896861197e-3;
        sum = sum * t + 0.793650576493454e-3;
        sum = sum * t - 0.277777777735865004e-2;
        sum = sum * t + 0.833333333333331018375e-1;
        sum = sum * s + 0.918938533204672;   // + 0.5*ln(2*pi)
        // lgamma(fa) ~ (fa - 0.5)*ln(fa) - fa + series
        s = 0.5 * log (fa);
        t = fa - 0.5;
        s = s * t;
        t = s - fa;
        s = s + sum;
        t = t + s;
        return t;
    } else {
        NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        // Device: direct lookup in the __constant__ table (1-based a).
        return __cr_lgamma_table [(int) fa-1];
        ,
        // Host: same values, expressed as a switch so no device symbol
        // is referenced from host code.
        switch(a) {
            case 1: return 0.000000000000000000e-1;
            case 2: return 0.000000000000000000e-1;
            case 3: return 6.931471805599453094e-1;
            case 4: return 1.791759469228055001e0;
            case 5: return 3.178053830347945620e0;
            case 6: return 4.787491742782045994e0;
            case 7: return 6.579251212010100995e0;
            case 8: return 8.525161361065414300e0;
            default: return 1.060460290274525023e1;
        }
        )
    }
}
|
| 244 |
+
|
| 245 |
+
#define KNUTH_FLOAT_CONST 60.0
|
| 246 |
+
template <typename T>
|
| 247 |
+
// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
|
| 248 |
+
QUALIFIERS unsigned int curand_poisson_knuth(T *state, float lambda)
|
| 249 |
+
{
|
| 250 |
+
unsigned int k = 0;
|
| 251 |
+
float p = expf(lambda);
|
| 252 |
+
do{
|
| 253 |
+
k++;
|
| 254 |
+
p *= curand_uniform(state);
|
| 255 |
+
}while (p > 1.0);
|
| 256 |
+
return k-1;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
template <typename T>
|
| 260 |
+
// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
|
| 261 |
+
QUALIFIERS uint4 curand_poisson_knuth4(T *state, float lambda)
|
| 262 |
+
{
|
| 263 |
+
uint4 k = {0,0,0,0};
|
| 264 |
+
float exp_lambda = expf(lambda);
|
| 265 |
+
float4 p={ exp_lambda,exp_lambda,exp_lambda,exp_lambda };
|
| 266 |
+
do{
|
| 267 |
+
k.x++;
|
| 268 |
+
p.x *= curand_uniform(state);
|
| 269 |
+
}while (p.x > 1.0);
|
| 270 |
+
do{
|
| 271 |
+
k.y++;
|
| 272 |
+
p.y *= curand_uniform(state);
|
| 273 |
+
}while (p.y > 1.0);
|
| 274 |
+
do{
|
| 275 |
+
k.z++;
|
| 276 |
+
p.z *= curand_uniform(state);
|
| 277 |
+
}while (p.z > 1.0);
|
| 278 |
+
do{
|
| 279 |
+
k.w++;
|
| 280 |
+
p.w *= curand_uniform(state);
|
| 281 |
+
}while (p.w > 1.0);
|
| 282 |
+
|
| 283 |
+
k.x--;
|
| 284 |
+
k.y--;
|
| 285 |
+
k.z--;
|
| 286 |
+
k.w--;
|
| 287 |
+
return k;
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
template <typename T>
// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
// Maps one raw draw x to a discrete sample: pick cell j from the uniform,
// then either keep j or jump to its alias K[j] depending on V[j].
QUALIFIERS unsigned int _curand_M2_double(T x, curandDistributionM2Shift_t distributionM2)
{
    double u = _curand_uniform_double(x);
    int j = (int) floor(distributionM2->length*u);

    double histogramVj;
    unsigned int histogramKj;
    // On SM 3.5+ read the tables through the read-only data cache (__ldg).
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
        histogramVj = __ldg( &(distributionM2->histogram->V[j]));
        histogramKj = __ldg( &(distributionM2->histogram->K[j]));
    ,
        histogramVj = distributionM2->histogram->V[j];
        histogramKj = distributionM2->histogram->K[j];
    )
    //if (u < distributionM2->histogram->V[j]) return distributionM2->shift + j;
    //return distributionM2->shift + distributionM2->histogram->K[j];
    if (u < histogramVj) return distributionM2->shift + j;
    return distributionM2->shift + histogramKj;
}
|
| 311 |
+
|
| 312 |
+
template <typename T>
// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
// Four-wide variant of _curand_M2_double: maps the four components of x
// independently through the same V/K tables.
QUALIFIERS uint4 _curand_M2_double4(T x, curandDistributionM2Shift_t distributionM2)
{
    double4 u;
    uint4 result = {0,0,0,0};
    // flag.c == 1 means component c has not yet been resolved via V and
    // must fall through to the alias table K below.
    int4 flag = {1,1,1,1};

    u.x = _curand_uniform_double(x.x);
    u.y = _curand_uniform_double(x.y);
    u.z = _curand_uniform_double(x.z);
    u.w = _curand_uniform_double(x.w);

    // Cell index per component.
    int4 j;
    j.x = (int) floor(distributionM2->length*u.x);
    j.y = (int) floor(distributionM2->length*u.y);
    j.z = (int) floor(distributionM2->length*u.z);
    j.w = (int) floor(distributionM2->length*u.w);
    // int result;

    double histogramVjx;
    double histogramVjy;
    double histogramVjz;
    double histogramVjw;
    unsigned int histogramKjx;
    unsigned int histogramKjy;
    unsigned int histogramKjz;
    unsigned int histogramKjw;
    // On SM 3.5+ fetch the table entries through the read-only cache.
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
        histogramVjx = __ldg( &(distributionM2->histogram->V[j.x]));
        histogramVjy = __ldg( &(distributionM2->histogram->V[j.y]));
        histogramVjz = __ldg( &(distributionM2->histogram->V[j.z]));
        histogramVjw = __ldg( &(distributionM2->histogram->V[j.w]));

        histogramKjx = __ldg( &(distributionM2->histogram->K[j.x]));
        histogramKjy = __ldg( &(distributionM2->histogram->K[j.y]));
        histogramKjz = __ldg( &(distributionM2->histogram->K[j.z]));
        histogramKjw = __ldg( &(distributionM2->histogram->K[j.w]));
    ,
        histogramVjx = distributionM2->histogram->V[j.x];
        histogramVjy = distributionM2->histogram->V[j.y];
        histogramVjz = distributionM2->histogram->V[j.z];
        histogramVjw = distributionM2->histogram->V[j.w];

        histogramKjx = distributionM2->histogram->K[j.x];
        histogramKjy = distributionM2->histogram->K[j.y];
        histogramKjz = distributionM2->histogram->K[j.z];
        histogramKjw = distributionM2->histogram->K[j.w];
    )

    // Components whose uniform falls below V[j] keep cell j directly.
    if (u.x < histogramVjx){ result.x = distributionM2->shift + j.x; flag.x = 0; }
    if (u.y < histogramVjy){ result.y = distributionM2->shift + j.y; flag.y = 0; }
    if (u.z < histogramVjz){ result.z = distributionM2->shift + j.z; flag.z = 0; }
    if (u.w < histogramVjw){ result.w = distributionM2->shift + j.w; flag.w = 0; }
    //return distributionM2->shift + distributionM2->histogram->K[j];

    // Unresolved components take the alias K[j] instead.
    if(flag.x) result.x = distributionM2->shift + histogramKjx;
    if(flag.y) result.y = distributionM2->shift + histogramKjy;
    if(flag.z) result.z = distributionM2->shift + histogramKjz;
    if(flag.w) result.w = distributionM2->shift + histogramKjw;

    return result;
}
|
| 375 |
+
|
| 376 |
+
template <typename STATE>
|
| 377 |
+
QUALIFIERS unsigned int curand_M2_double(STATE *state, curandDistributionM2Shift_t distributionM2)
|
| 378 |
+
{
|
| 379 |
+
return _curand_M2_double(curand(state), distributionM2);
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
template <typename STATE>
|
| 383 |
+
QUALIFIERS uint4 curand_M2_double4(STATE *state, curandDistributionM2Shift_t distributionM2)
|
| 384 |
+
{
|
| 385 |
+
return _curand_M2_double4(curand4(state), distributionM2);
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
template <typename T>
|
| 390 |
+
QUALIFIERS unsigned int _curand_binary_search_double(T x, curandDistributionShift_t distribution)
|
| 391 |
+
{
|
| 392 |
+
double u = _curand_uniform_double(x);
|
| 393 |
+
int min = 0;
|
| 394 |
+
int max = distribution->length-1;
|
| 395 |
+
do{
|
| 396 |
+
int mid = (max + min)/2;
|
| 397 |
+
double probability_mid;
|
| 398 |
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
|
| 399 |
+
probability_mid = __ldg( &(distribution->probability[mid]));
|
| 400 |
+
,
|
| 401 |
+
probability_mid = distribution->probability[mid];
|
| 402 |
+
)
|
| 403 |
+
if (u <= probability_mid){
|
| 404 |
+
max = mid;
|
| 405 |
+
}else{
|
| 406 |
+
min = mid+1;
|
| 407 |
+
}
|
| 408 |
+
}while (min < max);
|
| 409 |
+
return distribution->shift + min;
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
template <typename STATE>
|
| 413 |
+
QUALIFIERS unsigned int curand_binary_search_double(STATE *state, curandDistributionShift_t distribution)
|
| 414 |
+
{
|
| 415 |
+
return _curand_binary_search_double(curand(state), distribution);
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
// Generates uniformly distributed double values in range (0.0; 1.0) from uniformly distributed
|
| 419 |
+
// unsigned int. We can't use standard _curand_uniform_double since it can generate 1.0.
|
| 420 |
+
// This is required only for _curand_poisson_ITR_double.
|
| 421 |
+
QUALIFIERS double _curand_uniform_double_excluding_one(unsigned int x)
|
| 422 |
+
{
|
| 423 |
+
return x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
// Overload for unsigned long long.
|
| 427 |
+
// This is required only for _curand_poisson_ITR_double.
|
| 428 |
+
QUALIFIERS double _curand_uniform_double_excluding_one(unsigned long long x)
|
| 429 |
+
{
|
| 430 |
+
return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/4.0);
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
#define MAGIC_DOUBLE_CONST 500.0
|
| 434 |
+
template <typename T>
|
| 435 |
+
//George S. Fishman Discrete-event simulation: modeling, programming, and analysis
|
| 436 |
+
QUALIFIERS unsigned int _curand_poisson_ITR_double(T x, double lambda)
|
| 437 |
+
{
|
| 438 |
+
double L,p = 1.0;
|
| 439 |
+
double q = 1.0;
|
| 440 |
+
unsigned int k = 0;
|
| 441 |
+
int pow=0;
|
| 442 |
+
// This algorithm requires u to be in (0;1) range, however, _curand_uniform_double
|
| 443 |
+
// returns a number in range (0;1]. If u is 1.0 the inner loop never ends. The
|
| 444 |
+
// following operation transforms the range from (0;1] to (0;1).
|
| 445 |
+
double u = _curand_uniform_double_excluding_one(x);
|
| 446 |
+
do{
|
| 447 |
+
if (lambda > (double)(pow+MAGIC_DOUBLE_CONST)){
|
| 448 |
+
L = exp(-MAGIC_DOUBLE_CONST);
|
| 449 |
+
}else{
|
| 450 |
+
L = exp((double)(pow - lambda));
|
| 451 |
+
}
|
| 452 |
+
p *= L;
|
| 453 |
+
q *= L;
|
| 454 |
+
pow += (int) MAGIC_DOUBLE_CONST;
|
| 455 |
+
while (u > q){
|
| 456 |
+
k++;
|
| 457 |
+
p *= ((double)lambda / (double) k);
|
| 458 |
+
q += p;
|
| 459 |
+
}
|
| 460 |
+
}while((double)pow < lambda);
|
| 461 |
+
return k;
|
| 462 |
+
}
|
| 463 |
+
|
| 464 |
+
template <typename T>
|
| 465 |
+
/* Rejection Method for Poisson distribution based on gammainc approximation */
|
| 466 |
+
QUALIFIERS unsigned int curand_poisson_gammainc(T state, float lambda){
|
| 467 |
+
float y, x, t, z,v;
|
| 468 |
+
float logl = __cr_log (lambda);
|
| 469 |
+
while (true) {
|
| 470 |
+
y = curand_uniform (state);
|
| 471 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 472 |
+
x = floorf (x);
|
| 473 |
+
z = curand_uniform (state);
|
| 474 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 475 |
+
z = z*v;
|
| 476 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 477 |
+
if ((z < t) && (v>=1e-20))
|
| 478 |
+
break;
|
| 479 |
+
}
|
| 480 |
+
return (unsigned int)x;
|
| 481 |
+
}
|
| 482 |
+
|
| 483 |
+
template <typename T>
|
| 484 |
+
/* Rejection Method for Poisson distribution based on gammainc approximation */
|
| 485 |
+
QUALIFIERS uint4 curand_poisson_gammainc4(T state, float lambda){
|
| 486 |
+
uint4 result;
|
| 487 |
+
float y, x, t, z,v;
|
| 488 |
+
float logl = __cr_log (lambda);
|
| 489 |
+
while (true) {
|
| 490 |
+
y = curand_uniform(state);
|
| 491 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 492 |
+
x = floorf (x);
|
| 493 |
+
z = curand_uniform (state);
|
| 494 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 495 |
+
z = z*v;
|
| 496 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 497 |
+
if ((z < t) && (v>=1e-20))
|
| 498 |
+
break;
|
| 499 |
+
}
|
| 500 |
+
result.x = (unsigned int)x;
|
| 501 |
+
|
| 502 |
+
while (true) {
|
| 503 |
+
y = curand_uniform(state);
|
| 504 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 505 |
+
x = floorf (x);
|
| 506 |
+
z = curand_uniform (state);
|
| 507 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 508 |
+
z = z*v;
|
| 509 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 510 |
+
if ((z < t) && (v>=1e-20))
|
| 511 |
+
break;
|
| 512 |
+
}
|
| 513 |
+
result.y = (unsigned int)x;
|
| 514 |
+
|
| 515 |
+
while (true) {
|
| 516 |
+
y = curand_uniform(state);
|
| 517 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 518 |
+
x = floorf (x);
|
| 519 |
+
z = curand_uniform (state);
|
| 520 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 521 |
+
z = z*v;
|
| 522 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 523 |
+
if ((z < t) && (v>=1e-20))
|
| 524 |
+
break;
|
| 525 |
+
}
|
| 526 |
+
result.z = (unsigned int)x;
|
| 527 |
+
|
| 528 |
+
while (true) {
|
| 529 |
+
y = curand_uniform(state);
|
| 530 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 531 |
+
x = floorf (x);
|
| 532 |
+
z = curand_uniform (state);
|
| 533 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 534 |
+
z = z*v;
|
| 535 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 536 |
+
if ((z < t) && (v>=1e-20))
|
| 537 |
+
break;
|
| 538 |
+
}
|
| 539 |
+
result.w = (unsigned int)x;
|
| 540 |
+
|
| 541 |
+
return result;
|
| 542 |
+
}
|
| 543 |
+
// Note below that the round to nearest integer, where needed,is done in line with code that
|
| 544 |
+
// assumes the range of values is < 2**32
|
| 545 |
+
|
| 546 |
+
template <typename T>
|
| 547 |
+
QUALIFIERS unsigned int _curand_poisson(T x, double lambda)
|
| 548 |
+
{
|
| 549 |
+
if (lambda < 1000)
|
| 550 |
+
return _curand_poisson_ITR_double(x, lambda);
|
| 551 |
+
return (unsigned int)((sqrt(lambda) * _curand_normal_icdf_double(x)) + lambda + 0.5); //Round to nearest
|
| 552 |
+
}
|
| 553 |
+
|
| 554 |
+
template <typename T>
|
| 555 |
+
QUALIFIERS unsigned int _curand_poisson_from_normal(T x, double lambda)
|
| 556 |
+
{
|
| 557 |
+
return (unsigned int)((sqrt(lambda) * _curand_normal_icdf(x)) + lambda + 0.5); //Round to nearest
|
| 558 |
+
}
|
| 559 |
+
|
| 560 |
+
template <typename STATE>
|
| 561 |
+
QUALIFIERS unsigned int curand_poisson_from_normal(STATE state, double lambda)
|
| 562 |
+
{
|
| 563 |
+
return (unsigned int)((sqrt(lambda) * curand_normal(state)) + lambda + 0.5); //Round to nearest
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
template <typename STATE>
|
| 567 |
+
QUALIFIERS uint4 curand_poisson_from_normal4(STATE state, double lambda)
|
| 568 |
+
{
|
| 569 |
+
uint4 result;
|
| 570 |
+
float4 _res;
|
| 571 |
+
|
| 572 |
+
_res = curand_normal4(state);
|
| 573 |
+
|
| 574 |
+
result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
|
| 575 |
+
result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
|
| 576 |
+
result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
|
| 577 |
+
result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
|
| 578 |
+
return result; //Round to nearest
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
/**
|
| 582 |
+
* \brief Return a Poisson-distributed unsigned int from a XORWOW generator.
|
| 583 |
+
*
|
| 584 |
+
* Return a single unsigned int from a Poisson
|
| 585 |
+
* distribution with lambda \p lambda from the XORWOW generator in \p state,
|
| 586 |
+
* increment the position of the generator by a variable amount, depending
|
| 587 |
+
* on the algorithm used.
|
| 588 |
+
*
|
| 589 |
+
* \param state - Pointer to state to update
|
| 590 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 591 |
+
*
|
| 592 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 593 |
+
*/
|
| 594 |
+
QUALIFIERS unsigned int curand_poisson(curandStateXORWOW_t *state, double lambda)
|
| 595 |
+
{
|
| 596 |
+
if (lambda < 64)
|
| 597 |
+
return curand_poisson_knuth(state, (float)lambda);
|
| 598 |
+
if (lambda > 4000)
|
| 599 |
+
return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
|
| 600 |
+
return curand_poisson_gammainc(state, (float)lambda);
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
/**
|
| 604 |
+
* \brief Return a Poisson-distributed unsigned int from a Philox4_32_10 generator.
|
| 605 |
+
*
|
| 606 |
+
* Return a single unsigned int from a Poisson
|
| 607 |
+
* distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
|
| 608 |
+
* increment the position of the generator by a variable amount, depending
|
| 609 |
+
* on the algorithm used.
|
| 610 |
+
*
|
| 611 |
+
* \param state - Pointer to state to update
|
| 612 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 613 |
+
*
|
| 614 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 615 |
+
*/
|
| 616 |
+
QUALIFIERS unsigned int curand_poisson(curandStatePhilox4_32_10_t *state, double lambda)
|
| 617 |
+
{
|
| 618 |
+
if (lambda < 64)
|
| 619 |
+
return curand_poisson_knuth(state, (float)lambda);
|
| 620 |
+
if (lambda > 4000)
|
| 621 |
+
return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
|
| 622 |
+
return curand_poisson_gammainc(state, (float)lambda);
|
| 623 |
+
}
|
| 624 |
+
/**
|
| 625 |
+
* \brief Return four Poisson-distributed unsigned ints from a Philox4_32_10 generator.
|
| 626 |
+
*
|
| 627 |
+
* Return a four unsigned ints from a Poisson
|
| 628 |
+
* distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
|
| 629 |
+
* increment the position of the generator by a variable amount, depending
|
| 630 |
+
* on the algorithm used.
|
| 631 |
+
*
|
| 632 |
+
* \param state - Pointer to state to update
|
| 633 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 634 |
+
*
|
| 635 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 636 |
+
*/
|
| 637 |
+
QUALIFIERS uint4 curand_poisson4(curandStatePhilox4_32_10_t *state, double lambda)
|
| 638 |
+
{
|
| 639 |
+
uint4 result;
|
| 640 |
+
double4 _res;
|
| 641 |
+
if (lambda < 64)
|
| 642 |
+
return curand_poisson_knuth4(state, (float)lambda);
|
| 643 |
+
if (lambda > 4000) {
|
| 644 |
+
_res = curand_normal4_double(state);
|
| 645 |
+
result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
|
| 646 |
+
result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
|
| 647 |
+
result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
|
| 648 |
+
result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
|
| 649 |
+
return result;
|
| 650 |
+
}
|
| 651 |
+
return curand_poisson_gammainc4(state, (float)lambda);
|
| 652 |
+
}
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
/**
|
| 657 |
+
* \brief Return a Poisson-distributed unsigned int from a MRG32k3A generator.
|
| 658 |
+
*
|
| 659 |
+
* Return a single unsigned int from a Poisson
|
| 660 |
+
* distribution with lambda \p lambda from the MRG32k3a generator in \p state,
|
| 661 |
+
* increment the position of the generator by a variable amount, depending
|
| 662 |
+
* on the algorithm used.
|
| 663 |
+
*
|
| 664 |
+
* \param state - Pointer to state to update
|
| 665 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 666 |
+
*
|
| 667 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 668 |
+
*/
|
| 669 |
+
QUALIFIERS unsigned int curand_poisson(curandStateMRG32k3a_t *state, double lambda)
|
| 670 |
+
{
|
| 671 |
+
if (lambda < 64)
|
| 672 |
+
return curand_poisson_knuth(state, (float)lambda);
|
| 673 |
+
if (lambda > 4000)
|
| 674 |
+
return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
|
| 675 |
+
return curand_poisson_gammainc(state, (float)lambda);
|
| 676 |
+
}
|
| 677 |
+
|
| 678 |
+
/**
|
| 679 |
+
* \brief Return a Poisson-distributed unsigned int from a MTGP32 generator.
|
| 680 |
+
*
|
| 681 |
+
* Return a single int from a Poisson
|
| 682 |
+
* distribution with lambda \p lambda from the MTGP32 generator in \p state,
|
| 683 |
+
* increment the position of the generator by one.
|
| 684 |
+
*
|
| 685 |
+
* \param state - Pointer to state to update
|
| 686 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 687 |
+
*
|
| 688 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 689 |
+
*/
|
| 690 |
+
QUALIFIERS unsigned int curand_poisson(curandStateMtgp32_t *state, double lambda)
|
| 691 |
+
{
|
| 692 |
+
return _curand_poisson(curand(state), lambda);
|
| 693 |
+
}
|
| 694 |
+
|
| 695 |
+
/**
|
| 696 |
+
* \brief Return a Poisson-distributed unsigned int from a Sobol32 generator.
|
| 697 |
+
*
|
| 698 |
+
* Return a single unsigned int from a Poisson
|
| 699 |
+
* distribution with lambda \p lambda from the Sobol32 generator in \p state,
|
| 700 |
+
* increment the position of the generator by one.
|
| 701 |
+
*
|
| 702 |
+
* \param state - Pointer to state to update
|
| 703 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 704 |
+
*
|
| 705 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 706 |
+
*/
|
| 707 |
+
|
| 708 |
+
QUALIFIERS unsigned int curand_poisson(curandStateSobol32_t *state, double lambda)
|
| 709 |
+
{
|
| 710 |
+
return _curand_poisson(curand(state), lambda);
|
| 711 |
+
}
|
| 712 |
+
|
| 713 |
+
/**
|
| 714 |
+
* \brief Return a Poisson-distributed unsigned int from a scrambled Sobol32 generator.
|
| 715 |
+
*
|
| 716 |
+
* Return a single unsigned int from a Poisson
|
| 717 |
+
* distribution with lambda \p lambda from the scrambled Sobol32 generator in \p state,
|
| 718 |
+
* increment the position of the generator by one.
|
| 719 |
+
*
|
| 720 |
+
* \param state - Pointer to state to update
|
| 721 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 722 |
+
*
|
| 723 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 724 |
+
*/
|
| 725 |
+
QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol32_t *state, double lambda)
|
| 726 |
+
{
|
| 727 |
+
return _curand_poisson(curand(state), lambda);
|
| 728 |
+
}
|
| 729 |
+
|
| 730 |
+
/**
|
| 731 |
+
* \brief Return a Poisson-distributed unsigned int from a Sobol64 generator.
|
| 732 |
+
*
|
| 733 |
+
* Return a single unsigned int from a Poisson
|
| 734 |
+
* distribution with lambda \p lambda from the Sobol64 generator in \p state,
|
| 735 |
+
* increment position of generator by one.
|
| 736 |
+
*
|
| 737 |
+
* \param state - Pointer to state to update
|
| 738 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 739 |
+
*
|
| 740 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 741 |
+
*/
|
| 742 |
+
QUALIFIERS unsigned int curand_poisson(curandStateSobol64_t *state, double lambda)
|
| 743 |
+
{
|
| 744 |
+
return _curand_poisson(curand(state), lambda);
|
| 745 |
+
}
|
| 746 |
+
|
| 747 |
+
/**
|
| 748 |
+
* \brief Return a Poisson-distributed unsigned int from a scrambled Sobol64 generator.
|
| 749 |
+
*
|
| 750 |
+
* Return a single unsigned int from a Poisson
|
| 751 |
+
* distribution with lambda \p lambda from the scrambled Sobol64 generator in \p state,
|
| 752 |
+
* increment position of generator by one.
|
| 753 |
+
*
|
| 754 |
+
* \param state - Pointer to state to update
|
| 755 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 756 |
+
*
|
| 757 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 758 |
+
*/
|
| 759 |
+
QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol64_t *state, double lambda)
|
| 760 |
+
{
|
| 761 |
+
return _curand_poisson(curand(state), lambda);
|
| 762 |
+
}
|
| 763 |
+
#endif // !defined(CURAND_POISSON_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_precalc.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/include/curand_uniform.h
ADDED
|
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_UNIFORM_H_)
|
| 52 |
+
#define CURAND_UNIFORM_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include "curand_mrg32k3a.h"
|
| 65 |
+
#include "curand_mtgp32_kernel.h"
|
| 66 |
+
#include "curand_philox4x32_x.h"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
QUALIFIERS float _curand_uniform(unsigned int x)
|
| 70 |
+
{
|
| 71 |
+
return x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
QUALIFIERS float4 _curand_uniform4(uint4 x)
|
| 75 |
+
{
|
| 76 |
+
float4 y;
|
| 77 |
+
y.x = x.x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 78 |
+
y.y = x.y * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 79 |
+
y.z = x.z * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 80 |
+
y.w = x.w * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 81 |
+
return y;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
QUALIFIERS float _curand_uniform(unsigned long long x)
|
| 85 |
+
{
|
| 86 |
+
unsigned int t;
|
| 87 |
+
t = (unsigned int)(x >> 32);
|
| 88 |
+
return t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
QUALIFIERS double _curand_uniform_double(unsigned int x)
|
| 92 |
+
{
|
| 93 |
+
return x * CURAND_2POW32_INV_DOUBLE + CURAND_2POW32_INV_DOUBLE;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
QUALIFIERS double _curand_uniform_double(unsigned long long x)
|
| 97 |
+
{
|
| 98 |
+
return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
QUALIFIERS double _curand_uniform_double_hq(unsigned int x, unsigned int y)
|
| 102 |
+
{
|
| 103 |
+
unsigned long long z = (unsigned long long)x ^
|
| 104 |
+
((unsigned long long)y << (53 - 32));
|
| 105 |
+
return z * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
QUALIFIERS float curand_uniform(curandStateTest_t *state)
|
| 109 |
+
{
|
| 110 |
+
return _curand_uniform(curand(state));
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
QUALIFIERS double curand_uniform_double(curandStateTest_t *state)
|
| 114 |
+
{
|
| 115 |
+
return _curand_uniform_double(curand(state));
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
/**
|
| 119 |
+
* \brief Return a uniformly distributed float from an XORWOW generator.
|
| 120 |
+
*
|
| 121 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 122 |
+
* from the XORWOW generator in \p state, increment position of generator.
|
| 123 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 124 |
+
* point outputs are never returned.
|
| 125 |
+
*
|
| 126 |
+
* The implementation may use any number of calls to \p curand() to
|
| 127 |
+
* get enough random bits to create the return value. The current
|
| 128 |
+
* implementation uses one call.
|
| 129 |
+
*
|
| 130 |
+
* \param state - Pointer to state to update
|
| 131 |
+
*
|
| 132 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 133 |
+
*/
|
| 134 |
+
QUALIFIERS float curand_uniform(curandStateXORWOW_t *state)
|
| 135 |
+
{
|
| 136 |
+
return _curand_uniform(curand(state));
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
/**
|
| 140 |
+
* \brief Return a uniformly distributed double from an XORWOW generator.
|
| 141 |
+
*
|
| 142 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 143 |
+
* from the XORWOW generator in \p state, increment position of generator.
|
| 144 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 145 |
+
* point outputs are never returned.
|
| 146 |
+
*
|
| 147 |
+
* The implementation may use any number of calls to \p curand() to
|
| 148 |
+
* get enough random bits to create the return value. The current
|
| 149 |
+
* implementation uses exactly two calls.
|
| 150 |
+
*
|
| 151 |
+
* \param state - Pointer to state to update
|
| 152 |
+
*
|
| 153 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 154 |
+
*/
|
| 155 |
+
QUALIFIERS double curand_uniform_double(curandStateXORWOW_t *state)
|
| 156 |
+
{
|
| 157 |
+
unsigned int x, y;
|
| 158 |
+
x = curand(state);
|
| 159 |
+
y = curand(state);
|
| 160 |
+
return _curand_uniform_double_hq(x, y);
|
| 161 |
+
}
|
| 162 |
+
/**
|
| 163 |
+
* \brief Return a uniformly distributed float from an MRG32k3a generator.
|
| 164 |
+
*
|
| 165 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 166 |
+
* from the MRG32k3a generator in \p state, increment position of generator.
|
| 167 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 168 |
+
* point outputs are never returned.
|
| 169 |
+
*
|
| 170 |
+
* The implementation returns up to 23 bits of mantissa, with the minimum
|
| 171 |
+
* return value \f$ 2^{-32} \f$
|
| 172 |
+
*
|
| 173 |
+
* \param state - Pointer to state to update
|
| 174 |
+
*
|
| 175 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 176 |
+
*/
|
| 177 |
+
QUALIFIERS float curand_uniform(curandStateMRG32k3a_t *state)
|
| 178 |
+
{
|
| 179 |
+
return ((float)(curand_MRG32k3a(state)*MRG32K3A_NORM));
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
/**
|
| 183 |
+
* \brief Return a uniformly distributed double from an MRG32k3a generator.
|
| 184 |
+
*
|
| 185 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 186 |
+
* from the MRG32k3a generator in \p state, increment position of generator.
|
| 187 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 188 |
+
* point outputs are never returned.
|
| 189 |
+
*
|
| 190 |
+
* Note the implementation returns at most 32 random bits of mantissa as
|
| 191 |
+
* outlined in the seminal paper by L'Ecuyer.
|
| 192 |
+
*
|
| 193 |
+
* \param state - Pointer to state to update
|
| 194 |
+
*
|
| 195 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 196 |
+
*/
|
| 197 |
+
QUALIFIERS double curand_uniform_double(curandStateMRG32k3a_t *state)
|
| 198 |
+
{
|
| 199 |
+
return curand_MRG32k3a(state)*MRG32K3A_NORM;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
/**
|
| 205 |
+
* \brief Return a uniformly distributed tuple of 2 doubles from an Philox4_32_10 generator.
|
| 206 |
+
*
|
| 207 |
+
* Return a uniformly distributed 2 doubles (double4) between \p 0.0 and \p 1.0
|
| 208 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator by 4.
|
| 209 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 210 |
+
* point outputs are never returned.
|
| 211 |
+
*
|
| 212 |
+
* \param state - Pointer to state to update
|
| 213 |
+
*
|
| 214 |
+
* \return 2 uniformly distributed doubles between \p 0.0 and \p 1.0
|
| 215 |
+
*/
|
| 216 |
+
|
| 217 |
+
QUALIFIERS double2 curand_uniform2_double(curandStatePhilox4_32_10_t *state)
|
| 218 |
+
{
|
| 219 |
+
uint4 _x;
|
| 220 |
+
double2 result;
|
| 221 |
+
_x = curand4(state);
|
| 222 |
+
result.x = _curand_uniform_double_hq(_x.x,_x.y);
|
| 223 |
+
result.y = _curand_uniform_double_hq(_x.z,_x.w);
|
| 224 |
+
return result;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
// not a part of API
|
| 229 |
+
QUALIFIERS double4 curand_uniform4_double(curandStatePhilox4_32_10_t *state)
|
| 230 |
+
{
|
| 231 |
+
uint4 _x, _y;
|
| 232 |
+
double4 result;
|
| 233 |
+
_x = curand4(state);
|
| 234 |
+
_y = curand4(state);
|
| 235 |
+
result.x = _curand_uniform_double_hq(_x.x,_x.y);
|
| 236 |
+
result.y = _curand_uniform_double_hq(_x.z,_x.w);
|
| 237 |
+
result.z = _curand_uniform_double_hq(_y.x,_y.y);
|
| 238 |
+
result.w = _curand_uniform_double_hq(_y.z,_y.w);
|
| 239 |
+
return result;
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
/**
|
| 243 |
+
* \brief Return a uniformly distributed float from a Philox4_32_10 generator.
|
| 244 |
+
*
|
| 245 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 246 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator.
|
| 247 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 248 |
+
* point outputs are never returned.
|
| 249 |
+
*
|
| 250 |
+
* \param state - Pointer to state to update
|
| 251 |
+
*
|
| 252 |
+
* \return uniformly distributed float between \p 0.0 and \p 1.0
|
| 253 |
+
*
|
| 254 |
+
*/
|
| 255 |
+
QUALIFIERS float curand_uniform(curandStatePhilox4_32_10_t *state)
|
| 256 |
+
{
|
| 257 |
+
return _curand_uniform(curand(state));
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
/**
|
| 261 |
+
* \brief Return a uniformly distributed tuple of 4 floats from a Philox4_32_10 generator.
|
| 262 |
+
*
|
| 263 |
+
* Return a uniformly distributed 4 floats between \p 0.0f and \p 1.0f
|
| 264 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator by 4.
|
| 265 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 266 |
+
* point outputs are never returned.
|
| 267 |
+
*
|
| 268 |
+
* \param state - Pointer to state to update
|
| 269 |
+
*
|
| 270 |
+
* \return uniformly distributed float between \p 0.0 and \p 1.0
|
| 271 |
+
*
|
| 272 |
+
*/
|
| 273 |
+
QUALIFIERS float4 curand_uniform4(curandStatePhilox4_32_10_t *state)
|
| 274 |
+
{
|
| 275 |
+
return _curand_uniform4(curand4(state));
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
/**
|
| 279 |
+
* \brief Return a uniformly distributed float from a MTGP32 generator.
|
| 280 |
+
*
|
| 281 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 282 |
+
* from the MTGP32 generator in \p state, increment position of generator.
|
| 283 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 284 |
+
* point outputs are never returned.
|
| 285 |
+
*
|
| 286 |
+
* \param state - Pointer to state to update
|
| 287 |
+
*
|
| 288 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 289 |
+
*/
|
| 290 |
+
QUALIFIERS float curand_uniform(curandStateMtgp32_t *state)
|
| 291 |
+
{
|
| 292 |
+
return _curand_uniform(curand(state));
|
| 293 |
+
}
|
| 294 |
+
/**
|
| 295 |
+
* \brief Return a uniformly distributed double from a MTGP32 generator.
|
| 296 |
+
*
|
| 297 |
+
* Return a uniformly distributed double between \p 0.0f and \p 1.0f
|
| 298 |
+
* from the MTGP32 generator in \p state, increment position of generator.
|
| 299 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 300 |
+
* point outputs are never returned.
|
| 301 |
+
*
|
| 302 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 303 |
+
* precision value.
|
| 304 |
+
*
|
| 305 |
+
* \param state - Pointer to state to update
|
| 306 |
+
*
|
| 307 |
+
* \return uniformly distributed double between \p 0.0f and \p 1.0f
|
| 308 |
+
*/
|
| 309 |
+
QUALIFIERS double curand_uniform_double(curandStateMtgp32_t *state)
|
| 310 |
+
{
|
| 311 |
+
return _curand_uniform_double(curand(state));
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
/**
|
| 315 |
+
* \brief Return a uniformly distributed double from a Philox4_32_10 generator.
|
| 316 |
+
*
|
| 317 |
+
* Return a uniformly distributed double between \p 0.0f and \p 1.0f
|
| 318 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator.
|
| 319 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 320 |
+
* point outputs are never returned.
|
| 321 |
+
*
|
| 322 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 323 |
+
* precision value.
|
| 324 |
+
*
|
| 325 |
+
* \p curand_uniform2_double() is recommended for higher quality uniformly distributed
|
| 326 |
+
* double precision values.
|
| 327 |
+
*
|
| 328 |
+
* \param state - Pointer to state to update
|
| 329 |
+
*
|
| 330 |
+
* \return uniformly distributed double between \p 0.0f and \p 1.0f
|
| 331 |
+
*/
|
| 332 |
+
|
| 333 |
+
QUALIFIERS double curand_uniform_double(curandStatePhilox4_32_10_t *state)
|
| 334 |
+
{
|
| 335 |
+
return _curand_uniform_double(curand(state));
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
/**
|
| 340 |
+
* \brief Return a uniformly distributed float from a Sobol32 generator.
|
| 341 |
+
*
|
| 342 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 343 |
+
* from the Sobol32 generator in \p state, increment position of generator.
|
| 344 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 345 |
+
* point outputs are never returned.
|
| 346 |
+
*
|
| 347 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 348 |
+
*
|
| 349 |
+
* \param state - Pointer to state to update
|
| 350 |
+
*
|
| 351 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 352 |
+
*/
|
| 353 |
+
QUALIFIERS float curand_uniform(curandStateSobol32_t *state)
|
| 354 |
+
{
|
| 355 |
+
return _curand_uniform(curand(state));
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
/**
|
| 359 |
+
* \brief Return a uniformly distributed double from a Sobol32 generator.
|
| 360 |
+
*
|
| 361 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 362 |
+
* from the Sobol32 generator in \p state, increment position of generator.
|
| 363 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 364 |
+
* point outputs are never returned.
|
| 365 |
+
*
|
| 366 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 367 |
+
* to preserve the quasirandom properties of the sequence.
|
| 368 |
+
*
|
| 369 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 370 |
+
* precision value.
|
| 371 |
+
*
|
| 372 |
+
* \param state - Pointer to state to update
|
| 373 |
+
*
|
| 374 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 375 |
+
*/
|
| 376 |
+
QUALIFIERS double curand_uniform_double(curandStateSobol32_t *state)
|
| 377 |
+
{
|
| 378 |
+
return _curand_uniform_double(curand(state));
|
| 379 |
+
}
|
| 380 |
+
/**
|
| 381 |
+
* \brief Return a uniformly distributed float from a scrambled Sobol32 generator.
|
| 382 |
+
*
|
| 383 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 384 |
+
* from the scrambled Sobol32 generator in \p state, increment position of generator.
|
| 385 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 386 |
+
* point outputs are never returned.
|
| 387 |
+
*
|
| 388 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 389 |
+
*
|
| 390 |
+
* \param state - Pointer to state to update
|
| 391 |
+
*
|
| 392 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 393 |
+
*/
|
| 394 |
+
QUALIFIERS float curand_uniform(curandStateScrambledSobol32_t *state)
|
| 395 |
+
{
|
| 396 |
+
return _curand_uniform(curand(state));
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
/**
|
| 400 |
+
* \brief Return a uniformly distributed double from a scrambled Sobol32 generator.
|
| 401 |
+
*
|
| 402 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 403 |
+
* from the scrambled Sobol32 generator in \p state, increment position of generator.
|
| 404 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 405 |
+
* point outputs are never returned.
|
| 406 |
+
*
|
| 407 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 408 |
+
* to preserve the quasirandom properties of the sequence.
|
| 409 |
+
*
|
| 410 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 411 |
+
* precision value.
|
| 412 |
+
*
|
| 413 |
+
* \param state - Pointer to state to update
|
| 414 |
+
*
|
| 415 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 416 |
+
*/
|
| 417 |
+
QUALIFIERS double curand_uniform_double(curandStateScrambledSobol32_t *state)
|
| 418 |
+
{
|
| 419 |
+
return _curand_uniform_double(curand(state));
|
| 420 |
+
}
|
| 421 |
+
/**
|
| 422 |
+
* \brief Return a uniformly distributed float from a Sobol64 generator.
|
| 423 |
+
*
|
| 424 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 425 |
+
* from the Sobol64 generator in \p state, increment position of generator.
|
| 426 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 427 |
+
* point outputs are never returned.
|
| 428 |
+
*
|
| 429 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 430 |
+
*
|
| 431 |
+
* \param state - Pointer to state to update
|
| 432 |
+
*
|
| 433 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 434 |
+
*/
|
| 435 |
+
QUALIFIERS float curand_uniform(curandStateSobol64_t *state)
|
| 436 |
+
{
|
| 437 |
+
return _curand_uniform(curand(state));
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
/**
|
| 441 |
+
* \brief Return a uniformly distributed double from a Sobol64 generator.
|
| 442 |
+
*
|
| 443 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 444 |
+
* from the Sobol64 generator in \p state, increment position of generator.
|
| 445 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 446 |
+
* point outputs are never returned.
|
| 447 |
+
*
|
| 448 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 449 |
+
* to preserve the quasirandom properties of the sequence.
|
| 450 |
+
*
|
| 451 |
+
* \param state - Pointer to state to update
|
| 452 |
+
*
|
| 453 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 454 |
+
*/
|
| 455 |
+
QUALIFIERS double curand_uniform_double(curandStateSobol64_t *state)
|
| 456 |
+
{
|
| 457 |
+
return _curand_uniform_double(curand(state));
|
| 458 |
+
}
|
| 459 |
+
/**
|
| 460 |
+
* \brief Return a uniformly distributed float from a scrambled Sobol64 generator.
|
| 461 |
+
*
|
| 462 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 463 |
+
* from the scrambled Sobol64 generator in \p state, increment position of generator.
|
| 464 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 465 |
+
* point outputs are never returned.
|
| 466 |
+
*
|
| 467 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 468 |
+
*
|
| 469 |
+
* \param state - Pointer to state to update
|
| 470 |
+
*
|
| 471 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 472 |
+
*/
|
| 473 |
+
QUALIFIERS float curand_uniform(curandStateScrambledSobol64_t *state)
|
| 474 |
+
{
|
| 475 |
+
return _curand_uniform(curand(state));
|
| 476 |
+
}
|
| 477 |
+
|
| 478 |
+
/**
|
| 479 |
+
* \brief Return a uniformly distributed double from a scrambled Sobol64 generator.
|
| 480 |
+
*
|
| 481 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 482 |
+
* from the scrambled Sobol64 generator in \p state, increment position of generator.
|
| 483 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 484 |
+
* point outputs are never returned.
|
| 485 |
+
*
|
| 486 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 487 |
+
* to preserve the quasirandom properties of the sequence.
|
| 488 |
+
*
|
| 489 |
+
* \param state - Pointer to state to update
|
| 490 |
+
*
|
| 491 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 492 |
+
*/
|
| 493 |
+
QUALIFIERS double curand_uniform_double(curandStateScrambledSobol64_t *state)
|
| 494 |
+
{
|
| 495 |
+
return _curand_uniform_double(curand(state));
|
| 496 |
+
}
|
| 497 |
+
|
| 498 |
+
#endif // !defined(CURAND_UNIFORM_H_)
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (187 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/libcurand.so.10
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:12411c1e921cd3cac4e0d2f58024a3e6bbb7cea584d5cd768ad908ec762c5545
|
| 3 |
+
size 96472496
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (181 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/include/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/include/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (189 Bytes). View file
|
|
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/include/nccl.h
ADDED
|
@@ -0,0 +1,479 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*************************************************************************
|
| 2 |
+
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* See LICENSE.txt for license information
|
| 5 |
+
************************************************************************/
|
| 6 |
+
|
| 7 |
+
#ifndef NCCL_H_
|
| 8 |
+
#define NCCL_H_
|
| 9 |
+
|
| 10 |
+
#include <cuda_runtime.h>
|
| 11 |
+
#include <cuda_fp16.h>
|
| 12 |
+
#if CUDART_VERSION >= 11000
|
| 13 |
+
#include <cuda_bf16.h>
|
| 14 |
+
#endif
|
| 15 |
+
#if CUDART_VERSION >= 11080
|
| 16 |
+
#include <cuda_fp8.h>
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#define NCCL_MAJOR 2
|
| 20 |
+
#define NCCL_MINOR 26
|
| 21 |
+
#define NCCL_PATCH 2
|
| 22 |
+
#define NCCL_SUFFIX ""
|
| 23 |
+
|
| 24 |
+
#define NCCL_VERSION_CODE 22602
|
| 25 |
+
#define NCCL_VERSION(X,Y,Z) (((X) <= 2 && (Y) <= 8) ? (X) * 1000 + (Y) * 100 + (Z) : (X) * 10000 + (Y) * 100 + (Z))
|
| 26 |
+
|
| 27 |
+
#ifdef __cplusplus
|
| 28 |
+
extern "C" {
|
| 29 |
+
#endif
|
| 30 |
+
|
| 31 |
+
#include <limits.h>
|
| 32 |
+
/* Opaque handle to communicator */
|
| 33 |
+
typedef struct ncclComm* ncclComm_t;
|
| 34 |
+
#define NCCL_COMM_NULL NULL
|
| 35 |
+
|
| 36 |
+
#define NCCL_UNIQUE_ID_BYTES 128
|
| 37 |
+
typedef struct { char internal[NCCL_UNIQUE_ID_BYTES]; } ncclUniqueId;
|
| 38 |
+
|
| 39 |
+
/* Error type */
|
| 40 |
+
typedef enum { ncclSuccess = 0,
|
| 41 |
+
ncclUnhandledCudaError = 1,
|
| 42 |
+
ncclSystemError = 2,
|
| 43 |
+
ncclInternalError = 3,
|
| 44 |
+
ncclInvalidArgument = 4,
|
| 45 |
+
ncclInvalidUsage = 5,
|
| 46 |
+
ncclRemoteError = 6,
|
| 47 |
+
ncclInProgress = 7,
|
| 48 |
+
ncclNumResults = 8 } ncclResult_t;
|
| 49 |
+
|
| 50 |
+
#define NCCL_CONFIG_UNDEF_INT INT_MIN
|
| 51 |
+
#define NCCL_CONFIG_UNDEF_PTR NULL
|
| 52 |
+
#define NCCL_SPLIT_NOCOLOR -1
|
| 53 |
+
#define NCCL_UNDEF_FLOAT -1.0f
|
| 54 |
+
|
| 55 |
+
/* Communicator configuration. Users can assign value to attributes to specify the
|
| 56 |
+
* behavior of a communicator. */
|
| 57 |
+
typedef struct ncclConfig_v21700 {
|
| 58 |
+
/* attributes that users should never touch. */
|
| 59 |
+
size_t size;
|
| 60 |
+
unsigned int magic;
|
| 61 |
+
unsigned int version;
|
| 62 |
+
/* attributes that users are able to customize. */
|
| 63 |
+
int blocking;
|
| 64 |
+
int cgaClusterSize;
|
| 65 |
+
int minCTAs;
|
| 66 |
+
int maxCTAs;
|
| 67 |
+
const char *netName;
|
| 68 |
+
int splitShare;
|
| 69 |
+
int trafficClass;
|
| 70 |
+
} ncclConfig_t;
|
| 71 |
+
|
| 72 |
+
/* Config initializer must be assigned to initialize config structure when it is created.
|
| 73 |
+
* Not initialized config will result in NCCL error. */
|
| 74 |
+
#define NCCL_CONFIG_INITIALIZER { \
|
| 75 |
+
sizeof(ncclConfig_t), /* size */ \
|
| 76 |
+
0xcafebeef, /* magic */ \
|
| 77 |
+
NCCL_VERSION(NCCL_MAJOR, NCCL_MINOR, NCCL_PATCH), /* version */ \
|
| 78 |
+
NCCL_CONFIG_UNDEF_INT, /* blocking */ \
|
| 79 |
+
NCCL_CONFIG_UNDEF_INT, /* cgaClusterSize */ \
|
| 80 |
+
NCCL_CONFIG_UNDEF_INT, /* minCTAs */ \
|
| 81 |
+
NCCL_CONFIG_UNDEF_INT, /* maxCTAs */ \
|
| 82 |
+
NCCL_CONFIG_UNDEF_PTR, /* netName */ \
|
| 83 |
+
NCCL_CONFIG_UNDEF_INT, /* splitShare */ \
|
| 84 |
+
NCCL_CONFIG_UNDEF_INT, /* trafficClass */ \
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/* This struct will be used by ncclGroupSimulateEnd() API to query information about simulation. */
|
| 88 |
+
typedef struct ncclSimInfo_v22200 {
|
| 89 |
+
size_t size;
|
| 90 |
+
unsigned int magic;
|
| 91 |
+
unsigned int version;
|
| 92 |
+
float estimatedTime;
|
| 93 |
+
} ncclSimInfo_t;
|
| 94 |
+
|
| 95 |
+
/* NCCL_SIM_INFO_INITIALIZER must be assigned to initialize simInfo structure when it is created.
|
| 96 |
+
* Not initialized simInfo will result in NCCL error. */
|
| 97 |
+
#define NCCL_SIM_INFO_INITIALIZER { \
|
| 98 |
+
sizeof(ncclSimInfo_t), /* size */ \
|
| 99 |
+
0x74685283, /* magic */ \
|
| 100 |
+
NCCL_VERSION(NCCL_MAJOR, NCCL_MINOR, NCCL_PATCH), /* version */ \
|
| 101 |
+
NCCL_UNDEF_FLOAT /* estimated time */ \
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
/* NCCL malloc and free function for all types of NCCL optimizations
|
| 105 |
+
* (e.g. user buffer registration). The actual allocated size might
|
| 106 |
+
* be larger than requested due to granularity requirement. */
|
| 107 |
+
ncclResult_t ncclMemAlloc(void** ptr, size_t size);
|
| 108 |
+
ncclResult_t pncclMemAlloc(void** ptr, size_t size);
|
| 109 |
+
|
| 110 |
+
ncclResult_t ncclMemFree(void *ptr);
|
| 111 |
+
ncclResult_t pncclMemFree(void *ptr);
|
| 112 |
+
|
| 113 |
+
/* Return the NCCL_VERSION_CODE of the NCCL library in the supplied integer.
|
| 114 |
+
* This integer is coded with the MAJOR, MINOR and PATCH level of the
|
| 115 |
+
* NCCL library
|
| 116 |
+
*/
|
| 117 |
+
ncclResult_t ncclGetVersion(int *version);
|
| 118 |
+
ncclResult_t pncclGetVersion(int *version);
|
| 119 |
+
|
| 120 |
+
/* Generates an Id to be used in ncclCommInitRank. ncclGetUniqueId should be
|
| 121 |
+
* called once and the Id should be distributed to all ranks in the
|
| 122 |
+
* communicator before calling ncclCommInitRank. */
|
| 123 |
+
ncclResult_t ncclGetUniqueId(ncclUniqueId* uniqueId);
|
| 124 |
+
ncclResult_t pncclGetUniqueId(ncclUniqueId* uniqueId);
|
| 125 |
+
|
| 126 |
+
/* Create a new communicator (multi thread/process version) with a configuration
|
| 127 |
+
* set by users. */
|
| 128 |
+
ncclResult_t ncclCommInitRankConfig(ncclComm_t* comm, int nranks, ncclUniqueId commId, int rank, ncclConfig_t* config);
|
| 129 |
+
ncclResult_t pncclCommInitRankConfig(ncclComm_t* comm, int nranks, ncclUniqueId commId, int rank, ncclConfig_t* config);
|
| 130 |
+
|
| 131 |
+
/* Creates a new communicator (multi thread/process version).
|
| 132 |
+
* rank must be between 0 and nranks-1 and unique within a communicator clique.
|
| 133 |
+
* Each rank is associated to a CUDA device, which has to be set before calling
|
| 134 |
+
* ncclCommInitRank.
|
| 135 |
+
* ncclCommInitRank implicitly syncronizes with other ranks, so it must be
|
| 136 |
+
* called by different threads/processes or use ncclGroupStart/ncclGroupEnd. */
|
| 137 |
+
ncclResult_t ncclCommInitRank(ncclComm_t* comm, int nranks, ncclUniqueId commId, int rank);
|
| 138 |
+
ncclResult_t pncclCommInitRank(ncclComm_t* comm, int nranks, ncclUniqueId commId, int rank);
|
| 139 |
+
|
| 140 |
+
/* Creates a clique of communicators (single process version).
|
| 141 |
+
* This is a convenience function to create a single-process communicator clique.
|
| 142 |
+
* Returns an array of ndev newly initialized communicators in comm.
|
| 143 |
+
* comm should be pre-allocated with size at least ndev*sizeof(ncclComm_t).
|
| 144 |
+
* If devlist is NULL, the first ndev CUDA devices are used.
|
| 145 |
+
* Order of devlist defines user-order of processors within the communicator. */
|
| 146 |
+
ncclResult_t ncclCommInitAll(ncclComm_t* comm, int ndev, const int* devlist);
|
| 147 |
+
ncclResult_t pncclCommInitAll(ncclComm_t* comm, int ndev, const int* devlist);
|
| 148 |
+
|
| 149 |
+
/* Finalize a communicator. ncclCommFinalize flushes all issued communications,
|
| 150 |
+
* and marks communicator state as ncclInProgress. The state will change to ncclSuccess
|
| 151 |
+
* when the communicator is globally quiescent and related resources are freed; then,
|
| 152 |
+
* calling ncclCommDestroy can locally free the rest of the resources (e.g. communicator
|
| 153 |
+
* itself) without blocking. */
|
| 154 |
+
ncclResult_t ncclCommFinalize(ncclComm_t comm);
|
| 155 |
+
ncclResult_t pncclCommFinalize(ncclComm_t comm);
|
| 156 |
+
|
| 157 |
+
/* Frees local resources associated with communicator object. */
|
| 158 |
+
ncclResult_t ncclCommDestroy(ncclComm_t comm);
|
| 159 |
+
ncclResult_t pncclCommDestroy(ncclComm_t comm);
|
| 160 |
+
|
| 161 |
+
/* Frees resources associated with communicator object and aborts any operations
|
| 162 |
+
* that might still be running on the device. */
|
| 163 |
+
ncclResult_t ncclCommAbort(ncclComm_t comm);
|
| 164 |
+
ncclResult_t pncclCommAbort(ncclComm_t comm);
|
| 165 |
+
|
| 166 |
+
/* Creates one or more communicators from an existing one.
|
| 167 |
+
* Ranks with the same color will end up in the same communicator.
|
| 168 |
+
* Within the new communicator, key will be used to order ranks.
|
| 169 |
+
* NCCL_SPLIT_NOCOLOR as color will indicate the rank will not be part of any group
|
| 170 |
+
* and will therefore return a NULL communicator.
|
| 171 |
+
* If config is NULL, the new communicator will inherit the original communicator's
|
| 172 |
+
* configuration*/
|
| 173 |
+
ncclResult_t ncclCommSplit(ncclComm_t comm, int color, int key, ncclComm_t *newcomm, ncclConfig_t* config);
|
| 174 |
+
ncclResult_t pncclCommSplit(ncclComm_t comm, int color, int key, ncclComm_t *newcomm, ncclConfig_t* config);
|
| 175 |
+
|
| 176 |
+
/* Creates a new communicator (multi thread/process version), similar to ncclCommInitRankConfig.
|
| 177 |
+
* Allows to use more than one ncclUniqueId (up to one per rank), indicated by nId, to accelerate the init operation.
|
| 178 |
+
* The number of ncclUniqueIds and their order must be the same for every rank.
|
| 179 |
+
*/
|
| 180 |
+
ncclResult_t ncclCommInitRankScalable(ncclComm_t* newcomm, int nranks, int myrank, int nId, ncclUniqueId* commIds, ncclConfig_t* config);
|
| 181 |
+
ncclResult_t pncclCommInitRankScalable(ncclComm_t* newcomm, int nranks, int myrank, int nId, ncclUniqueId* commIds, ncclConfig_t* config);
|
| 182 |
+
|
| 183 |
+
/* Returns a string for each error code. */
|
| 184 |
+
const char* ncclGetErrorString(ncclResult_t result);
|
| 185 |
+
const char* pncclGetErrorString(ncclResult_t result);
|
| 186 |
+
|
| 187 |
+
/* Returns a human-readable message of the last error that occurred. */
|
| 188 |
+
const char* ncclGetLastError(ncclComm_t comm);
|
| 189 |
+
const char* pncclGetLastError(ncclComm_t comm);
|
| 190 |
+
|
| 191 |
+
/* Reload environment variables that determine logging. */
|
| 192 |
+
void ncclResetDebugInit();
|
| 193 |
+
void pncclResetDebugInit();
|
| 194 |
+
|
| 195 |
+
/* Checks whether the comm has encountered any asynchronous errors */
|
| 196 |
+
ncclResult_t ncclCommGetAsyncError(ncclComm_t comm, ncclResult_t *asyncError);
|
| 197 |
+
ncclResult_t pncclCommGetAsyncError(ncclComm_t comm, ncclResult_t *asyncError);
|
| 198 |
+
|
| 199 |
+
/* Gets the number of ranks in the communicator clique. */
|
| 200 |
+
ncclResult_t ncclCommCount(const ncclComm_t comm, int* count);
|
| 201 |
+
ncclResult_t pncclCommCount(const ncclComm_t comm, int* count);
|
| 202 |
+
|
| 203 |
+
/* Returns the cuda device number associated with the communicator. */
|
| 204 |
+
ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* device);
|
| 205 |
+
ncclResult_t pncclCommCuDevice(const ncclComm_t comm, int* device);
|
| 206 |
+
|
| 207 |
+
/* Returns the user-ordered "rank" associated with the communicator. */
|
| 208 |
+
ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank);
|
| 209 |
+
ncclResult_t pncclCommUserRank(const ncclComm_t comm, int* rank);
|
| 210 |
+
|
| 211 |
+
/* Register CUDA buffer for zero-copy operation */
|
| 212 |
+
ncclResult_t ncclCommRegister(const ncclComm_t comm, void* buff, size_t size, void** handle);
|
| 213 |
+
ncclResult_t pncclCommRegister(const ncclComm_t comm, void* buff, size_t size, void** handle);
|
| 214 |
+
|
| 215 |
+
/* Deregister CUDA buffer */
|
| 216 |
+
ncclResult_t ncclCommDeregister(const ncclComm_t comm, void* handle);
|
| 217 |
+
ncclResult_t pncclCommDeregister(const ncclComm_t comm, void* handle);
|
| 218 |
+
|
| 219 |
+
/* Reduction operation selector */
|
| 220 |
+
typedef enum { ncclNumOps_dummy = 5 } ncclRedOp_dummy_t;
|
| 221 |
+
typedef enum { ncclSum = 0,
|
| 222 |
+
ncclProd = 1,
|
| 223 |
+
ncclMax = 2,
|
| 224 |
+
ncclMin = 3,
|
| 225 |
+
ncclAvg = 4,
|
| 226 |
+
/* ncclNumOps: The number of built-in ncclRedOp_t values. Also
|
| 227 |
+
* serves as the least possible value for dynamic ncclRedOp_t's
|
| 228 |
+
* as constructed by ncclRedOpCreate*** functions. */
|
| 229 |
+
ncclNumOps = 5,
|
| 230 |
+
/* ncclMaxRedOp: The largest valid value for ncclRedOp_t.
|
| 231 |
+
* It is defined to be the largest signed value (since compilers
|
| 232 |
+
* are permitted to use signed enums) that won't grow
|
| 233 |
+
* sizeof(ncclRedOp_t) when compared to previous NCCL versions to
|
| 234 |
+
* maintain ABI compatibility. */
|
| 235 |
+
ncclMaxRedOp = 0x7fffffff>>(32-8*sizeof(ncclRedOp_dummy_t))
|
| 236 |
+
} ncclRedOp_t;
|
| 237 |
+
|
| 238 |
+
/* Data types */
|
| 239 |
+
typedef enum { ncclInt8 = 0, ncclChar = 0,
|
| 240 |
+
ncclUint8 = 1,
|
| 241 |
+
ncclInt32 = 2, ncclInt = 2,
|
| 242 |
+
ncclUint32 = 3,
|
| 243 |
+
ncclInt64 = 4,
|
| 244 |
+
ncclUint64 = 5,
|
| 245 |
+
ncclFloat16 = 6, ncclHalf = 6,
|
| 246 |
+
ncclFloat32 = 7, ncclFloat = 7,
|
| 247 |
+
ncclFloat64 = 8, ncclDouble = 8,
|
| 248 |
+
ncclBfloat16 = 9,
|
| 249 |
+
ncclFloat8e4m3 = 10,
|
| 250 |
+
ncclFloat8e5m2 = 11,
|
| 251 |
+
ncclNumTypes = 12
|
| 252 |
+
} ncclDataType_t;
|
| 253 |
+
|
| 254 |
+
/* ncclScalarResidence_t: Location and dereferencing logic for scalar arguments. */
|
| 255 |
+
typedef enum {
|
| 256 |
+
/* ncclScalarDevice: The scalar is in device-visible memory and will be
|
| 257 |
+
* dereferenced while the collective is running. */
|
| 258 |
+
ncclScalarDevice = 0,
|
| 259 |
+
|
| 260 |
+
/* ncclScalarHostImmediate: The scalar is in host-visible memory and will be
|
| 261 |
+
* dereferenced before the ncclRedOpCreate***() function returns. */
|
| 262 |
+
ncclScalarHostImmediate = 1
|
| 263 |
+
} ncclScalarResidence_t;
|
| 264 |
+
|
| 265 |
+
/*
|
| 266 |
+
* ncclRedOpCreatePreMulSum
|
| 267 |
+
*
|
| 268 |
+
* Creates a new reduction operator which pre-multiplies input values by a given
|
| 269 |
+
* scalar locally before reducing them with peer values via summation. For use
|
| 270 |
+
* only with collectives launched against *comm* and *datatype*. The
|
| 271 |
+
* *residence* argument indicates how/when the memory pointed to by *scalar*
|
| 272 |
+
* will be dereferenced. Upon return, the newly created operator's handle
|
| 273 |
+
* is stored in *op*.
|
| 274 |
+
*/
|
| 275 |
+
ncclResult_t ncclRedOpCreatePreMulSum(ncclRedOp_t *op, void *scalar, ncclDataType_t datatype, ncclScalarResidence_t residence, ncclComm_t comm);
|
| 276 |
+
ncclResult_t pncclRedOpCreatePreMulSum(ncclRedOp_t *op, void *scalar, ncclDataType_t datatype, ncclScalarResidence_t residence, ncclComm_t comm);
|
| 277 |
+
|
| 278 |
+
/*
|
| 279 |
+
* ncclRedOpDestroy
|
| 280 |
+
*
|
| 281 |
+
* Destroys the reduction operator *op*. The operator must have been created by
|
| 282 |
+
* ncclRedOpCreatePreMul with the matching communicator *comm*. An operator may be
|
| 283 |
+
* destroyed as soon as the last NCCL function which is given that operator returns.
|
| 284 |
+
*/
|
| 285 |
+
ncclResult_t ncclRedOpDestroy(ncclRedOp_t op, ncclComm_t comm);
|
| 286 |
+
ncclResult_t pncclRedOpDestroy(ncclRedOp_t op, ncclComm_t comm);
|
| 287 |
+
|
| 288 |
+
/*
|
| 289 |
+
* Collective communication operations
|
| 290 |
+
*
|
| 291 |
+
* Collective communication operations must be called separately for each
|
| 292 |
+
* communicator in a communicator clique.
|
| 293 |
+
*
|
| 294 |
+
* They return when operations have been enqueued on the CUDA stream.
|
| 295 |
+
*
|
| 296 |
+
* Since they may perform inter-CPU synchronization, each call has to be done
|
| 297 |
+
* from a different thread or process, or need to use Group Semantics (see
|
| 298 |
+
* below).
|
| 299 |
+
*/
|
| 300 |
+
|
| 301 |
+
/*
|
| 302 |
+
* Reduce
|
| 303 |
+
*
|
| 304 |
+
* Reduces data arrays of length count in sendbuff into recvbuff using op
|
| 305 |
+
* operation.
|
| 306 |
+
* recvbuff may be NULL on all calls except for root device.
|
| 307 |
+
* root is the rank (not the CUDA device) where data will reside after the
|
| 308 |
+
* operation is complete.
|
| 309 |
+
*
|
| 310 |
+
* In-place operation will happen if sendbuff == recvbuff.
|
| 311 |
+
*/
|
| 312 |
+
ncclResult_t ncclReduce(const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype,
|
| 313 |
+
ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream);
|
| 314 |
+
ncclResult_t pncclReduce(const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype,
|
| 315 |
+
ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream);
|
| 316 |
+
|
| 317 |
+
/*
|
| 318 |
+
* (deprecated) Broadcast (in-place)
|
| 319 |
+
*
|
| 320 |
+
* Copies count values from root to all other devices.
|
| 321 |
+
* root is the rank (not the CUDA device) where data resides before the
|
| 322 |
+
* operation is started.
|
| 323 |
+
*
|
| 324 |
+
* This operation is implicitely in place.
|
| 325 |
+
*/
|
| 326 |
+
ncclResult_t ncclBcast(void* buff, size_t count, ncclDataType_t datatype, int root,
|
| 327 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 328 |
+
ncclResult_t pncclBcast(void* buff, size_t count, ncclDataType_t datatype, int root,
|
| 329 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 330 |
+
|
| 331 |
+
/*
|
| 332 |
+
* Broadcast
|
| 333 |
+
*
|
| 334 |
+
* Copies count values from root to all other devices.
|
| 335 |
+
* root is the rank (not the CUDA device) where data resides before the
|
| 336 |
+
* operation is started.
|
| 337 |
+
*
|
| 338 |
+
* In-place operation will happen if sendbuff == recvbuff.
|
| 339 |
+
*/
|
| 340 |
+
ncclResult_t ncclBroadcast(const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype, int root,
|
| 341 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 342 |
+
ncclResult_t pncclBroadcast(const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype, int root,
|
| 343 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 344 |
+
|
| 345 |
+
/*
|
| 346 |
+
* All-Reduce
|
| 347 |
+
*
|
| 348 |
+
* Reduces data arrays of length count in sendbuff using op operation, and
|
| 349 |
+
* leaves identical copies of result on each recvbuff.
|
| 350 |
+
*
|
| 351 |
+
* In-place operation will happen if sendbuff == recvbuff.
|
| 352 |
+
*/
|
| 353 |
+
ncclResult_t ncclAllReduce(const void* sendbuff, void* recvbuff, size_t count,
|
| 354 |
+
ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, cudaStream_t stream);
|
| 355 |
+
ncclResult_t pncclAllReduce(const void* sendbuff, void* recvbuff, size_t count,
|
| 356 |
+
ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, cudaStream_t stream);
|
| 357 |
+
|
| 358 |
+
/*
|
| 359 |
+
* Reduce-Scatter
|
| 360 |
+
*
|
| 361 |
+
* Reduces data in sendbuff using op operation and leaves reduced result
|
| 362 |
+
* scattered over the devices so that recvbuff on rank i will contain the i-th
|
| 363 |
+
* block of the result.
|
| 364 |
+
* Assumes sendcount is equal to nranks*recvcount, which means that sendbuff
|
| 365 |
+
* should have a size of at least nranks*recvcount elements.
|
| 366 |
+
*
|
| 367 |
+
* In-place operations will happen if recvbuff == sendbuff + rank * recvcount.
|
| 368 |
+
*/
|
| 369 |
+
ncclResult_t ncclReduceScatter(const void* sendbuff, void* recvbuff,
|
| 370 |
+
size_t recvcount, ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm,
|
| 371 |
+
cudaStream_t stream);
|
| 372 |
+
ncclResult_t pncclReduceScatter(const void* sendbuff, void* recvbuff,
|
| 373 |
+
size_t recvcount, ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm,
|
| 374 |
+
cudaStream_t stream);
|
| 375 |
+
|
| 376 |
+
/*
|
| 377 |
+
* All-Gather
|
| 378 |
+
*
|
| 379 |
+
* Each device gathers sendcount values from other GPUs into recvbuff,
|
| 380 |
+
* receiving data from rank i at offset i*sendcount.
|
| 381 |
+
* Assumes recvcount is equal to nranks*sendcount, which means that recvbuff
|
| 382 |
+
* should have a size of at least nranks*sendcount elements.
|
| 383 |
+
*
|
| 384 |
+
* In-place operations will happen if sendbuff == recvbuff + rank * sendcount.
|
| 385 |
+
*/
|
| 386 |
+
ncclResult_t ncclAllGather(const void* sendbuff, void* recvbuff, size_t sendcount,
|
| 387 |
+
ncclDataType_t datatype, ncclComm_t comm, cudaStream_t stream);
|
| 388 |
+
ncclResult_t pncclAllGather(const void* sendbuff, void* recvbuff, size_t sendcount,
|
| 389 |
+
ncclDataType_t datatype, ncclComm_t comm, cudaStream_t stream);
|
| 390 |
+
|
| 391 |
+
/*
|
| 392 |
+
* Send
|
| 393 |
+
*
|
| 394 |
+
* Send data from sendbuff to rank peer.
|
| 395 |
+
*
|
| 396 |
+
* Rank peer needs to call ncclRecv with the same datatype and the same count from this
|
| 397 |
+
* rank.
|
| 398 |
+
*
|
| 399 |
+
* This operation is blocking for the GPU. If multiple ncclSend and ncclRecv operations
|
| 400 |
+
* need to progress concurrently to complete, they must be fused within a ncclGroupStart/
|
| 401 |
+
* ncclGroupEnd section.
|
| 402 |
+
*/
|
| 403 |
+
ncclResult_t ncclSend(const void* sendbuff, size_t count, ncclDataType_t datatype, int peer,
|
| 404 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 405 |
+
ncclResult_t pncclSend(const void* sendbuff, size_t count, ncclDataType_t datatype, int peer,
|
| 406 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 407 |
+
|
| 408 |
+
/*
|
| 409 |
+
* Receive
|
| 410 |
+
*
|
| 411 |
+
* Receive data from rank peer into recvbuff.
|
| 412 |
+
*
|
| 413 |
+
* Rank peer needs to call ncclSend with the same datatype and the same count to this
|
| 414 |
+
* rank.
|
| 415 |
+
*
|
| 416 |
+
* This operation is blocking for the GPU. If multiple ncclSend and ncclRecv operations
|
| 417 |
+
* need to progress concurrently to complete, they must be fused within a ncclGroupStart/
|
| 418 |
+
* ncclGroupEnd section.
|
| 419 |
+
*/
|
| 420 |
+
ncclResult_t pncclRecv(void* recvbuff, size_t count, ncclDataType_t datatype, int peer,
|
| 421 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 422 |
+
ncclResult_t ncclRecv(void* recvbuff, size_t count, ncclDataType_t datatype, int peer,
|
| 423 |
+
ncclComm_t comm, cudaStream_t stream);
|
| 424 |
+
|
| 425 |
+
/*
|
| 426 |
+
* Group semantics
|
| 427 |
+
*
|
| 428 |
+
* When managing multiple GPUs from a single thread, and since NCCL collective
|
| 429 |
+
* calls may perform inter-CPU synchronization, we need to "group" calls for
|
| 430 |
+
* different ranks/devices into a single call.
|
| 431 |
+
*
|
| 432 |
+
* Grouping NCCL calls as being part of the same collective operation is done
|
| 433 |
+
* using ncclGroupStart and ncclGroupEnd. ncclGroupStart will enqueue all
|
| 434 |
+
* collective calls until the ncclGroupEnd call, which will wait for all calls
|
| 435 |
+
* to be complete. Note that for collective communication, ncclGroupEnd only
|
| 436 |
+
* guarantees that the operations are enqueued on the streams, not that
|
| 437 |
+
* the operation is effectively done.
|
| 438 |
+
*
|
| 439 |
+
* Both collective communication and ncclCommInitRank can be used in conjunction
|
| 440 |
+
* of ncclGroupStart/ncclGroupEnd, but not together.
|
| 441 |
+
*
|
| 442 |
+
* Group semantics also allow to fuse multiple operations on the same device
|
| 443 |
+
* to improve performance (for aggregated collective calls), or to permit
|
| 444 |
+
* concurrent progress of multiple send/receive operations.
|
| 445 |
+
*/
|
| 446 |
+
|
| 447 |
+
/*
|
| 448 |
+
* Group Start
|
| 449 |
+
*
|
| 450 |
+
* Start a group call. All calls to NCCL until ncclGroupEnd will be fused into
|
| 451 |
+
* a single NCCL operation. Nothing will be started on the CUDA stream until
|
| 452 |
+
* ncclGroupEnd.
|
| 453 |
+
*/
|
| 454 |
+
ncclResult_t ncclGroupStart();
|
| 455 |
+
ncclResult_t pncclGroupStart();
|
| 456 |
+
|
| 457 |
+
/*
|
| 458 |
+
* Group End
|
| 459 |
+
*
|
| 460 |
+
* End a group call. Start a fused NCCL operation consisting of all calls since
|
| 461 |
+
* ncclGroupStart. Operations on the CUDA stream depending on the NCCL operations
|
| 462 |
+
* need to be called after ncclGroupEnd.
|
| 463 |
+
*/
|
| 464 |
+
ncclResult_t ncclGroupEnd();
|
| 465 |
+
ncclResult_t pncclGroupEnd();
|
| 466 |
+
|
| 467 |
+
/*
|
| 468 |
+
* Group Simulate End
|
| 469 |
+
*
|
| 470 |
+
* Simulate a ncclGroupEnd() call and return NCCL's simulation info in a struct.
|
| 471 |
+
*/
|
| 472 |
+
ncclResult_t ncclGroupSimulateEnd(ncclSimInfo_t* simInfo);
|
| 473 |
+
ncclResult_t pncclGroupSimulateEnd(ncclSimInfo_t* simInfo);
|
| 474 |
+
|
| 475 |
+
#ifdef __cplusplus
|
| 476 |
+
} // end extern "C"
|
| 477 |
+
#endif
|
| 478 |
+
|
| 479 |
+
#endif // end include guard
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/__init__.py
ADDED
|
File without changes
|
tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (185 Bytes). View file
|
|
|