Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- infer_4_37_2/lib/python3.10/site-packages/httptools/__init__.py +6 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h +253 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h +1677 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h +516 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h +386 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h +840 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h +763 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_precalc.h +0 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h +498 -0
- infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 +3 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so +3 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so +3 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/arrays.pyi +40 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so +0 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/hashing.pyi +9 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/lib.pyi +213 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so +3 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/properties.pyi +27 -0
- infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/tslib.pyi +37 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__init__.py +24 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/abstract_nodes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/approximations.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cfunctions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cnodes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cutils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cxxnodes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/fnodes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/futils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/pynodes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/abstract_nodes.py +18 -0
- janus/lib/python3.10/site-packages/sympy/codegen/approximations.py +187 -0
- janus/lib/python3.10/site-packages/sympy/codegen/cfunctions.py +536 -0
- janus/lib/python3.10/site-packages/sympy/codegen/cnodes.py +156 -0
- janus/lib/python3.10/site-packages/sympy/codegen/cxxnodes.py +14 -0
- janus/lib/python3.10/site-packages/sympy/codegen/futils.py +40 -0
- janus/lib/python3.10/site-packages/sympy/codegen/pynodes.py +11 -0
- janus/lib/python3.10/site-packages/sympy/codegen/tests/__init__.py +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/tests/__pycache__/test_abstract_nodes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/codegen/tests/test_ast.py +661 -0
- janus/lib/python3.10/site-packages/sympy/codegen/tests/test_cfunctions.py +165 -0
- janus/lib/python3.10/site-packages/sympy/codegen/tests/test_cnodes.py +112 -0
- janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/generators.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1130,3 +1130,7 @@ infer_4_37_2/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolver.so.11
|
|
| 1130 |
infer_4_37_2/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text
|
| 1131 |
infer_4_37_2/lib/python3.10/site-packages/httptools/parser/parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1132 |
infer_4_37_2/lib/python3.10/site-packages/httptools/parser/url_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1130 |
infer_4_37_2/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text
|
| 1131 |
infer_4_37_2/lib/python3.10/site-packages/httptools/parser/parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1132 |
infer_4_37_2/lib/python3.10/site-packages/httptools/parser/url_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1133 |
+
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1134 |
+
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1135 |
+
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
|
| 1136 |
+
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
infer_4_37_2/lib/python3.10/site-packages/httptools/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import parser
|
| 2 |
+
from .parser import * # NOQA
|
| 3 |
+
|
| 4 |
+
from ._version import __version__ # NOQA
|
| 5 |
+
|
| 6 |
+
__all__ = parser.__all__ + ('__version__',) # NOQA
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_DISCRETE_H_)
|
| 52 |
+
#define CURAND_DISCRETE_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include "curand_mrg32k3a.h"
|
| 65 |
+
#include "curand_mtgp32_kernel.h"
|
| 66 |
+
#include "curand_philox4x32_x.h"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
template <typename T>
|
| 70 |
+
QUALIFIERS unsigned int _curand_discrete(T x, curandDiscreteDistribution_t discrete_distribution){
|
| 71 |
+
if (discrete_distribution->method == CURAND_M2){
|
| 72 |
+
return _curand_M2_double(x, discrete_distribution->M2);
|
| 73 |
+
}
|
| 74 |
+
return (unsigned int)((discrete_distribution->stddev * _curand_normal_icdf_double(x)) + discrete_distribution->mean + 0.5);
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
template <typename STATE>
|
| 79 |
+
QUALIFIERS unsigned int curand__discrete(STATE state, curandDiscreteDistribution_t discrete_distribution){
|
| 80 |
+
if (discrete_distribution->method == CURAND_M2){
|
| 81 |
+
return curand_M2_double(state, discrete_distribution->M2);
|
| 82 |
+
}
|
| 83 |
+
return (unsigned int)((discrete_distribution->stddev * curand_normal_double(state)) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
template <typename STATE>
|
| 87 |
+
QUALIFIERS uint4 curand__discrete4(STATE state, curandDiscreteDistribution_t discrete_distribution){
|
| 88 |
+
if (discrete_distribution->method == CURAND_M2){
|
| 89 |
+
return curand_M2_double4(state, discrete_distribution->M2);
|
| 90 |
+
}
|
| 91 |
+
double4 _res;
|
| 92 |
+
uint4 result;
|
| 93 |
+
_res = curand_normal4_double(state);
|
| 94 |
+
result.x = (unsigned int)((discrete_distribution->stddev * _res.x) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 95 |
+
result.y = (unsigned int)((discrete_distribution->stddev * _res.y) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 96 |
+
result.z = (unsigned int)((discrete_distribution->stddev * _res.z) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 97 |
+
result.w = (unsigned int)((discrete_distribution->stddev * _res.w) + discrete_distribution->mean + 0.5); //Round to nearest
|
| 98 |
+
return result;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
/*
|
| 102 |
+
* \brief Return a discrete distributed unsigned int from a XORWOW generator.
|
| 103 |
+
*
|
| 104 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 105 |
+
* distribution defined by \p discrete_distribution from the XORWOW generator in \p state,
|
| 106 |
+
* increment position of generator by one.
|
| 107 |
+
*
|
| 108 |
+
* \param state - Pointer to state to update
|
| 109 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 110 |
+
*
|
| 111 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 112 |
+
*/
|
| 113 |
+
QUALIFIERS unsigned int curand_discrete(curandStateXORWOW_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 114 |
+
{
|
| 115 |
+
return curand__discrete(state, discrete_distribution);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
/*
|
| 119 |
+
* \brief Return a discrete distributed unsigned int from a Philox4_32_10 generator.
|
| 120 |
+
*
|
| 121 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 122 |
+
* distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
|
| 123 |
+
* increment position of generator by one.
|
| 124 |
+
*
|
| 125 |
+
* \param state - Pointer to state to update
|
| 126 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 127 |
+
*
|
| 128 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 129 |
+
*/
|
| 130 |
+
QUALIFIERS unsigned int curand_discrete(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 131 |
+
{
|
| 132 |
+
return curand__discrete(state, discrete_distribution);
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
/*
|
| 136 |
+
* \brief Return four discrete distributed unsigned ints from a Philox4_32_10 generator.
|
| 137 |
+
*
|
| 138 |
+
* Return four single discrete distributed unsigned ints derived from a
|
| 139 |
+
* distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
|
| 140 |
+
* increment position of generator by one.
|
| 141 |
+
*
|
| 142 |
+
* \param state - Pointer to state to update
|
| 143 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 144 |
+
*
|
| 145 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 146 |
+
*/
|
| 147 |
+
QUALIFIERS uint4 curand_discrete4(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 148 |
+
{
|
| 149 |
+
return curand__discrete4(state, discrete_distribution);
|
| 150 |
+
}
|
| 151 |
+
/*
|
| 152 |
+
* \brief Return a discrete distributed unsigned int from a MRG32k3a generator.
|
| 153 |
+
*
|
| 154 |
+
* Re turn a single discrete distributed unsigned int derived from a
|
| 155 |
+
* distribution defined by \p discrete_distribution from the MRG32k3a generator in \p state,
|
| 156 |
+
* increment position of generator by one.
|
| 157 |
+
*
|
| 158 |
+
* \param state - Pointer to state to update
|
| 159 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 160 |
+
*
|
| 161 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 162 |
+
*/
|
| 163 |
+
QUALIFIERS unsigned int curand_discrete(curandStateMRG32k3a_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 164 |
+
{
|
| 165 |
+
return curand__discrete(state, discrete_distribution);
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
/*
|
| 169 |
+
* \brief Return a discrete distributed unsigned int from a MTGP32 generator.
|
| 170 |
+
*
|
| 171 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 172 |
+
* distribution defined by \p discrete_distribution from the MTGP32 generator in \p state,
|
| 173 |
+
* increment position of generator by one.
|
| 174 |
+
*
|
| 175 |
+
* \param state - Pointer to state to update
|
| 176 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 177 |
+
*
|
| 178 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 179 |
+
*/
|
| 180 |
+
QUALIFIERS unsigned int curand_discrete(curandStateMtgp32_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 181 |
+
{
|
| 182 |
+
return curand__discrete(state, discrete_distribution);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/*
|
| 186 |
+
* \brief Return a discrete distributed unsigned int from a Sobol32 generator.
|
| 187 |
+
*
|
| 188 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 189 |
+
* distribution defined by \p discrete_distribution from the Sobol32 generator in \p state,
|
| 190 |
+
* increment position of generator by one.
|
| 191 |
+
*
|
| 192 |
+
* \param state - Pointer to state to update
|
| 193 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 194 |
+
*
|
| 195 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 196 |
+
*/
|
| 197 |
+
QUALIFIERS unsigned int curand_discrete(curandStateSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 198 |
+
{
|
| 199 |
+
return curand__discrete(state, discrete_distribution);
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
/*
|
| 203 |
+
* \brief Return a discrete distributed unsigned int from a scrambled Sobol32 generator.
|
| 204 |
+
*
|
| 205 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 206 |
+
* distribution defined by \p discrete_distribution from the scrambled Sobol32 generator in \p state,
|
| 207 |
+
* increment position of generator by one.
|
| 208 |
+
*
|
| 209 |
+
* \param state - Pointer to state to update
|
| 210 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 211 |
+
*
|
| 212 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 213 |
+
*/
|
| 214 |
+
QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 215 |
+
{
|
| 216 |
+
return curand__discrete(state, discrete_distribution);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
/*
|
| 220 |
+
* \brief Return a discrete distributed unsigned int from a Sobol64 generator.
|
| 221 |
+
*
|
| 222 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 223 |
+
* distribution defined by \p discrete_distribution from the Sobol64 generator in \p state,
|
| 224 |
+
* increment position of generator by one.
|
| 225 |
+
*
|
| 226 |
+
* \param state - Pointer to state to update
|
| 227 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 228 |
+
*
|
| 229 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 230 |
+
*/
|
| 231 |
+
QUALIFIERS unsigned int curand_discrete(curandStateSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 232 |
+
{
|
| 233 |
+
return curand__discrete(state, discrete_distribution);
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
/*
|
| 237 |
+
* \brief Return a discrete distributed unsigned int from a scrambled Sobol64 generator.
|
| 238 |
+
*
|
| 239 |
+
* Return a single discrete distributed unsigned int derived from a
|
| 240 |
+
* distribution defined by \p discrete_distribution from the scrambled Sobol64 generator in \p state,
|
| 241 |
+
* increment position of generator by one.
|
| 242 |
+
*
|
| 243 |
+
* \param state - Pointer to state to update
|
| 244 |
+
* \param discrete_distribution - ancillary structure for discrete distribution
|
| 245 |
+
*
|
| 246 |
+
* \return unsigned int distributed by distribution defined by \p discrete_distribution.
|
| 247 |
+
*/
|
| 248 |
+
QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
|
| 249 |
+
{
|
| 250 |
+
return curand__discrete(state, discrete_distribution);
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
#endif // !defined(CURAND_DISCRETE_H_)
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h
ADDED
|
@@ -0,0 +1,1677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_KERNEL_H_)
|
| 52 |
+
#define CURAND_KERNEL_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#if !defined(QUALIFIERS)
|
| 61 |
+
#define QUALIFIERS static __forceinline__ __device__
|
| 62 |
+
#endif
|
| 63 |
+
|
| 64 |
+
/* To prevent unused parameter warnings */
|
| 65 |
+
#if !defined(GCC_UNUSED_PARAMETER)
|
| 66 |
+
#if defined(__GNUC__)
|
| 67 |
+
#define GCC_UNUSED_PARAMETER __attribute__((unused))
|
| 68 |
+
#else
|
| 69 |
+
#define GCC_UNUSED_PARAMETER
|
| 70 |
+
#endif /* defined(__GNUC__) */
|
| 71 |
+
#endif /* !defined(GCC_UNUSED_PARAMETER) */
|
| 72 |
+
|
| 73 |
+
#include <nv/target>
|
| 74 |
+
|
| 75 |
+
#ifdef __CUDACC_RTC__
|
| 76 |
+
#define CURAND_DETAIL_USE_CUDA_STL
|
| 77 |
+
#endif
|
| 78 |
+
|
| 79 |
+
#if __cplusplus >= 201103L
|
| 80 |
+
# ifdef CURAND_DETAIL_USE_CUDA_STL
|
| 81 |
+
# define CURAND_STD cuda::std
|
| 82 |
+
# include <cuda/std/type_traits>
|
| 83 |
+
# else
|
| 84 |
+
# define CURAND_STD std
|
| 85 |
+
# include <type_traits>
|
| 86 |
+
# endif // CURAND_DETAIL_USE_CUDA_STL
|
| 87 |
+
#else
|
| 88 |
+
// To support C++03 compilation
|
| 89 |
+
# define CURAND_STD curand_detail
|
| 90 |
+
namespace curand_detail {
|
| 91 |
+
template<bool B, class T = void>
|
| 92 |
+
struct enable_if {};
|
| 93 |
+
|
| 94 |
+
template<class T>
|
| 95 |
+
struct enable_if<true, T> { typedef T type; };
|
| 96 |
+
|
| 97 |
+
template<class T, class U>
|
| 98 |
+
struct is_same { static const bool value = false; };
|
| 99 |
+
|
| 100 |
+
template<class T>
|
| 101 |
+
struct is_same<T, T> { static const bool value = true; };
|
| 102 |
+
} // namespace curand_detail
|
| 103 |
+
#endif // __cplusplus >= 201103L
|
| 104 |
+
|
| 105 |
+
#ifndef __CUDACC_RTC__
|
| 106 |
+
#include <math.h>
|
| 107 |
+
#endif // __CUDACC_RTC__
|
| 108 |
+
|
| 109 |
+
#include "curand.h"
|
| 110 |
+
#include "curand_discrete.h"
|
| 111 |
+
#include "curand_precalc.h"
|
| 112 |
+
#include "curand_mrg32k3a.h"
|
| 113 |
+
#include "curand_mtgp32_kernel.h"
|
| 114 |
+
#include "curand_philox4x32_x.h"
|
| 115 |
+
#include "curand_globals.h"
|
| 116 |
+
|
| 117 |
+
/* Test RNG */
|
| 118 |
+
/* This generator uses the formula:
|
| 119 |
+
x_n = x_(n-1) + 1 mod 2^32
|
| 120 |
+
x_0 = (unsigned int)seed * 3
|
| 121 |
+
Subsequences are spaced 31337 steps apart.
|
| 122 |
+
*/
|
| 123 |
+
struct curandStateTest {
|
| 124 |
+
unsigned int v;
|
| 125 |
+
};
|
| 126 |
+
|
| 127 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 128 |
+
typedef struct curandStateTest curandStateTest_t;
|
| 129 |
+
/** \endcond */
|
| 130 |
+
|
| 131 |
+
/* XORSHIFT FAMILY RNGs */
|
| 132 |
+
/* These generators are a family proposed by Marsaglia. They keep state
|
| 133 |
+
in 32 bit chunks, then use repeated shift and xor operations to scramble
|
| 134 |
+
the bits. The following generators are a combination of a simple Weyl
|
| 135 |
+
generator with an N variable XORSHIFT generator.
|
| 136 |
+
*/
|
| 137 |
+
|
| 138 |
+
/* XORSHIFT RNG */
|
| 139 |
+
/* This generator uses the xorwow formula of
|
| 140 |
+
www.jstatsoft.org/v08/i14/paper page 5
|
| 141 |
+
Has period 2^192 - 2^32.
|
| 142 |
+
*/
|
| 143 |
+
/**
|
| 144 |
+
* CURAND XORWOW state
|
| 145 |
+
*/
|
| 146 |
+
struct curandStateXORWOW;
|
| 147 |
+
|
| 148 |
+
/*
|
| 149 |
+
* Implementation details not in reference documentation */
|
| 150 |
+
struct curandStateXORWOW {
|
| 151 |
+
unsigned int d, v[5];
|
| 152 |
+
int boxmuller_flag;
|
| 153 |
+
int boxmuller_flag_double;
|
| 154 |
+
float boxmuller_extra;
|
| 155 |
+
double boxmuller_extra_double;
|
| 156 |
+
};
|
| 157 |
+
|
| 158 |
+
/*
|
| 159 |
+
* CURAND XORWOW state
|
| 160 |
+
*/
|
| 161 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 162 |
+
typedef struct curandStateXORWOW curandStateXORWOW_t;
|
| 163 |
+
|
| 164 |
+
#define EXTRA_FLAG_NORMAL 0x00000001
|
| 165 |
+
#define EXTRA_FLAG_LOG_NORMAL 0x00000002
|
| 166 |
+
/** \endcond */
|
| 167 |
+
|
| 168 |
+
/* Combined Multiple Recursive Generators */
|
| 169 |
+
/* These generators are a family proposed by L'Ecuyer. They keep state
|
| 170 |
+
in sets of doubles, then use repeated modular arithmetic multiply operations
|
| 171 |
+
to scramble the bits in each set, and combine the result.
|
| 172 |
+
*/
|
| 173 |
+
|
| 174 |
+
/* MRG32k3a RNG */
|
| 175 |
+
/* This generator uses the MRG32k3A formula of
|
| 176 |
+
http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/c++/streams4.pdf
|
| 177 |
+
Has period 2^191.
|
| 178 |
+
*/
|
| 179 |
+
|
| 180 |
+
/* moduli for the recursions */
|
| 181 |
+
/** \cond UNHIDE_DEFINES */
|
| 182 |
+
#define MRG32K3A_MOD1 4294967087.
|
| 183 |
+
#define MRG32K3A_MOD2 4294944443.
|
| 184 |
+
|
| 185 |
+
/* Constants used in generation */
|
| 186 |
+
|
| 187 |
+
#define MRG32K3A_A12 1403580.
|
| 188 |
+
#define MRG32K3A_A13N 810728.
|
| 189 |
+
#define MRG32K3A_A21 527612.
|
| 190 |
+
#define MRG32K3A_A23N 1370589.
|
| 191 |
+
#define MRG32K3A_NORM (2.3283065498378288e-10)
|
| 192 |
+
//
|
| 193 |
+
// #define MRG32K3A_BITS_NORM ((double)((POW32_DOUBLE-1.0)/MOD1))
|
| 194 |
+
// above constant, used verbatim, rounds differently on some host systems.
|
| 195 |
+
#define MRG32K3A_BITS_NORM 1.000000048662
|
| 196 |
+
|
| 197 |
+
/** \endcond */
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
/**
|
| 203 |
+
* CURAND MRG32K3A state
|
| 204 |
+
*/
|
| 205 |
+
struct curandStateMRG32k3a;
|
| 206 |
+
|
| 207 |
+
/* Implementation details not in reference documentation */
|
| 208 |
+
struct curandStateMRG32k3a {
|
| 209 |
+
unsigned int s1[3];
|
| 210 |
+
unsigned int s2[3];
|
| 211 |
+
int boxmuller_flag;
|
| 212 |
+
int boxmuller_flag_double;
|
| 213 |
+
float boxmuller_extra;
|
| 214 |
+
double boxmuller_extra_double;
|
| 215 |
+
};
|
| 216 |
+
|
| 217 |
+
/*
|
| 218 |
+
* CURAND MRG32K3A state
|
| 219 |
+
*/
|
| 220 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 221 |
+
typedef struct curandStateMRG32k3a curandStateMRG32k3a_t;
|
| 222 |
+
/** \endcond */
|
| 223 |
+
|
| 224 |
+
/* SOBOL QRNG */
|
| 225 |
+
/**
|
| 226 |
+
* CURAND Sobol32 state
|
| 227 |
+
*/
|
| 228 |
+
struct curandStateSobol32;
|
| 229 |
+
|
| 230 |
+
/* Implementation details not in reference documentation */
|
| 231 |
+
struct curandStateSobol32 {
|
| 232 |
+
unsigned int i, x, c;
|
| 233 |
+
unsigned int direction_vectors[32];
|
| 234 |
+
};
|
| 235 |
+
|
| 236 |
+
/*
|
| 237 |
+
* CURAND Sobol32 state
|
| 238 |
+
*/
|
| 239 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 240 |
+
typedef struct curandStateSobol32 curandStateSobol32_t;
|
| 241 |
+
/** \endcond */
|
| 242 |
+
|
| 243 |
+
/**
|
| 244 |
+
* CURAND Scrambled Sobol32 state
|
| 245 |
+
*/
|
| 246 |
+
struct curandStateScrambledSobol32;
|
| 247 |
+
|
| 248 |
+
/* Implementation details not in reference documentation */
|
| 249 |
+
struct curandStateScrambledSobol32 {
|
| 250 |
+
unsigned int i, x, c;
|
| 251 |
+
unsigned int direction_vectors[32];
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
/*
|
| 255 |
+
* CURAND Scrambled Sobol32 state
|
| 256 |
+
*/
|
| 257 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 258 |
+
typedef struct curandStateScrambledSobol32 curandStateScrambledSobol32_t;
|
| 259 |
+
/** \endcond */
|
| 260 |
+
|
| 261 |
+
/**
|
| 262 |
+
* CURAND Sobol64 state
|
| 263 |
+
*/
|
| 264 |
+
struct curandStateSobol64;
|
| 265 |
+
|
| 266 |
+
/* Implementation details not in reference documentation */
|
| 267 |
+
struct curandStateSobol64 {
|
| 268 |
+
unsigned long long i, x, c;
|
| 269 |
+
unsigned long long direction_vectors[64];
|
| 270 |
+
};
|
| 271 |
+
|
| 272 |
+
/*
|
| 273 |
+
* CURAND Sobol64 state
|
| 274 |
+
*/
|
| 275 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 276 |
+
typedef struct curandStateSobol64 curandStateSobol64_t;
|
| 277 |
+
/** \endcond */
|
| 278 |
+
|
| 279 |
+
/**
|
| 280 |
+
* CURAND Scrambled Sobol64 state
|
| 281 |
+
*/
|
| 282 |
+
struct curandStateScrambledSobol64;
|
| 283 |
+
|
| 284 |
+
/* Implementation details not in reference documentation */
|
| 285 |
+
struct curandStateScrambledSobol64 {
|
| 286 |
+
unsigned long long i, x, c;
|
| 287 |
+
unsigned long long direction_vectors[64];
|
| 288 |
+
};
|
| 289 |
+
|
| 290 |
+
/*
|
| 291 |
+
* CURAND Scrambled Sobol64 state
|
| 292 |
+
*/
|
| 293 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 294 |
+
typedef struct curandStateScrambledSobol64 curandStateScrambledSobol64_t;
|
| 295 |
+
/** \endcond */
|
| 296 |
+
|
| 297 |
+
/*
|
| 298 |
+
* Default RNG
|
| 299 |
+
*/
|
| 300 |
+
/** \cond UNHIDE_TYPEDEFS */
|
| 301 |
+
typedef struct curandStateXORWOW curandState_t;
|
| 302 |
+
typedef struct curandStateXORWOW curandState;
|
| 303 |
+
/** \endcond */
|
| 304 |
+
|
| 305 |
+
/****************************************************************************/
|
| 306 |
+
/* Utility functions needed by RNGs */
|
| 307 |
+
/****************************************************************************/
|
| 308 |
+
/** \cond UNHIDE_UTILITIES */
|
| 309 |
+
/*
|
| 310 |
+
multiply vector by matrix, store in result
|
| 311 |
+
matrix is n x n, measured in 32 bit units
|
| 312 |
+
matrix is stored in row major order
|
| 313 |
+
vector and result cannot be same pointer
|
| 314 |
+
*/
|
| 315 |
+
/* Multiply a bit-vector by a bit-matrix over GF(2) and store the product
   back into the vector.  The matrix is (N*32) x (N*32) bits, stored
   row-major as N*32 rows of N 32-bit words. */
template<int N>
QUALIFIERS void __curand_matvec_inplace(unsigned int *vector, unsigned int *matrix)
{
    unsigned int acc[N] = { 0 };
    for(int word = 0; word < N; word++) {
#ifdef __CUDA_ARCH__
#pragma unroll 16
#endif
        for(int bit = 0; bit < 32; bit++) {
            if(vector[word] & (1 << bit)) {
                /* XOR in the matrix row selected by this set bit */
                for(int k = 0; k < N; k++) {
                    acc[k] ^= matrix[N * (word * 32 + bit) + k];
                }
            }
        }
    }
    for(int k = 0; k < N; k++) {
        vector[k] = acc[k];
    }
}
|
| 335 |
+
|
| 336 |
+
/* result := vector * matrix over GF(2).
   matrix is (n*32) x (n*32) bits, stored row-major as n 32-bit words per
   row; result must not alias vector. */
QUALIFIERS void __curand_matvec(unsigned int *vector, unsigned int *matrix,
                                unsigned int *result, int n)
{
    for(int k = 0; k < n; k++) {
        result[k] = 0;
    }
    for(int word = 0; word < n; word++) {
        for(int bit = 0; bit < 32; bit++) {
            if(vector[word] & (1 << bit)) {
                /* accumulate the matrix row selected by this set bit */
                for(int k = 0; k < n; k++) {
                    result[k] ^= matrix[n * (word * 32 + bit) + k];
                }
            }
        }
    }
}
|
| 352 |
+
|
| 353 |
+
/* generate identity matrix */
|
| 354 |
+
/* Write the (n*32) x (n*32) identity matrix over GF(2) into matrix
   (row-major, n 32-bit words per row).
   Fixes vs. original: the bit position `r = i & 31` was recomputed inside
   the inner j-loop although it depends only on i — hoisted out; and
   `1 << 31` on a signed int is avoided by shifting an unsigned literal. */
QUALIFIERS void __curand_matidentity(unsigned int *matrix, int n)
{
    for(int i = 0; i < n * 32; i++) {
        int r = i & 31;                 /* bit position within the word; invariant of j */
        for(int j = 0; j < n; j++) {
            if(i / 32 == j) {
                matrix[i * n + j] = (1u << r);
            } else {
                matrix[i * n + j] = 0;
            }
        }
    }
}
|
| 368 |
+
|
| 369 |
+
/* multiply matrixA by matrixB, store back in matrixA
|
| 370 |
+
matrixA and matrixB must not be same matrix */
|
| 371 |
+
/* matrixA := matrixA * matrixB over GF(2); operands must not alias.
   Each row of matrixA is pushed through matrixB one at a time via a
   small on-stack row buffer. */
QUALIFIERS void __curand_matmat(unsigned int *matrixA, unsigned int *matrixB, int n)
{
    unsigned int row[MAX_XOR_N];
    for(int i = 0; i < n * 32; i++) {
        __curand_matvec(matrixA + i * n, matrixB, row, n);
        for(int j = 0; j < n; j++) {
            matrixA[i * n + j] = row[j];
        }
    }
}
|
| 381 |
+
|
| 382 |
+
/* copy vectorA to vector */
|
| 383 |
+
/* Copy n words from vectorA into vector. */
QUALIFIERS void __curand_veccopy(unsigned int *vector, unsigned int *vectorA, int n)
{
    for(int k = 0; k < n; k++) {
        vector[k] = vectorA[k];
    }
}
|
| 389 |
+
|
| 390 |
+
/* copy matrixA to matrix */
|
| 391 |
+
/* Copy an (n*32) x (n*32)-bit matrix (n*n*32 words) from matrixA into matrix. */
QUALIFIERS void __curand_matcopy(unsigned int *matrix, unsigned int *matrixA, int n)
{
    const int words = n * n * 32;
    for(int k = 0; k < words; k++) {
        matrix[k] = matrixA[k];
    }
}
|
| 397 |
+
|
| 398 |
+
/* compute matrixA to power p, store result in matrix */
|
| 399 |
+
/* matrix := matrixA ** p over GF(2), by binary exponentiation.
   Two scratch matrices are kept on the stack: the running square and a
   staging copy (the GF(2) multiply cannot operate in place). */
QUALIFIERS void __curand_matpow(unsigned int *matrix, unsigned int *matrixA,
                                unsigned long long p, int n)
{
    unsigned int sq[MAX_XOR_N * MAX_XOR_N * 32];     /* matrixA^(2^k) */
    unsigned int staging[MAX_XOR_N * MAX_XOR_N * 32];
    __curand_matidentity(matrix, n);
    __curand_matcopy(sq, matrixA, n);
    while(p) {
        if(p & 1) {
            __curand_matmat(matrix, sq, n);          /* fold this power in */
        }
        /* sq := sq * sq, staged through a copy to avoid aliasing */
        __curand_matcopy(staging, sq, n);
        __curand_matmat(sq, staging, n);
        p >>= 1;
    }
}
|
| 415 |
+
|
| 416 |
+
/****************************************************************************/
|
| 417 |
+
/* Utility functions needed by MRG32k3a RNG */
|
| 418 |
+
/* Matrix operations modulo some integer less than 2**32, done in */
|
| 419 |
+
/* double precision floating point, with care not to overflow 53 bits */
|
| 420 |
+
/****************************************************************************/
|
| 421 |
+
|
| 422 |
+
/* return i mod m. */
|
| 423 |
+
/* assumes i and m are integers represented accurately in doubles */
|
| 424 |
+
|
| 425 |
+
/* Return i mod m, computed in double precision.
   Assumes i and m are integers exactly representable in doubles; the
   result is normalized into [0, m). */
QUALIFIERS double curand_MRGmod(double i, double m)
{
    double r = i - floor(i / m) * m;
    if (r < 0.0) {
        r += m;
    }
    return r;
}
|
| 434 |
+
|
| 435 |
+
/* Multiplication modulo m. Inputs i and j less than 2**32 */
|
| 436 |
+
/* Ensure intermediate results do not exceed 2**53 */
|
| 437 |
+
|
| 438 |
+
/* (i * j) mod m for inputs below 2**32, keeping every intermediate below
   2**53 (the double mantissa limit): i is split at 2**17 = 131072 into a
   high and a low part, each multiplied and reduced separately. */
QUALIFIERS double curand_MRGmodMul(double i, double j, double m)
{
    double hi = floor(i / 131072.0);
    double lo = i - hi * 131072.0;
    lo = curand_MRGmod(curand_MRGmod(hi * j, m) * 131072.0
                       + curand_MRGmod(lo * j, m), m);
    if (lo < 0.0) {
        lo += m;
    }
    return lo;
}
|
| 450 |
+
|
| 451 |
+
/* multiply 3 by 3 matrices of doubles, modulo m */
|
| 452 |
+
|
| 453 |
+
/* o := (i1 * i2) mod m for 3x3 matrices. Products are accumulated in
   doubles and narrowed at the end; since the full product is staged in
   `temp`, the output may alias either input (callers rely on this). */
QUALIFIERS void curand_MRGmatMul3x3(unsigned int i1[][3], unsigned int i2[][3], unsigned int o[][3], double m)
{
    double temp[3][3];
    for (int r = 0; r < 3; r++) {
        for (int c = 0; c < 3; c++) {
            double s = curand_MRGmodMul(i1[r][0], i2[0][c], m)
                     + curand_MRGmodMul(i1[r][1], i2[1][c], m)
                     + curand_MRGmodMul(i1[r][2], i2[2][c], m);
            temp[r][c] = curand_MRGmod(s, m);
        }
    }
    for (int r = 0; r < 3; r++) {
        for (int c = 0; c < 3; c++) {
            o[r][c] = (unsigned int)temp[r][c];
        }
    }
}
|
| 471 |
+
|
| 472 |
+
/* multiply 3 by 3 matrix times 3 by 1 vector of doubles, modulo m */
|
| 473 |
+
|
| 474 |
+
/* v := (i * v) mod m for a 3x3 matrix i and 3-vector v; the product is
   staged in doubles so v can be updated in place. */
QUALIFIERS void curand_MRGmatVecMul3x3( unsigned int i[][3], unsigned int v[], double m)
{
    double t[3];
    for (int k = 0; k < 3; k++) {
        double s = curand_MRGmodMul(i[k][0], v[0], m)
                 + curand_MRGmodMul(i[k][1], v[1], m)
                 + curand_MRGmodMul(i[k][2], v[2], m);
        t[k] = curand_MRGmod(s, m);
    }
    for (int k = 0; k < 3; k++) {
        v[k] = (unsigned int)t[k];
    }
}
|
| 489 |
+
|
| 490 |
+
/* raise a 3 by 3 matrix of doubles to a 64 bit integer power pow, modulo m */
|
| 491 |
+
/* input is index zero of an array of 3 by 3 matrices m, */
|
| 492 |
+
/* each m = m[0]**(2**index) */
|
| 493 |
+
|
| 494 |
+
/* o := in[0] ** pow (mod m), using the precomputed power table
   in[k] = in[0] ** (2**k): standard binary exponentiation, one table
   entry per set bit of pow. */
QUALIFIERS void curand_MRGmatPow3x3( unsigned int in[][3][3], unsigned int o[][3], double m, unsigned long long pow )
{
    /* start from the identity matrix */
    for ( int r = 0; r < 3; r++ ) {
        for ( int c = 0; c < 3; c++ ) {
            o[r][c] = (r == c) ? 1 : 0;
        }
    }
    int k = 0;
    /* NOTE(review): this multiplies the identity by its own first row —
       a no-op on the identity; kept verbatim from the original. */
    curand_MRGmatVecMul3x3(o, o[0], m);
    while (pow) {
        if ( pow & 1ll ) {
            curand_MRGmatMul3x3(in[k], o, o, m);
        }
        k++;
        pow >>= 1;
    }
}
|
| 513 |
+
|
| 514 |
+
/* raise a 3 by 3 matrix of doubles to the power */
|
| 515 |
+
/* 2 to the power (pow modulo 191), modulo m */
|
| 516 |
+
|
| 517 |
+
/* o := in ** (2 ** (pow mod 191)) (mod m), by repeated squaring.
   The exponent is reduced mod 191 (the generator has period 2^191, see
   the MRG32k3a notes above).
   NOTE(review): the "curnand" spelling is the name shipped in this
   header; it is part of the public interface and must not be renamed. */
QUALIFIERS void curnand_MRGmatPow2Pow3x3( double in[][3], double o[][3], double m, unsigned long pow )
{
    unsigned int temp[3][3];
    pow = pow % 191;
    /* narrow the input to the unsigned-int working representation */
    for ( int r = 0; r < 3; r++ ) {
        for ( int c = 0; c < 3; c++ ) {
            temp[r][c] = (unsigned int)in[r][c];
        }
    }
    /* each iteration squares the matrix once */
    while (pow) {
        curand_MRGmatMul3x3(temp, temp, temp, m);
        pow--;
    }
    for ( int r = 0; r < 3; r++ ) {
        for ( int c = 0; c < 3; c++ ) {
            o[r][c] = temp[r][c];
        }
    }
}
|
| 537 |
+
|
| 538 |
+
/** \endcond */
|
| 539 |
+
|
| 540 |
+
/****************************************************************************/
|
| 541 |
+
/* Kernel implementations of RNGs */
|
| 542 |
+
/****************************************************************************/
|
| 543 |
+
|
| 544 |
+
/* Test RNG */
|
| 545 |
+
|
| 546 |
+
/* Initialize the Test RNG: x_0 = seed*3 + subsequence*31337 + offset
   (mod 2^32), matching the comment block above the state definition. */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateTest_t *state)
{
    state->v = (unsigned int)(seed * 3)
             + (unsigned int)(subsequence * 31337)
             + (unsigned int)offset;
}
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
/* Test RNG step: return the current value, then advance by one. */
QUALIFIERS unsigned int curand(curandStateTest_t *state)
{
    return state->v++;
}
|
| 561 |
+
|
| 562 |
+
/* Test RNG is a plain counter, so skipping n elements is adding n (mod 2^32). */
QUALIFIERS void skipahead(unsigned long long n, curandStateTest_t *state)
{
    state->v += (unsigned int)n;
}
|
| 566 |
+
|
| 567 |
+
/* XORWOW RNG */
|
| 568 |
+
|
| 569 |
+
/* Build the one-step GF(2) transition matrix of generator T by stepping
   each single-bit basis state once and recording where it lands.
   matrix receives n*32 rows of n 32-bit words. */
template <typename T, int n>
QUALIFIERS void __curand_generate_skipahead_matrix_xor(unsigned int matrix[])
{
    T state;
    for(int i = 0; i < 32 * n; i++) {
        /* load basis vector e_i into the state, Weyl term zeroed */
        state.d = 0;
        for(int j = 0; j < n; j++) {
            state.v[j] = 0;
        }
        state.v[i / 32] = (1 << (i & 31));
        curand(&state);
        /* row i of the matrix is the stepped state */
        for(int j = 0; j < n; j++) {
            matrix[i * n + j] = state.v[j];
        }
    }
}
|
| 588 |
+
|
| 589 |
+
/* Advance state by x steps using caller-provided scratch memory.
   scratch must hold two n x n bit-matrices plus two n-word vectors,
   i.e. 2*(n*n*32) + 2*n unsigned ints.
   Low bits of x are consumed with precomputed offset matrices in
   PRECALC_BLOCK_SIZE-bit digits; any remaining high bits fall back to
   squaring the last precomputed matrix on the fly. */
template <typename T, int n>
QUALIFIERS void _skipahead_scratch(unsigned long long x, T *state, unsigned int *scratch)
{
    unsigned int *matrix  = scratch;
    unsigned int *matrixA = scratch + (n * n * 32);
    unsigned int *vector  = scratch + 2 * (n * n * 32);
    unsigned int *result  = scratch + 2 * (n * n * 32) + n;
    unsigned long long p = x;
    for(int i = 0; i < n; i++) {
        vector[i] = state->v[i];
    }
    int matrix_num = 0;
    while(p && (matrix_num < PRECALC_NUM_MATRICES - 1)) {
        /* apply the digit's matrix (p & mask) times */
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec(vector, precalc_xorwow_offset_matrix[matrix_num], result, n);
            ,
                __curand_matvec(vector, precalc_xorwow_offset_matrix_host[matrix_num], result, n);
            )
            __curand_veccopy(vector, result, n);
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    if(p) {
        /* seed the on-the-fly squaring with the last precomputed matrix */
        NV_IF_ELSE_TARGET(NV_IS_DEVICE,
            __curand_matcopy(matrix, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
        ,
            __curand_matcopy(matrix, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
        )
    }
    while(p) {
        for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
            __curand_matvec(vector, matrixA, result, n);
            __curand_veccopy(vector, result, n);
        }
        p >>= SKIPAHEAD_BLOCKSIZE;
        if(p) {
            /* raise matrixA to the 2^SKIPAHEAD_BLOCKSIZE for the next digit */
            for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
                __curand_matmat(matrix, matrixA, n);
                __curand_matcopy(matrixA, matrix, n);
            }
        }
    }
    for(int i = 0; i < n; i++) {
        state->v[i] = vector[i];
    }
    /* the Weyl component advances by a fixed odd constant per step */
    state->d += 362437 * (unsigned int)x;
}
|
| 644 |
+
|
| 645 |
+
/* Advance state by x whole subsequences using caller-provided scratch
   memory (same layout as _skipahead_scratch: 2*(n*n*32) + 2*n words).
   Identical digit-by-digit scheme but driven by the subsequence matrices
   precalc_xorwow_matrix[...]. */
template <typename T, int n>
QUALIFIERS void _skipahead_sequence_scratch(unsigned long long x, T *state, unsigned int *scratch)
{
    unsigned int *matrix  = scratch;
    unsigned int *matrixA = scratch + (n * n * 32);
    unsigned int *vector  = scratch + 2 * (n * n * 32);
    unsigned int *result  = scratch + 2 * (n * n * 32) + n;
    unsigned long long p = x;
    for(int i = 0; i < n; i++) {
        vector[i] = state->v[i];
    }
    int matrix_num = 0;
    while(p && matrix_num < PRECALC_NUM_MATRICES - 1) {
        /* apply the digit's matrix (p & mask) times */
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec(vector, precalc_xorwow_matrix[matrix_num], result, n);
            ,
                __curand_matvec(vector, precalc_xorwow_matrix_host[matrix_num], result, n);
            )
            __curand_veccopy(vector, result, n);
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    if(p) {
        /* seed the on-the-fly squaring with the last precomputed matrix */
        NV_IF_ELSE_TARGET(NV_IS_DEVICE,
            __curand_matcopy(matrix, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
        ,
            __curand_matcopy(matrix, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
            __curand_matcopy(matrixA, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
        )
    }
    while(p) {
        for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
            __curand_matvec(vector, matrixA, result, n);
            __curand_veccopy(vector, result, n);
        }
        p >>= SKIPAHEAD_BLOCKSIZE;
        if(p) {
            /* raise matrixA to the 2^SKIPAHEAD_BLOCKSIZE for the next digit */
            for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
                __curand_matmat(matrix, matrixA, n);
                __curand_matcopy(matrixA, matrix, n);
            }
        }
    }
    for(int i = 0; i < n; i++) {
        state->v[i] = vector[i];
    }
    /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
}
|
| 700 |
+
|
| 701 |
+
/* Advance state by x steps with no scratch memory: the skip count is
   consumed in PRECALC_BLOCK_SIZE-bit digits, each digit selecting how
   many times to apply that digit's precomputed offset matrix in place. */
template <typename T, int N>
QUALIFIERS void _skipahead_inplace(const unsigned long long x, T *state)
{
    unsigned long long p = x;
    int matrix_num = 0;
    while(p) {
        for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix[matrix_num]);
            ,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix_host[matrix_num]);
            )
        }
        p >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* the Weyl component advances by a fixed odd constant per step */
    state->d += 362437 * (unsigned int)x;
}
|
| 719 |
+
|
| 720 |
+
/* Advance state by x whole subsequences with no scratch memory, using
   the precomputed subsequence matrices digit by digit. */
template <typename T, int N>
QUALIFIERS void _skipahead_sequence_inplace(unsigned long long x, T *state)
{
    int matrix_num = 0;
    while(x) {
        for(unsigned int t = 0; t < (x & PRECALC_BLOCK_MASK); t++) {
            NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix[matrix_num]);
            ,
                __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix_host[matrix_num]);
            )
        }
        x >>= PRECALC_BLOCK_SIZE;
        matrix_num++;
    }
    /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
}
|
| 737 |
+
|
| 738 |
+
/**
|
| 739 |
+
* \brief Update XORWOW state to skip \p n elements.
|
| 740 |
+
*
|
| 741 |
+
* Update the XORWOW state in \p state to skip ahead \p n elements.
|
| 742 |
+
*
|
| 743 |
+
* All values of \p n are valid. Large values require more computation and so
|
| 744 |
+
* will take more time to complete.
|
| 745 |
+
*
|
| 746 |
+
* \param n - Number of elements to skip
|
| 747 |
+
* \param state - Pointer to state to update
|
| 748 |
+
*/
|
| 749 |
+
/* Public XORWOW element skip: delegate to the in-place implementation
   (5 state words). */
QUALIFIERS void skipahead(unsigned long long n, curandStateXORWOW_t *state)
{
    _skipahead_inplace<curandStateXORWOW_t, 5>(n, state);
}
|
| 753 |
+
|
| 754 |
+
/**
|
| 755 |
+
* \brief Update XORWOW state to skip ahead \p n subsequences.
|
| 756 |
+
*
|
| 757 |
+
* Update the XORWOW state in \p state to skip ahead \p n subsequences. Each
|
| 758 |
+
* subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
|
| 759 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly * n elements.
|
| 760 |
+
*
|
| 761 |
+
* All values of \p n are valid. Large values require more computation and so
|
| 762 |
+
* will take more time to complete.
|
| 763 |
+
*
|
| 764 |
+
* \param n - Number of subsequences to skip
|
| 765 |
+
* \param state - Pointer to state to update
|
| 766 |
+
*/
|
| 767 |
+
/* Public XORWOW subsequence skip: delegate to the in-place implementation
   (5 state words). */
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateXORWOW_t *state)
{
    _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(n, state);
}
|
| 771 |
+
|
| 772 |
+
/* Seed an XORWOW state using caller-provided scratch memory for the
   skip-ahead matrices, then position it at the requested subsequence
   and offset. All salt/mix constants are arbitrary (salts nonzero,
   multipliers odd), matching the in-place variant exactly. */
QUALIFIERS void _curand_init_scratch(unsigned long long seed,
                                     unsigned long long subsequence,
                                     unsigned long long offset,
                                     curandStateXORWOW_t *state,
                                     unsigned int *scratch)
{
    /* split the 64-bit seed into two salted halves */
    unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
    unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
    /* mix bits with arbitrary odd multipliers */
    unsigned int t0 = 1099087573UL * s0;
    unsigned int t1 = 2591861531UL * s1;
    state->d = 6615241 + t1 + t0;
    state->v[0] = 123456789UL + t0;
    state->v[1] = 362436069UL ^ t0;
    state->v[2] = 521288629UL + t1;
    state->v[3] = 88675123UL ^ t1;
    state->v[4] = 5783321UL + t0;
    /* walk forward to the requested position */
    _skipahead_sequence_scratch<curandStateXORWOW_t, 5>(subsequence, state, scratch);
    _skipahead_scratch<curandStateXORWOW_t, 5>(offset, state, scratch);
    /* clear the cached Box-Muller leftovers */
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
|
| 799 |
+
|
| 800 |
+
/* Seed an XORWOW state without scratch memory, then position it at the
   requested subsequence and offset via the in-place skip-aheads.
   Constants are identical to _curand_init_scratch (salts nonzero,
   multipliers odd, all arbitrary). */
QUALIFIERS void _curand_init_inplace(unsigned long long seed,
                                     unsigned long long subsequence,
                                     unsigned long long offset,
                                     curandStateXORWOW_t *state)
{
    /* split the 64-bit seed into two salted halves */
    unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
    unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
    /* mix bits with arbitrary odd multipliers */
    unsigned int t0 = 1099087573UL * s0;
    unsigned int t1 = 2591861531UL * s1;
    state->d = 6615241 + t1 + t0;
    state->v[0] = 123456789UL + t0;
    state->v[1] = 362436069UL ^ t0;
    state->v[2] = 521288629UL + t1;
    state->v[3] = 88675123UL ^ t1;
    state->v[4] = 5783321UL + t0;
    /* walk forward to the requested position */
    _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(subsequence, state);
    _skipahead_inplace<curandStateXORWOW_t, 5>(offset, state);
    /* clear the cached Box-Muller leftovers */
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
|
| 826 |
+
|
| 827 |
+
/**
|
| 828 |
+
* \brief Initialize XORWOW state.
|
| 829 |
+
*
|
| 830 |
+
* Initialize XORWOW state in \p state with the given \p seed, \p subsequence,
|
| 831 |
+
* and \p offset.
|
| 832 |
+
*
|
| 833 |
+
* All input values of \p seed, \p subsequence, and \p offset are legal. Large
|
| 834 |
+
* values for \p subsequence and \p offset require more computation and so will
|
| 835 |
+
* take more time to complete.
|
| 836 |
+
*
|
| 837 |
+
* A value of 0 for \p seed sets the state to the values of the original
|
| 838 |
+
* published version of the \p xorwow algorithm.
|
| 839 |
+
*
|
| 840 |
+
* \param seed - Arbitrary bits to use as a seed
|
| 841 |
+
* \param subsequence - Subsequence to start at
|
| 842 |
+
* \param offset - Absolute offset into sequence
|
| 843 |
+
* \param state - Pointer to state to initialize
|
| 844 |
+
*/
|
| 845 |
+
/* Public XORWOW initializer: delegate to the scratch-free implementation. */
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateXORWOW_t *state)
{
    _curand_init_inplace(seed, subsequence, offset, state);
}
|
| 852 |
+
|
| 853 |
+
/**
|
| 854 |
+
* \brief Return 32-bits of pseudorandomness from an XORWOW generator.
|
| 855 |
+
*
|
| 856 |
+
* Return 32-bits of pseudorandomness from the XORWOW generator in \p state,
|
| 857 |
+
* increment position of generator by one.
|
| 858 |
+
*
|
| 859 |
+
* \param state - Pointer to state to update
|
| 860 |
+
*
|
| 861 |
+
* \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
|
| 862 |
+
*/
|
| 863 |
+
/* One xorwow step (Marsaglia): a five-word xorshift combined with a Weyl
   sequence (d, incremented by a fixed odd constant) added to the output. */
QUALIFIERS unsigned int curand(curandStateXORWOW_t *state)
{
    unsigned int t = state->v[0] ^ (state->v[0] >> 2);
    /* shift the five-word window down by one */
    state->v[0] = state->v[1];
    state->v[1] = state->v[2];
    state->v[2] = state->v[3];
    state->v[3] = state->v[4];
    state->v[4] = (state->v[4] ^ (state->v[4] << 4)) ^ (t ^ (t << 1));
    state->d += 362437;
    return state->v[4] + state->d;
}
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
/**
|
| 878 |
+
* \brief Return 32-bits of pseudorandomness from an Philox4_32_10 generator.
|
| 879 |
+
*
|
| 880 |
+
* Return 32-bits of pseudorandomness from the Philox4_32_10 generator in \p state,
|
| 881 |
+
* increment position of generator by one.
|
| 882 |
+
*
|
| 883 |
+
* \param state - Pointer to state to update
|
| 884 |
+
*
|
| 885 |
+
* \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
|
| 886 |
+
*/
|
| 887 |
+
|
| 888 |
+
/* Hand out one 32-bit lane of the buffered Philox4x32-10 output.
   Invariant: state->output always holds the next four values and STATE
   (0..3) indexes the lane to return; when the fourth lane is consumed
   the counter is incremented and the buffer refilled. */
QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
{
    unsigned int ret;
    switch(state->STATE++){
    default:                       /* STATE == 0 */
        ret = state->output.x;
        break;
    case 1:
        ret = state->output.y;
        break;
    case 2:
        ret = state->output.z;
        break;
    case 3:
        ret = state->output.w;
        break;
    }
    if(state->STATE == 4){
        /* buffer exhausted: advance counter and regenerate four lanes */
        Philox_State_Incr(state);
        state->output = curand_Philox4x32_10(state->ctr, state->key);
        state->STATE = 0;
    }
    return ret;
}
|
| 914 |
+
|
| 915 |
+
/**
|
| 916 |
+
* \brief Return tuple of 4 32-bit pseudorandoms from a Philox4_32_10 generator.
|
| 917 |
+
*
|
| 918 |
+
* Return 128 bits of pseudorandomness from the Philox4_32_10 generator in \p state,
|
| 919 |
+
* increment position of generator by four.
|
| 920 |
+
*
|
| 921 |
+
* \param state - Pointer to state to update
|
| 922 |
+
*
|
| 923 |
+
* \return 128-bits of pseudorandomness as a uint4, all bits valid to use.
|
| 924 |
+
*/
|
| 925 |
+
|
| 926 |
+
QUALIFIERS uint4 curand4(curandStatePhilox4_32_10_t *state)
{
    uint4 r;

    // Consume the currently cached block and immediately refill it so the
    // generator advances by exactly one 4-tuple per call.
    uint4 tmp = state->output;
    Philox_State_Incr(state);
    state->output= curand_Philox4x32_10(state->ctr,state->key);
    // STATE is the intra-block phase left behind by scalar curand() calls; it
    // decides how the old and new blocks are stitched into one aligned tuple.
    switch(state->STATE){
    case 0:
        // Perfectly aligned: the old cached block is the answer.
        return tmp;
    case 1:
        r.x = tmp.y;
        r.y = tmp.z;
        r.z = tmp.w;
        r.w = state->output.x;
        break;
    case 2:
        r.x = tmp.z;
        r.y = tmp.w;
        r.z = state->output.x;
        r.w = state->output.y;
        break;
    case 3:
        r.x = tmp.w;
        r.y = state->output.x;
        r.z = state->output.y;
        r.w = state->output.z;
        break;
    default:
        // NOT possible but needed to avoid compiler warnings
        return tmp;
    }
    return r;
}
|
| 960 |
+
|
| 961 |
+
/**
|
| 962 |
+
* \brief Update Philox4_32_10 state to skip \p n elements.
|
| 963 |
+
*
|
| 964 |
+
* Update the Philox4_32_10 state in \p state to skip ahead \p n elements.
|
| 965 |
+
*
|
| 966 |
+
* All values of \p n are valid.
|
| 967 |
+
*
|
| 968 |
+
* \param n - Number of elements to skip
|
| 969 |
+
* \param state - Pointer to state to update
|
| 970 |
+
*/
|
| 971 |
+
QUALIFIERS void skipahead(unsigned long long n, curandStatePhilox4_32_10_t *state)
{
    // Split the skip into whole 4-tuples plus an intra-tuple phase shift.
    unsigned long long tuples = n >> 2;
    state->STATE += (unsigned int)(n & 3ULL);
    if (state->STATE > 3) {
        // Phase wrapped past the current tuple: borrow one extra counter step.
        state->STATE -= 4;
        tuples += 1;
    }
    Philox_State_Incr(state, tuples);
    // Refresh the cached output block at the new position.
    state->output = curand_Philox4x32_10(state->ctr, state->key);
}
|
| 982 |
+
|
| 983 |
+
/**
|
| 984 |
+
* \brief Update Philox4_32_10 state to skip ahead \p n subsequences.
|
| 985 |
+
*
|
| 986 |
+
* Update the Philox4_32_10 state in \p state to skip ahead \p n subsequences. Each
|
| 987 |
+
* subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
|
| 988 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly * n elements.
|
| 989 |
+
*
|
| 990 |
+
* All values of \p n are valid.
|
| 991 |
+
*
|
| 992 |
+
* \param n - Number of subsequences to skip
|
| 993 |
+
* \param state - Pointer to state to update
|
| 994 |
+
*/
|
| 995 |
+
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStatePhilox4_32_10_t *state)
{
    // Advancing the high half of the 128-bit counter by n skips n whole
    // subsequences; the cached output block is refreshed at the new position.
    Philox_State_Incr_hi(state, n);
    state->output = curand_Philox4x32_10(state->ctr,state->key);
}
|
| 1000 |
+
|
| 1001 |
+
/**
|
| 1002 |
+
* \brief Initialize Philox4_32_10 state.
|
| 1003 |
+
*
|
| 1004 |
+
 * Initialize Philox4_32_10 state in \p state with the given \p seed, \p subsequence,
|
| 1005 |
+
* and \p offset.
|
| 1006 |
+
*
|
| 1007 |
+
 * All input values for \p seed, \p subsequence and \p offset are legal. Each of the
|
| 1008 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>64</sup></ph>\endxmlonly possible
|
| 1009 |
+
* values of seed selects an independent sequence of length
|
| 1010 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>130</sup></ph>\endxmlonly.
|
| 1011 |
+
* The first
|
| 1012 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>66</sup> * subsequence + offset</ph>\endxmlonly.
|
| 1013 |
+
* values of the sequence are skipped.
|
| 1014 |
+
* I.e., subsequences are of length
|
| 1015 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly.
|
| 1016 |
+
*
|
| 1017 |
+
* \param seed - Arbitrary bits to use as a seed
|
| 1018 |
+
* \param subsequence - Subsequence to start at
|
| 1019 |
+
* \param offset - Absolute offset into subsequence
|
| 1020 |
+
* \param state - Pointer to state to initialize
|
| 1021 |
+
*/
|
| 1022 |
+
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStatePhilox4_32_10_t *state)
{
    // Zero the 128-bit counter and derive the 64-bit key from the seed.
    state->ctr = make_uint4(0, 0, 0, 0);
    state->key.x = (unsigned int)(seed & 0xffffffffULL);
    state->key.y = (unsigned int)(seed >> 32);
    state->STATE = 0;
    // Clear cached Box-Muller leftovers so normal generation starts fresh.
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
    // Position the generator; both calls also refresh state->output.
    skipahead_sequence(subsequence, state);
    skipahead(offset, state);
}
|
| 1038 |
+
|
| 1039 |
+
|
| 1040 |
+
/* MRG32k3a RNG */
|
| 1041 |
+
|
| 1042 |
+
/* Base generator for MRG32k3a */
|
| 1043 |
+
QUALIFIERS unsigned long long __curand_umad(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b, GCC_UNUSED_PARAMETER unsigned long long c)
{
    // Widening multiply-add: returns (unsigned long long)a * b + c via the
    // PTX mad.wide.u32 instruction. On targets below SM 6.1 the asm body is
    // compiled out and 0 is returned; the only caller in this file guards
    // its use with the same NV_PROVIDES_SM_61 condition.
    unsigned long long r = 0;
    NV_IF_TARGET(NV_PROVIDES_SM_61,
        asm("mad.wide.u32 %0, %1, %2, %3;"
            : "=l"(r) : "r"(a), "r"(b), "l"(c));
    )
    return r;
}
|
| 1052 |
+
QUALIFIERS unsigned long long __curand_umul(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b)
{
    // Widening multiply: returns the full 64-bit product of two 32-bit values
    // via PTX mul.wide.u32. On targets below SM 6.1 the asm body is compiled
    // out and 0 is returned; callers here guard with NV_PROVIDES_SM_61.
    unsigned long long r = 0;
    NV_IF_TARGET(NV_PROVIDES_SM_61,
        asm("mul.wide.u32 %0, %1, %2;"
            : "=l"(r) : "r"(a), "r"(b));
    )
    return r;
}
|
| 1061 |
+
QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state)
{
    // Advance the combined MRG32k3a recurrence (L'Ecuyer) one step and return
    // the new sample. Three target-specific implementations: an integer path
    // for SM 6.1+, a double-precision device path, and a host fallback.
    NV_IF_TARGET(NV_PROVIDES_SM_61,
        // Integer path using wide multiply / multiply-add PTX helpers.
        const unsigned int m1 = 4294967087u;
        const unsigned int m2 = 4294944443u;
        const unsigned int m1c = 209u;
        const unsigned int m2c = 22853u;
        const unsigned int a12 = 1403580u;
        const unsigned int a13n = 810728u;
        const unsigned int a21 = 527612u;
        const unsigned int a23n = 1370589u;

        unsigned long long p1;
        unsigned long long p2;
        // First component: a12*s1[1] - a13n*s1[0] mod m1; the subtraction is
        // folded in by multiplying with (m1 - s1[0]) instead.
        const unsigned long long p3 = __curand_umul(a13n, m1 - state->s1[0]);
        p1 = __curand_umad(a12, state->s1[1], p3);

        // Putting addition inside and changing umul to umad
        // slowed this function down on GV100
        p1 = __curand_umul(p1 >> 32, m1c) + (p1 & 0xffffffff);
        if (p1 >= m1) p1 -= m1;

        state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = p1;
        // Second component mod m2.
        const unsigned long long p4 = __curand_umul(a23n, m2 - state->s2[0]);
        p2 = __curand_umad(a21, state->s2[2], p4);

        // Putting addition inside and changing umul to umad
        // slowed this function down on GV100
        // NOTE(review): the fold is applied twice for p2 but once for p1 -
        // presumably one pass cannot fully reduce it; confirm before touching.
        p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
        p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
        if (p2 >= m2) p2 -= m2;

        state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = p2;

        const unsigned int p5 = (unsigned int)p1 - (unsigned int)p2;
        if(p1 <= p2) return p5 + m1;
        return p5;
    )
    NV_IF_TARGET(NV_IS_DEVICE,
        /* nj's implementation */
        const double m1 = 4294967087.;
        const double m2 = 4294944443.;
        const double a12 = 1403580.;
        const double a13n = 810728.;
        const double a21 = 527612.;
        const double a23n = 1370589.;

        const double rh1 = 2.3283065498378290e-010; /* (1.0 / m1)__hi */
        const double rl1 = -1.7354913086174288e-026; /* (1.0 / m1)__lo */
        const double rh2 = 2.3283188252407387e-010; /* (1.0 / m2)__hi */
        const double rl2 = 2.4081018096503646e-026; /* (1.0 / m2)__lo */

        double q;
        double p1;
        double p2;
        // trunc(fma(...)) computes floor(p / m) from a double-double
        // reciprocal without an actual division.
        p1 = a12 * state->s1[1] - a13n * state->s1[0];
        q = trunc (fma (p1, rh1, p1 * rl1));
        p1 -= q * m1;
        if (p1 < 0.0) p1 += m1;
        state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = (unsigned int)p1;
        p2 = a21 * state->s2[2] - a23n * state->s2[0];
        q = trunc (fma (p2, rh2, p2 * rl2));
        p2 -= q * m2;
        if (p2 < 0.0) p2 += m2;
        state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = (unsigned int)p2;
        if (p1 <= p2) return (p1 - p2 + m1);
        else return (p1 - p2);
    )
    /* end nj's implementation */
    // Host fallback: same recurrence using curand_MRGmod for reduction.
    double p1;
    double p2;
    double r;
    p1 = (MRG32K3A_A12 * state->s1[1]) - (MRG32K3A_A13N * state->s1[0]);
    p1 = curand_MRGmod(p1, MRG32K3A_MOD1);
    if (p1 < 0.0) p1 += MRG32K3A_MOD1;
    state->s1[0] = state->s1[1];
    state->s1[1] = state->s1[2];
    state->s1[2] = (unsigned int)p1;
    p2 = (MRG32K3A_A21 * state->s2[2]) - (MRG32K3A_A23N * state->s2[0]);
    p2 = curand_MRGmod(p2, MRG32K3A_MOD2);
    if (p2 < 0) p2 += MRG32K3A_MOD2;
    state->s2[0] = state->s2[1];
    state->s2[1] = state->s2[2];
    state->s2[2] = (unsigned int)p2;
    r = p1 - p2;
    if (r <= 0) r += MRG32K3A_MOD1;
    return r;
}
|
| 1149 |
+
|
| 1150 |
+
|
| 1151 |
+
/**
|
| 1152 |
+
* \brief Return 32-bits of pseudorandomness from an MRG32k3a generator.
|
| 1153 |
+
*
|
| 1154 |
+
* Return 32-bits of pseudorandomness from the MRG32k3a generator in \p state,
|
| 1155 |
+
* increment position of generator by one.
|
| 1156 |
+
*
|
| 1157 |
+
* \param state - Pointer to state to update
|
| 1158 |
+
*
|
| 1159 |
+
* \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
|
| 1160 |
+
*/
|
| 1161 |
+
QUALIFIERS unsigned int curand(curandStateMRG32k3a_t *state)
{
    // Scale the double draw from curand_MRG32k3a into the full 32-bit range.
    const double scaled = (double)curand_MRG32k3a(state) * (double)MRG32K3A_BITS_NORM;
    return (unsigned int)scaled;
}
|
| 1167 |
+
|
| 1168 |
+
|
| 1169 |
+
|
| 1170 |
+
/**
|
| 1171 |
+
* \brief Update MRG32k3a state to skip \p n elements.
|
| 1172 |
+
*
|
| 1173 |
+
* Update the MRG32k3a state in \p state to skip ahead \p n elements.
|
| 1174 |
+
*
|
| 1175 |
+
* All values of \p n are valid. Large values require more computation and so
|
| 1176 |
+
* will take more time to complete.
|
| 1177 |
+
*
|
| 1178 |
+
* \param n - Number of elements to skip
|
| 1179 |
+
* \param state - Pointer to state to update
|
| 1180 |
+
*/
|
| 1181 |
+
QUALIFIERS void skipahead(unsigned long long n, curandStateMRG32k3a_t *state)
{
    // Skip ahead by matrix exponentiation: raise each component's 3x3
    // transition matrix to the n-th power (mod m) and apply it to the state.
    unsigned int t[3][3];
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        curand_MRGmatPow3x3( mrg32k3aM1, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3(mrg32k3aM2, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    ,
        // Host build uses host-resident copies of the same matrices.
        curand_MRGmatPow3x3( mrg32k3aM1Host, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3(mrg32k3aM2Host, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    )
}
|
| 1196 |
+
|
| 1197 |
+
/**
|
| 1198 |
+
* \brief Update MRG32k3a state to skip ahead \p n subsequences.
|
| 1199 |
+
*
|
| 1200 |
+
 * Update the MRG32k3a state in \p state to skip ahead \p n subsequences. Each
 * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>76</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
 * \xmlonly<ph outputclass="xmlonly">2<sup>76</sup></ph>\endxmlonly * n elements.
|
| 1205 |
+
*
|
| 1206 |
+
* Valid values of \p n are 0 to \xmlonly<ph outputclass="xmlonly">2<sup>51</sup></ph>\endxmlonly. Note \p n will be masked to 51 bits
|
| 1207 |
+
*
|
| 1208 |
+
* \param n - Number of subsequences to skip
|
| 1209 |
+
* \param state - Pointer to state to update
|
| 1210 |
+
*/
|
| 1211 |
+
QUALIFIERS void skipahead_subsequence(unsigned long long n, curandStateMRG32k3a_t *state)
{
    // Same matrix-power technique as skipahead(), but with matrices that are
    // pre-raised to the subsequence stride.
    unsigned int t[3][3];
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        curand_MRGmatPow3x3( mrg32k3aM1SubSeq, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3( mrg32k3aM2SubSeq, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    ,
        // Host build uses host-resident copies of the same matrices.
        curand_MRGmatPow3x3( mrg32k3aM1SubSeqHost, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3( mrg32k3aM2SubSeqHost, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    )
}
|
| 1226 |
+
|
| 1227 |
+
/**
|
| 1228 |
+
* \brief Update MRG32k3a state to skip ahead \p n sequences.
|
| 1229 |
+
*
|
| 1230 |
+
* Update the MRG32k3a state in \p state to skip ahead \p n sequences. Each
|
| 1231 |
+
* sequence is \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
|
| 1232 |
+
* \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly * n elements.
|
| 1233 |
+
*
|
| 1234 |
+
* All values of \p n are valid. Large values require more computation and so
|
| 1235 |
+
* will take more time to complete.
|
| 1236 |
+
*
|
| 1237 |
+
* \param n - Number of sequences to skip
|
| 1238 |
+
* \param state - Pointer to state to update
|
| 1239 |
+
*/
|
| 1240 |
+
QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateMRG32k3a_t *state)
{
    // Same matrix-power technique as skipahead(), but with matrices that are
    // pre-raised to the full-sequence stride.
    unsigned int t[3][3];
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        curand_MRGmatPow3x3( mrg32k3aM1Seq, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3( mrg32k3aM2Seq, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    ,
        // Host build uses host-resident copies of the same matrices.
        curand_MRGmatPow3x3( mrg32k3aM1SeqHost, t, MRG32K3A_MOD1, n);
        curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
        curand_MRGmatPow3x3( mrg32k3aM2SeqHost, t, MRG32K3A_MOD2, n);
        curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
    )
}
|
| 1255 |
+
|
| 1256 |
+
|
| 1257 |
+
/**
|
| 1258 |
+
* \brief Initialize MRG32k3a state.
|
| 1259 |
+
*
|
| 1260 |
+
* Initialize MRG32k3a state in \p state with the given \p seed, \p subsequence,
|
| 1261 |
+
* and \p offset.
|
| 1262 |
+
*
|
| 1263 |
+
* All input values of \p seed, \p subsequence, and \p offset are legal.
|
| 1264 |
+
* \p subsequence will be truncated to 51 bits to avoid running into the next sequence
|
| 1265 |
+
*
|
| 1266 |
+
* A value of 0 for \p seed sets the state to the values of the original
|
| 1267 |
+
* published version of the \p MRG32k3a algorithm.
|
| 1268 |
+
*
|
| 1269 |
+
* \param seed - Arbitrary bits to use as a seed
|
| 1270 |
+
* \param subsequence - Subsequence to start at
|
| 1271 |
+
* \param offset - Absolute offset into sequence
|
| 1272 |
+
* \param state - Pointer to state to initialize
|
| 1273 |
+
*/
|
| 1274 |
+
QUALIFIERS void curand_init(unsigned long long seed,
                            unsigned long long subsequence,
                            unsigned long long offset,
                            curandStateMRG32k3a_t *state)
{
    // A zero seed keeps the canonical published MRG32k3a start state.
    for (int j = 0; j < 3; j++) {
        state->s1[j] = 12345u;
        state->s2[j] = 12345u;
    }
    if (seed != 0ull) {
        // Mix both halves of the seed into the start state by modular
        // multiplication, alternating the halves across the six words.
        const unsigned int lo = ((unsigned int)seed) ^ 0x55555555UL;
        const unsigned int hi = (unsigned int)((seed >> 32) ^ 0xAAAAAAAAUL);
        state->s1[0] = (unsigned int)curand_MRGmodMul(lo, state->s1[0], MRG32K3A_MOD1);
        state->s1[1] = (unsigned int)curand_MRGmodMul(hi, state->s1[1], MRG32K3A_MOD1);
        state->s1[2] = (unsigned int)curand_MRGmodMul(lo, state->s1[2], MRG32K3A_MOD1);
        state->s2[0] = (unsigned int)curand_MRGmodMul(hi, state->s2[0], MRG32K3A_MOD2);
        state->s2[1] = (unsigned int)curand_MRGmodMul(lo, state->s2[1], MRG32K3A_MOD2);
        state->s2[2] = (unsigned int)curand_MRGmodMul(hi, state->s2[2], MRG32K3A_MOD2);
    }
    // Position the generator within the stream.
    skipahead_subsequence(subsequence, state);
    skipahead(offset, state);
    // Reset cached Box-Muller leftovers.
    state->boxmuller_flag = 0;
    state->boxmuller_flag_double = 0;
    state->boxmuller_extra = 0.f;
    state->boxmuller_extra_double = 0.;
}
|
| 1301 |
+
|
| 1302 |
+
/**
|
| 1303 |
+
* \brief Update Sobol32 state to skip \p n elements.
|
| 1304 |
+
*
|
| 1305 |
+
* Update the Sobol32 state in \p state to skip ahead \p n elements.
|
| 1306 |
+
*
|
| 1307 |
+
* All values of \p n are valid.
|
| 1308 |
+
*
|
| 1309 |
+
* \param n - Number of elements to skip
|
| 1310 |
+
* \param state - Pointer to state to update
|
| 1311 |
+
*/
|
| 1312 |
+
template <typename T>
QUALIFIERS
typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol32_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol32_t*, T>::value>::type
skipahead(unsigned int n, T state)
{
    // Rebuild x from scratch at the new index: start from the scramble
    // constant and XOR in the direction vector of every set bit of the Gray
    // code of state->i.
    state->i += n;
    state->x = state->c;
    const unsigned int gray = state->i ^ (state->i >> 1);
    unsigned int mask = 1u;
    for (unsigned int bit = 0; bit < 32; bit++) {
        if (gray & mask) {
            state->x ^= state->direction_vectors[bit];
        }
        mask <<= 1;
    }
}
|
| 1329 |
+
|
| 1330 |
+
/**
|
| 1331 |
+
* \brief Update Sobol64 state to skip \p n elements.
|
| 1332 |
+
*
|
| 1333 |
+
* Update the Sobol64 state in \p state to skip ahead \p n elements.
|
| 1334 |
+
*
|
| 1335 |
+
* All values of \p n are valid.
|
| 1336 |
+
*
|
| 1337 |
+
* \param n - Number of elements to skip
|
| 1338 |
+
* \param state - Pointer to state to update
|
| 1339 |
+
*/
|
| 1340 |
+
template <typename T>
QUALIFIERS
typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol64_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol64_t*, T>::value>::type
skipahead(unsigned long long n, T state)
{
    // Rebuild x from scratch at the new index: start from the scramble
    // constant and XOR in the direction vector of every set bit of the Gray
    // code of state->i.
    state->i += n;
    state->x = state->c;
    const unsigned long long gray = state->i ^ (state->i >> 1);
    unsigned long long mask = 1ULL;
    for (unsigned int bit = 0; bit < 64; bit++) {
        if (gray & mask) {
            state->x ^= state->direction_vectors[bit];
        }
        mask <<= 1;
    }
}
|
| 1357 |
+
|
| 1358 |
+
/**
|
| 1359 |
+
* \brief Initialize Sobol32 state.
|
| 1360 |
+
*
|
| 1361 |
+
* Initialize Sobol32 state in \p state with the given \p direction \p vectors and
|
| 1362 |
+
* \p offset.
|
| 1363 |
+
*
|
| 1364 |
+
* The direction vector is a device pointer to an array of 32 unsigned ints.
|
| 1365 |
+
* All input values of \p offset are legal.
|
| 1366 |
+
*
|
| 1367 |
+
* \param direction_vectors - Pointer to array of 32 unsigned ints representing the
|
| 1368 |
+
* direction vectors for the desired dimension
|
| 1369 |
+
* \param offset - Absolute offset into sequence
|
| 1370 |
+
* \param state - Pointer to state to initialize
|
| 1371 |
+
*/
|
| 1372 |
+
QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
                            unsigned int offset,
                            curandStateSobol32_t *state)
{
    state->i = 0;
    state->c = 0;  // plain (unscrambled) Sobol32: no scramble constant
    state->x = 0;
    // Copy the per-dimension direction vectors into the state.
    for (int k = 0; k < 32; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    // Walk to the requested absolute position.
    skipahead<curandStateSobol32_t *>(offset, state);
}
|
| 1384 |
+
/**
|
| 1385 |
+
* \brief Initialize Scrambled Sobol32 state.
|
| 1386 |
+
*
|
| 1387 |
+
* Initialize Sobol32 state in \p state with the given \p direction \p vectors and
|
| 1388 |
+
* \p offset.
|
| 1389 |
+
*
|
| 1390 |
+
* The direction vector is a device pointer to an array of 32 unsigned ints.
|
| 1391 |
+
* All input values of \p offset are legal.
|
| 1392 |
+
*
|
| 1393 |
+
* \param direction_vectors - Pointer to array of 32 unsigned ints representing the
|
| 1394 |
+
direction vectors for the desired dimension
|
| 1395 |
+
* \param scramble_c Scramble constant
|
| 1396 |
+
* \param offset - Absolute offset into sequence
|
| 1397 |
+
* \param state - Pointer to state to initialize
|
| 1398 |
+
*/
|
| 1399 |
+
QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
                            unsigned int scramble_c,
                            unsigned int offset,
                            curandStateScrambledSobol32_t *state)
{
    state->i = 0;
    state->c = scramble_c;
    // Copy the per-dimension direction vectors into the state.
    for (int k = 0; k < 32; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    // The scramble constant is the starting point of the sequence.
    state->x = state->c;
    // Walk to the requested absolute position.
    skipahead<curandStateScrambledSobol32_t *>(offset, state);
}
|
| 1412 |
+
|
| 1413 |
+
QUALIFIERS int __curand_find_trailing_zero(unsigned int x)
{
    // Index of the least-significant ZERO bit of x; clamped to 31 when x is
    // all ones. Device code asks __ffs for the first set bit of ~x; host code
    // counts trailing one bits directly.
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        int y = __ffs(~x);
        if(y)
            return y - 1;
        return 31;
    ,
        int count = 0;
        while(x & 1) {
            count++;
            x >>= 1;
        }
        return count > 31 ? 31 : count;
    )
}
|
| 1430 |
+
|
| 1431 |
+
QUALIFIERS int __curand_find_trailing_zero(unsigned long long x)
{
    // 64-bit variant: index of the least-significant ZERO bit of x; clamped
    // to 63 when x is all ones.
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        int y = __ffsll(~x);
        if(y)
            return y - 1;
        return 63;
    ,
        int count = 0;
        while(x & 1) {
            count++;
            x >>= 1;
        }
        return count > 63 ? 63 : count;
    )
}
|
| 1448 |
+
|
| 1449 |
+
/**
|
| 1450 |
+
* \brief Initialize Sobol64 state.
|
| 1451 |
+
*
|
| 1452 |
+
* Initialize Sobol64 state in \p state with the given \p direction \p vectors and
|
| 1453 |
+
* \p offset.
|
| 1454 |
+
*
|
| 1455 |
+
* The direction vector is a device pointer to an array of 64 unsigned long longs.
|
| 1456 |
+
* All input values of \p offset are legal.
|
| 1457 |
+
*
|
| 1458 |
+
* \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
|
| 1459 |
+
direction vectors for the desired dimension
|
| 1460 |
+
* \param offset - Absolute offset into sequence
|
| 1461 |
+
* \param state - Pointer to state to initialize
|
| 1462 |
+
*/
|
| 1463 |
+
QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
                            unsigned long long offset,
                            curandStateSobol64_t *state)
{
    state->i = 0;
    state->c = 0;  // plain (unscrambled) Sobol64: no scramble constant
    state->x = 0;
    // Copy the per-dimension direction vectors into the state.
    for (int k = 0; k < 64; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    // Walk to the requested absolute position.
    skipahead<curandStateSobol64_t *>(offset, state);
}
|
| 1475 |
+
|
| 1476 |
+
/**
|
| 1477 |
+
* \brief Initialize Scrambled Sobol64 state.
|
| 1478 |
+
*
|
| 1479 |
+
* Initialize Sobol64 state in \p state with the given \p direction \p vectors and
|
| 1480 |
+
* \p offset.
|
| 1481 |
+
*
|
| 1482 |
+
* The direction vector is a device pointer to an array of 64 unsigned long longs.
|
| 1483 |
+
* All input values of \p offset are legal.
|
| 1484 |
+
*
|
| 1485 |
+
* \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
|
| 1486 |
+
direction vectors for the desired dimension
|
| 1487 |
+
* \param scramble_c Scramble constant
|
| 1488 |
+
* \param offset - Absolute offset into sequence
|
| 1489 |
+
* \param state - Pointer to state to initialize
|
| 1490 |
+
*/
|
| 1491 |
+
QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
                            unsigned long long scramble_c,
                            unsigned long long offset,
                            curandStateScrambledSobol64_t *state)
{
    state->i = 0;
    state->c = scramble_c;
    // Copy the per-dimension direction vectors into the state.
    for (int k = 0; k < 64; k++) {
        state->direction_vectors[k] = direction_vectors[k];
    }
    // The scramble constant is the starting point of the sequence.
    state->x = state->c;
    // Walk to the requested absolute position.
    skipahead<curandStateScrambledSobol64_t *>(offset, state);
}
|
| 1504 |
+
|
| 1505 |
+
/**
|
| 1506 |
+
* \brief Return 32-bits of quasirandomness from a Sobol32 generator.
|
| 1507 |
+
*
|
| 1508 |
+
* Return 32-bits of quasirandomness from the Sobol32 generator in \p state,
|
| 1509 |
+
* increment position of generator by one.
|
| 1510 |
+
*
|
| 1511 |
+
* \param state - Pointer to state to update
|
| 1512 |
+
*
|
| 1513 |
+
* \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
|
| 1514 |
+
*/
|
| 1515 |
+
|
| 1516 |
+
QUALIFIERS unsigned int curand(curandStateSobol32_t * state)
{
    // Gray-code increment: moving from index i to i+1 flips exactly one bit
    // (the trailing-zero bit of i), so a single XOR advances the point.
    const unsigned int prev = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return prev;
}
|
| 1526 |
+
|
| 1527 |
+
/**
|
| 1528 |
+
* \brief Return 32-bits of quasirandomness from a scrambled Sobol32 generator.
|
| 1529 |
+
*
|
| 1530 |
+
* Return 32-bits of quasirandomness from the scrambled Sobol32 generator in \p state,
|
| 1531 |
+
* increment position of generator by one.
|
| 1532 |
+
*
|
| 1533 |
+
* \param state - Pointer to state to update
|
| 1534 |
+
*
|
| 1535 |
+
* \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
|
| 1536 |
+
*/
|
| 1537 |
+
|
| 1538 |
+
QUALIFIERS unsigned int curand(curandStateScrambledSobol32_t * state)
{
    // Gray-code increment: moving from index i to i+1 flips exactly one bit
    // (the trailing-zero bit of i), so a single XOR advances the point.
    const unsigned int prev = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return prev;
}
|
| 1548 |
+
|
| 1549 |
+
/**
|
| 1550 |
+
* \brief Return 64-bits of quasirandomness from a Sobol64 generator.
|
| 1551 |
+
*
|
| 1552 |
+
* Return 64-bits of quasirandomness from the Sobol64 generator in \p state,
|
| 1553 |
+
* increment position of generator by one.
|
| 1554 |
+
*
|
| 1555 |
+
* \param state - Pointer to state to update
|
| 1556 |
+
*
|
| 1557 |
+
* \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
|
| 1558 |
+
*/
|
| 1559 |
+
|
| 1560 |
+
QUALIFIERS unsigned long long curand(curandStateSobol64_t * state)
{
    // Gray-code increment: moving from index i to i+1 flips exactly one bit
    // (the trailing-zero bit of i), so a single XOR advances the point.
    const unsigned long long prev = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return prev;
}
|
| 1570 |
+
|
| 1571 |
+
/**
|
| 1572 |
+
* \brief Return 64-bits of quasirandomness from a scrambled Sobol64 generator.
|
| 1573 |
+
*
|
| 1574 |
+
* Return 64-bits of quasirandomness from the scrambled Sobol32 generator in \p state,
|
| 1575 |
+
* increment position of generator by one.
|
| 1576 |
+
*
|
| 1577 |
+
* \param state - Pointer to state to update
|
| 1578 |
+
*
|
| 1579 |
+
* \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
|
| 1580 |
+
*/
|
| 1581 |
+
|
| 1582 |
+
QUALIFIERS unsigned long long curand(curandStateScrambledSobol64_t * state)
{
    // Gray-code increment: moving from index i to i+1 flips exactly one bit
    // (the trailing-zero bit of i), so a single XOR advances the point.
    const unsigned long long prev = state->x;
    state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
    state->i++;
    return prev;
}
|
| 1592 |
+
|
| 1593 |
+
#include "curand_uniform.h"
|
| 1594 |
+
#include "curand_normal.h"
|
| 1595 |
+
#include "curand_lognormal.h"
|
| 1596 |
+
#include "curand_poisson.h"
|
| 1597 |
+
#include "curand_discrete2.h"
|
| 1598 |
+
|
| 1599 |
+
__device__ static inline unsigned int *__get_precalculated_matrix(int n)
{
    // Dispatch into the device-side precalculated XORWOW skip-ahead tables by
    // selector n (default and n == 0 use the sequence matrices; n == 2 uses
    // the offset matrices).
    // NOTE(review): every branch indexes the table with n itself (e.g. n == 2
    // reads precalc_xorwow_offset_matrix[2]) - confirm against the table
    // layout that the selector is also meant to be the row index.
    if(n == 0) {
        return precalc_xorwow_matrix[n];
    }
    if(n == 2) {
        return precalc_xorwow_offset_matrix[n];
    }
    return precalc_xorwow_matrix[n];
}
|
| 1609 |
+
|
| 1610 |
+
#ifndef __CUDACC_RTC__
|
| 1611 |
+
__host__ static inline unsigned int *__get_precalculated_matrix_host(int n)
{
    // Host-side counterpart of __get_precalculated_matrix, dispatching into
    // the host copies of the precalculated XORWOW skip-ahead tables.
    // NOTE(review): as in the device version, every branch indexes with n
    // itself - confirm the selector doubles as the row index.
    if(n == 1) {
        return precalc_xorwow_matrix_host[n];
    }
    if(n == 3) {
        return precalc_xorwow_offset_matrix_host[n];
    }
    return precalc_xorwow_matrix_host[n];
}
|
| 1621 |
+
#endif // #ifndef __CUDACC_RTC__
|
| 1622 |
+
|
| 1623 |
+
__device__ static inline unsigned int *__get_mrg32k3a_matrix(int n)
{
    // Dispatch into the device-side MRG32k3a skip-ahead matrix tables by
    // selector n: element / subsequence / sequence strides for each of the
    // two recurrence components.
    // NOTE(review): each branch indexes the chosen table with n itself
    // (e.g. n == 8 reads mrg32k3aM1Seq[8][0]) - confirm the selector is also
    // meant to be the row index.
    if(n == 0) {
        return mrg32k3aM1[n][0];
    }
    if(n == 2) {
        return mrg32k3aM2[n][0];
    }
    if(n == 4) {
        return mrg32k3aM1SubSeq[n][0];
    }
    if(n == 6) {
        return mrg32k3aM2SubSeq[n][0];
    }
    if(n == 8) {
        return mrg32k3aM1Seq[n][0];
    }
    if(n == 10) {
        return mrg32k3aM2Seq[n][0];
    }
    return mrg32k3aM1[n][0];
}
|
| 1645 |
+
|
| 1646 |
+
#ifndef __CUDACC_RTC__
|
| 1647 |
+
/* Host-side counterpart of __get_mrg32k3a_matrix(): returns a pointer to
 * the first row of one of the host copies of the MRG32k3a skip-ahead
 * matrix tables, selected by index n.
 * NOTE(review): here the selectors are the odd values (1 -> M1, 3 -> M2,
 * 5 -> M1SubSeq, 7 -> M2SubSeq, 9 -> M1Seq, 11 -> M2Seq), mirroring the
 * even-valued device version; n is again reused as the index into the
 * selected table -- confirm against the callers. */
__host__ static inline unsigned int *__get_mrg32k3a_matrix_host(int n)
{
    if(n == 1) {
        return mrg32k3aM1Host[n][0];
    }
    if(n == 3) {
        return mrg32k3aM2Host[n][0];
    }
    if(n == 5) {
        return mrg32k3aM1SubSeqHost[n][0];
    }
    if(n == 7) {
        return mrg32k3aM2SubSeqHost[n][0];
    }
    if(n == 9) {
        return mrg32k3aM1SeqHost[n][0];
    }
    if(n == 11) {
        return mrg32k3aM2SeqHost[n][0];
    }
    return mrg32k3aM1Host[n][0];
}
|
| 1669 |
+
|
| 1670 |
+
/* Expose the host copy of the lgamma lookup table (__cr_lgamma_table),
 * used by the Poisson-distribution helpers. */
__host__ static inline double *__get__cr_lgamma_table_host(void)
{
    double *table = __cr_lgamma_table;
    return table;
}
|
| 1673 |
+
#endif // #ifndef __CUDACC_RTC__
|
| 1674 |
+
|
| 1675 |
+
/** @} */
|
| 1676 |
+
|
| 1677 |
+
#endif // !defined(CURAND_KERNEL_H_)
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h
ADDED
|
@@ -0,0 +1,516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
/*
|
| 51 |
+
* curand_mtgp32_host.h
|
| 52 |
+
*
|
| 53 |
+
*
|
| 54 |
+
* MTGP32-11213
|
| 55 |
+
*
|
| 56 |
+
* Mersenne Twister RNG for the GPU
|
| 57 |
+
*
|
| 58 |
+
* The period of generated integers is 2<sup>11213</sup>-1.
|
| 59 |
+
*
|
| 60 |
+
* This code generates 32-bit unsigned integers, and
|
| 61 |
+
* single precision floating point numbers uniformly distributed
|
| 62 |
+
* in the range [1, 2). (float r; 1.0 <= r < 2.0)
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
/*
|
| 66 |
+
* Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
| 67 |
+
* University. All rights reserved.
|
| 68 |
+
* Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
| 69 |
+
* University and University of Tokyo. All rights reserved.
|
| 70 |
+
*
|
| 71 |
+
* Redistribution and use in source and binary forms, with or without
|
| 72 |
+
* modification, are permitted provided that the following conditions are
|
| 73 |
+
* met:
|
| 74 |
+
*
|
| 75 |
+
* * Redistributions of source code must retain the above copyright
|
| 76 |
+
* notice, this list of conditions and the following disclaimer.
|
| 77 |
+
* * Redistributions in binary form must reproduce the above
|
| 78 |
+
* copyright notice, this list of conditions and the following
|
| 79 |
+
* disclaimer in the documentation and/or other materials provided
|
| 80 |
+
* with the distribution.
|
| 81 |
+
* * Neither the name of the Hiroshima University nor the names of
|
| 82 |
+
* its contributors may be used to endorse or promote products
|
| 83 |
+
* derived from this software without specific prior written
|
| 84 |
+
* permission.
|
| 85 |
+
*
|
| 86 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 87 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 88 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 89 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 90 |
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 91 |
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 92 |
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 93 |
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 94 |
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 95 |
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 96 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 97 |
+
*/
|
| 98 |
+
#if !defined CURAND_MTGP32_HOST_H
|
| 99 |
+
#define CURAND_MTGP32_HOST_H
|
| 100 |
+
|
| 101 |
+
#if !defined(QUALIFIERS)
|
| 102 |
+
#define QUALIFIERS static inline __device__
|
| 103 |
+
#endif
|
| 104 |
+
|
| 105 |
+
#include <cuda_runtime.h>
|
| 106 |
+
#include <stdlib.h>
|
| 107 |
+
#include <memory.h>
|
| 108 |
+
#include <string.h>
|
| 109 |
+
#include "curand.h"
|
| 110 |
+
#include "curand_mtgp32.h"
|
| 111 |
+
#include "curand_mtgp32dc_p_11213.h"
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
/**
|
| 115 |
+
* \addtogroup DEVICE Device API
|
| 116 |
+
*
|
| 117 |
+
* @{
|
| 118 |
+
*/
|
| 119 |
+
|
| 120 |
+
static const unsigned int non_zero = 0x4d544750;
|
| 121 |
+
|
| 122 |
+
/*
 * First of the two mixing functions used by the MTGP32 seeding routines
 * mtgp32_init_by_array() and mtgp32_init_by_str().
 * @param[in] x 32-bit integer
 * @return 32-bit integer
 */
static __forceinline__ unsigned int ini_func1(unsigned int x) {
    unsigned int mixed = x ^ (x >> 27);
    return mixed * 1664525u;
}
|
| 131 |
+
|
| 132 |
+
/*
 * Second of the two mixing functions used by the MTGP32 seeding routines
 * mtgp32_init_by_array() and mtgp32_init_by_str().
 * @param[in] x 32-bit integer
 * @return 32-bit integer
 */
static __forceinline__ unsigned int ini_func2(unsigned int x) {
    unsigned int mixed = x ^ (x >> 27);
    return mixed * 1566083941u;
}
|
| 141 |
+
|
| 142 |
+
/*
|
| 143 |
+
* This function initializes the internal state array with a 32-bit
|
| 144 |
+
* integer seed. The allocated memory should be freed by calling
|
| 145 |
+
* mtgp32_free(). \b para should be one of the elements in the
|
| 146 |
+
* parameter table (mtgp32-param-ref.c).
|
| 147 |
+
*
|
| 148 |
+
* This function is call by cuda program, because cuda program uses
|
| 149 |
+
* another structure and another allocation method.
|
| 150 |
+
*
|
| 151 |
+
* @param[out] array MTGP internal status vector.
|
| 152 |
+
* @param[in] para parameter structure
|
| 153 |
+
* @param[in] seed a 32-bit integer used as the seed.
|
| 154 |
+
*/
|
| 155 |
+
static __forceinline__ __host__
void mtgp32_init_state(unsigned int state[],
    const mtgp32_params_fast_t *para, unsigned int seed) {
    int i;
    /* State length in 32-bit words for this parameter set. */
    int size = para->mexp / 32 + 1;
    unsigned int hidden_seed;
    unsigned int tmp;
    /* Parameter-set-specific constant folded into the seeding so different
     * parameter sets yield different streams for the same user seed. */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    tmp = hidden_seed;
    tmp += tmp >> 16;
    tmp += tmp >> 8;
    /* Pre-fill every byte of the state with a value derived from
     * hidden_seed; this guarantees a non-trivial starting pattern. */
    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
    state[0] = seed;
    state[1] = hidden_seed;
    /* Knuth-style linear recurrence (same 1812433253 multiplier as the
     * MT19937 initializer).  Note: state[i] is XOR-combined (^=) with the
     * pre-filled byte pattern rather than overwritten, so state[1] keeps
     * a contribution from hidden_seed. */
    for (i = 1; i < size; i++) {
        state[i] ^= (1812433253) * (state[i - 1] ^ (state[i - 1] >> 30)) + i;
    }
}
|
| 173 |
+
|
| 174 |
+
/*
|
| 175 |
+
* This function initializes the internal state array
|
| 176 |
+
* with a 32-bit integer array. \b para should be one of the elements in
|
| 177 |
+
* the parameter table (mtgp32-param-ref.c).
|
| 178 |
+
*
|
| 179 |
+
* @param[out] mtgp32 MTGP structure.
|
| 180 |
+
* @param[in] para parameter structure
|
| 181 |
+
* @param[in] array a 32-bit integer array used as a seed.
|
| 182 |
+
* @param[in] length length of the array.
|
| 183 |
+
* @return CURAND_STATUS_SUCCESS
|
| 184 |
+
*/
|
| 185 |
+
static __forceinline__ __host__
int mtgp32_init_by_array(unsigned int state[],
    const mtgp32_params_fast_t *para,
    unsigned int *array, int length) {
    int i, j, count;
    unsigned int r;
    int lag;        /* stride of the secondary diffusion tap */
    int mid;        /* stride of the primary diffusion tap */
    /* State length in 32-bit words for this parameter set. */
    int size = para->mexp / 32 + 1;
    unsigned int hidden_seed;
    unsigned int tmp;

    /* Lag table from the reference MTGP implementation: larger states get
     * a larger lag so the seed material diffuses across the whole array. */
    if (size >= 623) {
        lag = 11;
    } else if (size >= 68) {
        lag = 7;
    } else if (size >= 39) {
        lag = 5;
    } else {
        lag = 3;
    }
    mid = (size - lag) / 2;

    /* Pre-fill the state with a byte pattern derived from a
     * parameter-set-specific constant (see mtgp32_init_state). */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    tmp = hidden_seed;
    tmp += tmp >> 16;
    tmp += tmp >> 8;
    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
    state[0] = hidden_seed;

    /* Enough rounds to consume the whole seed array and touch every word. */
    if (length + 1 > size) {
        count = length + 1;
    } else {
        count = size;
    }
    /* Round 0: mix the array length into the state. */
    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
    state[mid] += r;
    r += length;
    state[(mid + lag) % size] += r;
    state[0] = r;
    i = 1;
    count--;
    /* Phase 1: absorb the caller's seed words (additive mixing). */
    for (i = 1, j = 0; (j < count) && (j < length); j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += array[j] + i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Phase 2: continue additive mixing once the seed array is exhausted. */
    for (; j < count; j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Phase 3: one full XOR-mixing pass over the state with ini_func2. */
    for (j = 0; j < size; j++) {
        r = ini_func2(state[i] + state[(i + mid) % size]
                      + state[(i + size - 1) % size]);
        state[(i + mid) % size] ^= r;
        r -= i;
        state[(i + mid + lag) % size] ^= r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* The all-zero state is a fixed point of the recurrence; patch the
     * last word with a non-zero constant ("MTGP" in ASCII) to avoid it. */
    if (state[size - 1] == 0) {
        state[size - 1] = non_zero;
    }
    return 0;
}
|
| 259 |
+
|
| 260 |
+
/*
|
| 261 |
+
* This function initializes the internal state array
|
| 262 |
+
* with a character array. \b para should be one of the elements in
|
| 263 |
+
* the parameter table (mtgp32-param-ref.c).
|
| 264 |
+
* This is the same algorithm with mtgp32_init_by_array(), but hope to
|
| 265 |
+
* be more useful.
|
| 266 |
+
*
|
| 267 |
+
* @param[out] mtgp32 MTGP structure.
|
| 268 |
+
* @param[in] para parameter structure
|
| 269 |
+
* @param[in] array a character array used as a seed. (terminated by zero.)
|
| 270 |
+
* @return memory allocation result. if 0 then O.K.
|
| 271 |
+
*/
|
| 272 |
+
static __forceinline__ __host__
int mtgp32_init_by_str(unsigned int state[],
    const mtgp32_params_fast_t *para, unsigned char *array) {
    int i, j, count;
    unsigned int r;
    int lag;        /* stride of the secondary diffusion tap */
    int mid;        /* stride of the primary diffusion tap */
    /* State length in 32-bit words for this parameter set. */
    int size = para->mexp / 32 + 1;
    /* Seed length is the NUL-terminated string length; each byte of the
     * string is consumed as one seed word below. */
    int length = (unsigned int)strlen((char *)array);
    unsigned int hidden_seed;
    unsigned int tmp;

    /* Same algorithm as mtgp32_init_by_array(), only the seed source
     * differs (bytes of a C string instead of 32-bit words). */
    if (size >= 623) {
        lag = 11;
    } else if (size >= 68) {
        lag = 7;
    } else if (size >= 39) {
        lag = 5;
    } else {
        lag = 3;
    }
    mid = (size - lag) / 2;

    /* Pre-fill the state with a byte pattern derived from a
     * parameter-set-specific constant (see mtgp32_init_state). */
    hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
    tmp = hidden_seed;
    tmp += tmp >> 16;
    tmp += tmp >> 8;
    memset(state, tmp & 0xff, sizeof(unsigned int) * size);
    state[0] = hidden_seed;

    /* Enough rounds to consume the whole string and touch every word. */
    if (length + 1 > size) {
        count = length + 1;
    } else {
        count = size;
    }
    /* Round 0: mix the string length into the state. */
    r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
    state[mid] += r;
    r += length;
    state[(mid + lag) % size] += r;
    state[0] = r;
    i = 1;
    count--;
    /* Phase 1: absorb the seed bytes (additive mixing). */
    for (i = 1, j = 0; (j < count) && (j < length); j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += array[j] + i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Phase 2: continue additive mixing once the string is exhausted. */
    for (; j < count; j++) {
        r = ini_func1(state[i] ^ state[(i + mid) % size]
                      ^ state[(i + size - 1) % size]);
        state[(i + mid) % size] += r;
        r += i;
        state[(i + mid + lag) % size] += r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* Phase 3: one full XOR-mixing pass over the state with ini_func2. */
    for (j = 0; j < size; j++) {
        r = ini_func2(state[i] + state[(i + mid) % size]
                      + state[(i + size - 1) % size]);
        state[(i + mid) % size] ^= r;
        r -= i;
        state[(i + mid + lag) % size] ^= r;
        state[i] = r;
        i = (i + 1) % size;
    }
    /* The all-zero state is a fixed point of the recurrence; patch the
     * last word with a non-zero constant ("MTGP" in ASCII) to avoid it. */
    if (state[size - 1] == 0) {
        state[size - 1] = non_zero;
    }
    return 0;
}
|
| 346 |
+
|
| 347 |
+
/*
 * Shared implementation behind curandMakeMTGP32Constants().
 * Re-packs block_num parameter sets from the host table layout (array of
 * mtgp32_params_fast_t) into flat per-field arrays indexed by block id,
 * then copies each array into the device-resident structure *p.
 *
 * @param[in]  params    host array of block_num parameter sets
 * @param[out] p         kernel-parameter structure in device memory
 * @param[in]  block_num number of parameter sets to pack
 *
 * @return
 * - CURAND_STATUS_ALLOCATION_FAILED if a host temporary could not be allocated
 * - CURAND_STATUS_INITIALIZATION_FAILED if a copy to device memory failed
 * - CURAND_STATUS_SUCCESS otherwise
 */
template<typename ParamsType>
static __forceinline__ __host__
curandStatus_t curandMakeMTGP32ConstantsImpl(const mtgp32_params_fast_t params[], ParamsType * p, const int block_num)
{
    const int size1 = sizeof(unsigned int) * block_num;
    const int size2 = sizeof(unsigned int) * block_num * TBL_SIZE;
    unsigned int *h_pos_tbl;
    unsigned int *h_sh1_tbl;
    unsigned int *h_sh2_tbl;
    unsigned int *h_param_tbl;
    unsigned int *h_temper_tbl;
    unsigned int *h_single_temper_tbl;
    unsigned int *h_mask;
    curandStatus_t status = CURAND_STATUS_SUCCESS;

    h_pos_tbl = (unsigned int *)malloc(size1);
    h_sh1_tbl = (unsigned int *)malloc(size1);
    h_sh2_tbl = (unsigned int *)malloc(size1);
    h_param_tbl = (unsigned int *)malloc(size2);
    h_temper_tbl = (unsigned int *)malloc(size2);
    h_single_temper_tbl = (unsigned int *)malloc(size2);
    h_mask = (unsigned int *)malloc(sizeof(unsigned int));
    if (h_pos_tbl == NULL
            || h_sh1_tbl == NULL
            || h_sh2_tbl == NULL
            || h_param_tbl == NULL
            || h_temper_tbl == NULL
            || h_single_temper_tbl == NULL
            || h_mask == NULL) {
        /* BUGFIX: the previous version freed the successfully allocated
         * buffers here AND again in the common cleanup below, causing a
         * double free on the allocation-failure path.  All buffers are
         * now released exactly once, in the common cleanup. */
        status = CURAND_STATUS_ALLOCATION_FAILED;
    } else {
        /* Flatten the array-of-structs parameter sets into per-field
         * arrays indexed by block id; the mask is shared by all sets. */
        h_mask[0] = params[0].mask;
        for (int i = 0; i < block_num; i++) {
            h_pos_tbl[i] = params[i].pos;
            h_sh1_tbl[i] = params[i].sh1;
            h_sh2_tbl[i] = params[i].sh2;
            for (int j = 0; j < TBL_SIZE; j++) {
                h_param_tbl[i * TBL_SIZE + j] = params[i].tbl[j];
                h_temper_tbl[i * TBL_SIZE + j] = params[i].tmp_tbl[j];
                h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
            }
        }
        /* Copy each flattened table to device memory; stop at the first
         * failure and report CURAND_STATUS_INITIALIZATION_FAILED. */
        if (cudaMemcpy( p->pos_tbl,
                h_pos_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        } else
        if (cudaMemcpy( p->sh1_tbl,
                h_sh1_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        } else
        if (cudaMemcpy( p->sh2_tbl,
                h_sh2_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        } else
        if (cudaMemcpy( p->param_tbl,
                h_param_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        } else
        if (cudaMemcpy( p->temper_tbl,
                h_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        } else
        if (cudaMemcpy( p->single_temper_tbl,
                h_single_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        } else
        if (cudaMemcpy( p->mask,
                h_mask, sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess)
        {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        }
    }
    /* Common cleanup: free(NULL) would be safe, but keep the explicit
     * NULL checks to match the file's existing style. */
    if (h_pos_tbl != NULL) free(h_pos_tbl);
    if (h_sh1_tbl != NULL) free(h_sh1_tbl);
    if (h_sh2_tbl != NULL) free(h_sh2_tbl);
    if (h_param_tbl != NULL) free(h_param_tbl);
    if (h_temper_tbl != NULL) free(h_temper_tbl);
    if (h_single_temper_tbl != NULL) free(h_single_temper_tbl);
    if (h_mask != NULL) free(h_mask);
    return status;
}
|
| 442 |
+
|
| 443 |
+
/**
|
| 444 |
+
* \brief Set up constant parameters for the mtgp32 generator
|
| 445 |
+
*
|
| 446 |
+
* This host-side helper function re-organizes CURAND_NUM_MTGP32_PARAMS sets of
|
| 447 |
+
* generator parameters for use by kernel functions and copies the
|
| 448 |
+
* result to the specified location in device memory.
|
| 449 |
+
*
|
| 450 |
+
* \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
|
| 451 |
+
* \param p - pointer to a structure of type mtgp32_kernel_params_t in device memory.
|
| 452 |
+
*
|
| 453 |
+
* \return
|
| 454 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if host memory could not be allocated
|
| 455 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
|
| 456 |
+
* - CURAND_STATUS_SUCCESS otherwise
|
| 457 |
+
*/
|
| 458 |
+
static __forceinline__ __host__
curandStatus_t curandMakeMTGP32Constants(const mtgp32_params_fast_t params[], mtgp32_kernel_params_t * p)
{
    /* Thin wrapper: pack the standard CURAND_NUM_MTGP32_PARAMS parameter
     * sets into the device-side kernel-parameter structure. */
    const curandStatus_t result =
        curandMakeMTGP32ConstantsImpl(params, p, CURAND_NUM_MTGP32_PARAMS);
    return result;
}
|
| 463 |
+
|
| 464 |
+
/**
|
| 465 |
+
* \brief Set up initial states for the mtgp32 generator
|
| 466 |
+
*
|
| 467 |
+
* This host-side helper function initializes a number of states (one parameter set per state) for
|
| 468 |
+
* an mtgp32 generator. To accomplish this it allocates a state array in host memory,
|
| 469 |
+
* initializes that array, and copies the result to device memory.
|
| 470 |
+
*
|
| 471 |
+
* \param s - pointer to an array of states in device memory
|
| 472 |
+
* \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
|
| 473 |
+
* \param k - pointer to a structure of type mtgp32_kernel_params_t in device memory
|
| 474 |
+
* \param n - number of parameter sets/states to initialize
|
| 475 |
+
* \param seed - seed value
|
| 476 |
+
*
|
| 477 |
+
* \return
|
| 478 |
+
* - CURAND_STATUS_ALLOCATION_FAILED if host memory state could not be allocated
|
| 479 |
+
* - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
|
| 480 |
+
* - CURAND_STATUS_SUCCESS otherwise
|
| 481 |
+
*/
|
| 482 |
+
static __forceinline__ __host__
curandStatus_t CURANDAPI curandMakeMTGP32KernelState(curandStateMtgp32_t *s,
                                                     mtgp32_params_fast_t params[],
                                                     mtgp32_kernel_params_t *k,
                                                     int n,
                                                     unsigned long long seed)
{
    int i;
    curandStatus_t status = CURAND_STATUS_SUCCESS;
    /* Host staging buffer for the n generator states; initialized here
     * and copied to device memory in one transfer. */
    curandStateMtgp32_t *h_status =(curandStateMtgp32_t *) malloc(sizeof(curandStateMtgp32_t) * n);
    if (h_status == NULL) {
        status = CURAND_STATUS_ALLOCATION_FAILED;
    } else {
        /* Fold the upper 32 bits of the 64-bit seed into the lower half
         * before truncating to the 32-bit seed each state consumes. */
        seed = seed ^ (seed >> 32);
        for (i = 0; i < n; i++) {
            /* Each state gets its own parameter set and a distinct
             * (seed + i + 1) so the n streams differ. */
            mtgp32_init_state(&(h_status[i].s[0]), &params[i],(unsigned int)seed + i + 1);
            h_status[i].offset = 0;
            h_status[i].pIdx = i;
            h_status[i].k = k;
        }
        if (cudaMemcpy(s, h_status,
                       sizeof(curandStateMtgp32_t) * n,
                       cudaMemcpyHostToDevice) != cudaSuccess) {
            status = CURAND_STATUS_INITIALIZATION_FAILED;
        }
    }
    /* free(NULL) is a no-op, so this is safe on the allocation-failure
     * path as well. */
    free(h_status);
    return status;
}
|
| 511 |
+
|
| 512 |
+
/** @} */
|
| 513 |
+
|
| 514 |
+
#endif
|
| 515 |
+
|
| 516 |
+
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h
ADDED
|
@@ -0,0 +1,386 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
/*
|
| 51 |
+
* curand_mtgp32_kernel.h
|
| 52 |
+
*
|
| 53 |
+
*
|
| 54 |
+
* MTGP32-11213
|
| 55 |
+
*
|
| 56 |
+
* Mersenne Twister RNG for the GPU
|
| 57 |
+
*
|
| 58 |
+
* The period of generated integers is 2<sup>11213</sup>-1.
|
| 59 |
+
*
|
| 60 |
+
* This code generates 32-bit unsigned integers, and
|
| 61 |
+
* single precision floating point numbers uniformly distributed
|
| 62 |
+
* in the range [1, 2). (float r; 1.0 <= r < 2.0)
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
/*
|
| 66 |
+
* Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
| 67 |
+
* University. All rights reserved.
|
| 68 |
+
* Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
| 69 |
+
* University and University of Tokyo. All rights reserved.
|
| 70 |
+
*
|
| 71 |
+
* Redistribution and use in source and binary forms, with or without
|
| 72 |
+
* modification, are permitted provided that the following conditions are
|
| 73 |
+
* met:
|
| 74 |
+
*
|
| 75 |
+
* * Redistributions of source code must retain the above copyright
|
| 76 |
+
* notice, this list of conditions and the following disclaimer.
|
| 77 |
+
* * Redistributions in binary form must reproduce the above
|
| 78 |
+
* copyright notice, this list of conditions and the following
|
| 79 |
+
* disclaimer in the documentation and/or other materials provided
|
| 80 |
+
* with the distribution.
|
| 81 |
+
* * Neither the name of the Hiroshima University nor the names of
|
| 82 |
+
* its contributors may be used to endorse or promote products
|
| 83 |
+
* derived from this software without specific prior written
|
| 84 |
+
* permission.
|
| 85 |
+
*
|
| 86 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 87 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 88 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 89 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 90 |
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 91 |
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 92 |
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 93 |
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 94 |
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 95 |
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 96 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 97 |
+
*/
|
| 98 |
+
#if !defined CURAND_MTGP32_KERNEL_H
|
| 99 |
+
#define CURAND_MTGP32_KERNEL_H
|
| 100 |
+
|
| 101 |
+
#if !defined(QUALIFIERS)
|
| 102 |
+
#define QUALIFIERS static __forceinline__ __device__
|
| 103 |
+
#endif
|
| 104 |
+
|
| 105 |
+
#ifndef __CUDACC_RTC__
|
| 106 |
+
#include <cuda_runtime.h>
|
| 107 |
+
#include <stdlib.h>
|
| 108 |
+
#include <memory.h>
|
| 109 |
+
#include <string.h>
|
| 110 |
+
#endif // ifndef __CUDACC_RTC__
|
| 111 |
+
#include <nv/target>
|
| 112 |
+
#include "curand.h"
|
| 113 |
+
#include "curand_mtgp32.h"
|
| 114 |
+
|
| 115 |
+
/**
|
| 116 |
+
* \addtogroup DEVICE Device API
|
| 117 |
+
*
|
| 118 |
+
* @{
|
| 119 |
+
*/
|
| 120 |
+
|
| 121 |
+
#ifndef __CUDA_ARCH__
|
| 122 |
+
// define blockDim and threadIdx for host compatibility call
|
| 123 |
+
extern const dim3 blockDim;
|
| 124 |
+
extern const uint3 threadIdx;
|
| 125 |
+
#endif
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
/*
|
| 129 |
+
* The function of the recursion formula calculation.
|
| 130 |
+
*
|
| 131 |
+
* @param[in] X1 the farthest part of state array.
|
| 132 |
+
* @param[in] X2 the second farthest part of state array.
|
| 133 |
+
* @param[in] Y a part of state array.
|
| 134 |
+
* @param[in] bid block id.
|
| 135 |
+
* @return output
|
| 136 |
+
*/
|
| 137 |
+
QUALIFIERS unsigned int para_rec(mtgp32_kernel_params_t * k,unsigned int X1, unsigned int X2, unsigned int Y, int bid) {
|
| 138 |
+
unsigned int X = (X1 & k->mask[0]) ^ X2;
|
| 139 |
+
unsigned int MAT;
|
| 140 |
+
|
| 141 |
+
X ^= X << k->sh1_tbl[bid];
|
| 142 |
+
Y = X ^ (Y >> k->sh2_tbl[bid]);
|
| 143 |
+
MAT = k->param_tbl[bid][Y & 0x0f];
|
| 144 |
+
return Y ^ MAT;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
/*
 * The tempering function.
 *
 * @param[in] k parameter-table set for this generator.
 * @param[in] V the output value should be tempered.
 * @param[in] T the tempering helper value.
 * @param[in] bid block id.
 * @return the tempered value.
 */
QUALIFIERS unsigned int temper(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
    /* Fold the helper word so only its low 4 bits matter. */
    T ^= T >> 16;
    T ^= T >> 8;
    return V ^ k->temper_tbl[bid][T & 0x0f];
}
|
| 163 |
+
|
| 164 |
+
/*
 * The tempering and converting function.
 * By using the preset table, converting to IEEE format
 * and tempering are done simultaneously.
 *
 * @param[in] k parameter-table set for this generator.
 * @param[in] V the output value should be tempered.
 * @param[in] T the tempering helper value.
 * @param[in] bid block id.
 * @return the tempered and converted value.
 */
QUALIFIERS unsigned int temper_single(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
    /* Fold the helper word so only its low 4 bits matter. */
    T ^= T >> 16;
    T ^= T >> 8;
    /* Shift V down by 9 bits and combine with the single-precision table. */
    return (V >> 9) ^ k->single_temper_tbl[bid][T & 0x0f];
}
|
| 184 |
+
|
| 185 |
+
/**
 * \brief Return 32-bits of pseudorandomness from a mtgp32 generator.
 *
 * Return 32-bits of pseudorandomness from the mtgp32 generator in \p state,
 * increment position of generator by the number of threads in the block.
 * Note the number of threads in the block can not exceed 256.
 *
 * \param state - Pointer to state to update
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand(curandStateMtgp32_t *state)
{
    int pos = state->k->pos_tbl[state->pIdx];
    unsigned int d = blockDim.z * blockDim.y * blockDim.x; /* threads per block; may not exceed 256 */
    /* Per-thread lane within the state.
       NOTE(review): the first term is blockDim.z * blockDim.y * threadIdx.z,
       kept verbatim from the original source (not the usual
       blockDim.x * blockDim.y linearization) -- confirm before relying on
       3-D thread blocks. */
    unsigned int t = (blockDim.z * blockDim.y * threadIdx.z) + (blockDim.x * threadIdx.y) + threadIdx.x;

    /* Advance one word of the recurrence and store it back into the
       circular state buffer. */
    unsigned int r = para_rec(state->k,
                              state->s[(t + state->offset) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
                              state->pIdx);
    state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;

    unsigned int o = temper(state->k, r,
                            state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
                            state->pIdx);
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    /* A single thread advances the shared offset for the whole block. */
    if (t == 0)
    {
        state->offset = (state->offset + d) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    return o;
}
|
| 229 |
+
/**
 * \brief Return 32-bits of pseudorandomness from a specific position in a mtgp32 generator.
 *
 * Return 32-bits of pseudorandomness from position \p index of the mtgp32 generator in \p state,
 * increment position of generator by \p n positions, which must be the total number of positions
 * updated in the state by the thread block, for this invocation.
 *
 * Note :
 * Thread indices must range from 0...\p n - 1.
 * The number of positions updated may not exceed 256.
 * A thread block may update more than one state, but a given state may not be updated by more than one thread block.
 *
 * \param state - Pointer to state to update
 * \param index - Index (0..255) of the position within the state to draw from and update
 * \param n - The total number of positions in this state that are being updated by this invocation
 *
 * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
 */
QUALIFIERS unsigned int curand_mtgp32_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
{
    int pos = state->k->pos_tbl[state->pIdx];
    unsigned int t = index;

    /* Advance one word of the recurrence and store it back into the
       circular state buffer. */
    unsigned int r = para_rec(state->k,
                              state->s[(t + state->offset) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
                              state->pIdx);
    state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;

    unsigned int o = temper(state->k, r,
                            state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
                            state->pIdx);
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    /* Only the thread handling position 0 advances the shared offset. */
    if (index == 0)
    {
        state->offset = (state->offset + n) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    return o;
}
|
| 276 |
+
/**
 * \brief Return a uniformly distributed float from a mtgp32 generator.
 *
 * Return a uniformly distributed float between \p 0.0f and \p 1.0f
 * from the mtgp32 generator in \p state, increment position of generator.
 * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
 * point outputs are never returned.
 *
 * Note: This alternate derivation of a uniform float is provided for completeness
 * with the original source
 *
 * \param state - Pointer to state to update
 *
 * \return uniformly distributed float between \p 0.0f and \p 1.0f
 */
QUALIFIERS float curand_mtgp32_single(curandStateMtgp32_t *state)
{
    int pos = state->k->pos_tbl[state->pIdx];
    unsigned int o_u;
    float o_f;

    /* Kept verbatim from the original source: t starts at
       blockDim.z * blockDim.y and only threadIdx.x is added afterwards. */
    unsigned int t = blockDim.z * blockDim.y;
    unsigned int d = t * blockDim.x; /* total threads; may not exceed 256 */
    t += threadIdx.x;

    unsigned int r = para_rec(state->k,
                              state->s[(t + state->offset) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
                              state->pIdx);
    /* NOTE(review): stored at s[t] (no offset, no mask), exactly as in the
       original source; this differs from the masked store used by curand(). */
    state->s[t] = r;

    o_u = temper_single(state->k, r,
                        state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
                        state->pIdx);
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    if (threadIdx.x == 0)
    {
        state->offset = (state->offset + d) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    /* Reinterpret the tempered/converted bits as an IEEE float. */
    memcpy(&o_f, &o_u, sizeof(o_u));
    return o_f;
}
|
| 327 |
+
|
| 328 |
+
/**
 * \brief Return a uniformly distributed float from a specific position in a mtgp32 generator.
 *
 * Return a uniformly distributed float between \p 0.0f and \p 1.0f
 * from position \p index of the mtgp32 generator in \p state, and
 * increment position of generator by \p n positions, which must be the total number of positions
 * updated in the state by the thread block, for this invocation.
 * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
 * point outputs are never returned.
 *
 * Note 1:
 * Thread indices must range from 0...\p n - 1.
 * The number of positions updated may not exceed 256.
 * A thread block may update more than one state, but a given state may not be updated by more than one thread block.
 *
 * Note 2: This alternate derivation of a uniform float is provided for completeness
 * with the original source
 *
 * \param state - Pointer to state to update
 * \param index - Index (0..255) of the position within the state to draw from and update
 * \param n - The total number of positions in this state that are being updated by this invocation
 *
 * \return uniformly distributed float between \p 0.0f and \p 1.0f
 */
QUALIFIERS float curand_mtgp32_single_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
{
    int pos = state->k->pos_tbl[state->pIdx];
    unsigned int o_u;
    float o_f;
    unsigned int t = index;

    unsigned int r = para_rec(state->k,
                              state->s[(t + state->offset) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
                              state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
                              state->pIdx);
    /* NOTE(review): stored at s[t] (no offset, no mask), exactly as in the
       original source; this differs from the masked store used by curand(). */
    state->s[t] = r;

    o_u = temper_single(state->k, r,
                        state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
                        state->pIdx);
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    if (threadIdx.x == 0)
    {
        state->offset = (state->offset + n) & MTGP32_STATE_MASK;
    }
    NV_IF_TARGET(NV_IS_DEVICE,
        __syncthreads();
    )
    /* Reinterpret the tempered/converted bits as an IEEE float. */
    memcpy(&o_f, &o_u, sizeof(o_u));
    return o_f;
}
|
| 383 |
+
|
| 384 |
+
/** @} */
|
| 385 |
+
|
| 386 |
+
#endif
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h
ADDED
|
@@ -0,0 +1,840 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_NORMAL_H_)
|
| 52 |
+
#define CURAND_NORMAL_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
#include <nv/target>
|
| 64 |
+
|
| 65 |
+
#include "curand_mrg32k3a.h"
|
| 66 |
+
#include "curand_mtgp32_kernel.h"
|
| 67 |
+
#include "curand_philox4x32_x.h"
|
| 68 |
+
#include "curand_normal_static.h"
|
| 69 |
+
|
| 70 |
+
/* Box-Muller transform on two raw 32-bit draws: returns a pair of
   independent standard normal floats. */
QUALIFIERS float2 _curand_box_muller(unsigned int x, unsigned int y)
{
    float2 result;
    /* Map the draws into half-open ranges so logf never sees zero. */
    float u = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2);
    float v = y * CURAND_2POW32_INV_2PI + (CURAND_2POW32_INV_2PI/2);
    /* Box-Muller radius (identical in both branches of the original). */
    float s = sqrtf(-2.0f * logf(u));
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        __sincosf(v, &result.x, &result.y);
    ,
        result.x = sinf(v);
        result.y = cosf(v);
    )
    result.x *= s;
    result.y *= s;
    return result;
}
|
| 88 |
+
|
| 89 |
+
/* Box-Muller pair of standard normal floats drawn from an MRG32k3a state. */
QUALIFIERS float2 curand_box_muller_mrg(curandStateMRG32k3a_t * state)
{
    float2 result;
    /* Two sequenced uniform draws: magnitude source first, then the angle. */
    float u = curand_uniform(state);
    float v = curand_uniform(state) * CURAND_2PI;
    /* Box-Muller radius (identical in both branches of the original). */
    float s = sqrtf(-2.0f * logf(u));
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        __sincosf(v, &result.x, &result.y);
    ,
        result.x = sinf(v);
        result.y = cosf(v);
    )
    result.x *= s;
    result.y *= s;
    return result;
}
|
| 108 |
+
|
| 109 |
+
/* Box-Muller transform in double precision: builds two 53-bit uniform
   samples from four 32-bit draws and returns a pair of standard normals. */
QUALIFIERS double2
_curand_box_muller_double(unsigned int x0, unsigned int x1,
                          unsigned int y0, unsigned int y1)
{
    double2 result;
    /* Combine each pair of 32-bit words into one 53-bit sample. */
    unsigned long long zx = (unsigned long long)x0 ^
                            ((unsigned long long)x1 << (53 - 32));
    unsigned long long zy = (unsigned long long)y0 ^
                            ((unsigned long long)y1 << (53 - 32));
    double u = zx * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
    /* v is expressed in units of pi, matching sincospi below. */
    double v = zy * (CURAND_2POW53_INV_DOUBLE*2.0) + CURAND_2POW53_INV_DOUBLE;
    double s = sqrt(-2.0 * log(u));

    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        sincospi(v, &result.x, &result.y);
    ,
        result.x = sin(v*CURAND_PI_DOUBLE);
        result.y = cos(v*CURAND_PI_DOUBLE);
    )
    result.x *= s;
    result.y *= s;
    return result;
}
|
| 133 |
+
|
| 134 |
+
/* Box-Muller pair of standard normal doubles drawn from an MRG32k3a state. */
QUALIFIERS double2
curand_box_muller_mrg_double(curandStateMRG32k3a_t * state)
{
    double2 result;
    /* Sequenced draws: magnitude source first, then the angle (in units
       of pi, matching sincospi below). */
    double u = curand_uniform_double(state);
    double v = curand_uniform_double(state) * 2.0;

    double s = sqrt(-2.0 * log(u));
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
        sincospi(v, &result.x, &result.y);
    ,
        result.x = sin(v*CURAND_PI_DOUBLE);
        result.y = cos(v*CURAND_PI_DOUBLE);
    )
    result.x *= s;
    result.y *= s;
    return result;
}
|
| 153 |
+
|
| 154 |
+
/* Draw two raw words from any generator and run them through Box-Muller. */
template <typename R>
QUALIFIERS float2 curand_box_muller(R *state)
{
    /* Two sequenced draws; order matters for reproducibility. */
    unsigned int a = curand(state);
    unsigned int b = curand(state);
    return _curand_box_muller(a, b);
}
|
| 163 |
+
|
| 164 |
+
/* Four standard normal floats from one 4-word draw (two Box-Muller pairs). */
template <typename R>
QUALIFIERS float4 curand_box_muller4(R *state)
{
    uint4 raw = curand4(state);
    float2 ab = _curand_box_muller(raw.x, raw.y);
    float2 cd = _curand_box_muller(raw.z, raw.w);
    float4 result;
    result.x = ab.x;
    result.y = ab.y;
    result.z = cd.x;
    result.w = cd.y;
    return result;
}
|
| 179 |
+
|
| 180 |
+
/* Two standard normal doubles; needs four 32-bit draws (two per sample). */
template <typename R>
QUALIFIERS double2 curand_box_muller_double(R *state)
{
    /* Sequenced draws; order matters for reproducibility. */
    unsigned int x0 = curand(state);
    unsigned int x1 = curand(state);
    unsigned int y0 = curand(state);
    unsigned int y1 = curand(state);
    return _curand_box_muller_double(x0, x1, y0, y1);
}
|
| 191 |
+
|
| 192 |
+
/* Two standard normal doubles from a single 4-word draw. */
template <typename R>
QUALIFIERS double2 curand_box_muller2_double(R *state)
{
    uint4 w = curand4(state);
    return _curand_box_muller_double(w.x, w.y, w.z, w.w);
}
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
/* Four standard normal doubles from two 4-word draws. */
template <typename R>
QUALIFIERS double4 curand_box_muller4_double(R *state)
{
    /* Sequenced draws; order matters for reproducibility. */
    uint4 a = curand4(state);
    uint4 b = curand4(state);
    double2 p = _curand_box_muller_double(a.x, a.y, a.z, a.w);
    double2 q = _curand_box_muller_double(b.x, b.y, b.z, b.w);
    double4 result;
    result.x = p.x;
    result.y = p.y;
    result.z = q.x;
    result.w = q.y;
    return result;
}
|
| 221 |
+
|
| 222 |
+
//QUALIFIERS float _curand_normal_icdf(unsigned int x)
|
| 223 |
+
//{
|
| 224 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 225 |
+
// float s = CURAND_SQRT2;
|
| 226 |
+
// // Mirror to avoid loss of precision
|
| 227 |
+
// if(x > 0x80000000UL) {
|
| 228 |
+
// x = 0xffffffffUL - x;
|
| 229 |
+
// s = -s;
|
| 230 |
+
// }
|
| 231 |
+
// float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 232 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 233 |
+
// return s * erfcinvf(2.0f * p);
|
| 234 |
+
//#else
|
| 235 |
+
// x++; //suppress warnings
|
| 236 |
+
// return 0.0f;
|
| 237 |
+
//#endif
|
| 238 |
+
//}
|
| 239 |
+
//
|
| 240 |
+
//QUALIFIERS float _curand_normal_icdf(unsigned long long x)
|
| 241 |
+
//{
|
| 242 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 243 |
+
// unsigned int t = (unsigned int)(x >> 32);
|
| 244 |
+
// float s = CURAND_SQRT2;
|
| 245 |
+
// // Mirror to avoid loss of precision
|
| 246 |
+
// if(t > 0x80000000UL) {
|
| 247 |
+
// t = 0xffffffffUL - t;
|
| 248 |
+
// s = -s;
|
| 249 |
+
// }
|
| 250 |
+
// float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
|
| 251 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 252 |
+
// return s * erfcinvf(2.0f * p);
|
| 253 |
+
//#else
|
| 254 |
+
// x++;
|
| 255 |
+
// return 0.0f;
|
| 256 |
+
//#endif
|
| 257 |
+
//}
|
| 258 |
+
//
|
| 259 |
+
//QUALIFIERS double _curand_normal_icdf_double(unsigned int x)
|
| 260 |
+
//{
|
| 261 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 262 |
+
// double s = CURAND_SQRT2_DOUBLE;
|
| 263 |
+
// // Mirror to avoid loss of precision
|
| 264 |
+
// if(x > 0x80000000UL) {
|
| 265 |
+
// x = 0xffffffffUL - x;
|
| 266 |
+
// s = -s;
|
| 267 |
+
// }
|
| 268 |
+
// double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
|
| 269 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 270 |
+
// return s * erfcinv(2.0 * p);
|
| 271 |
+
//#else
|
| 272 |
+
// x++;
|
| 273 |
+
// return 0.0;
|
| 274 |
+
//#endif
|
| 275 |
+
//}
|
| 276 |
+
//
|
| 277 |
+
//QUALIFIERS double _curand_normal_icdf_double(unsigned long long x)
|
| 278 |
+
//{
|
| 279 |
+
//#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
|
| 280 |
+
// double s = CURAND_SQRT2_DOUBLE;
|
| 281 |
+
// x >>= 11;
|
| 282 |
+
// // Mirror to avoid loss of precision
|
| 283 |
+
// if(x > 0x10000000000000UL) {
|
| 284 |
+
// x = 0x1fffffffffffffUL - x;
|
| 285 |
+
// s = -s;
|
| 286 |
+
// }
|
| 287 |
+
// double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
|
| 288 |
+
// // p is in (0, 0.5], 2p is in (0, 1]
|
| 289 |
+
// return s * erfcinv(2.0 * p);
|
| 290 |
+
//#else
|
| 291 |
+
// x++;
|
| 292 |
+
// return 0.0;
|
| 293 |
+
//#endif
|
| 294 |
+
//}
|
| 295 |
+
//
|
| 296 |
+
|
| 297 |
+
/**
 * \brief Return a normally distributed float from an XORWOW generator.
 *
 * Return a single normally distributed float with mean \p 0.0f and
 * standard deviation \p 1.0f from the XORWOW generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then returns them one at a time.
 * See ::curand_normal2() for a more efficient version that returns
 * both results at once.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateXORWOW_t *state)
{
    /* Second value of the previous Box-Muller pair is cached in the state. */
    if(state->boxmuller_flag == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    /* Sequenced draws; order matters for reproducibility. */
    unsigned int x = curand(state);
    unsigned int y = curand(state);
    float2 v = _curand_box_muller(x, y);
    state->boxmuller_extra = v.y;
    state->boxmuller_flag = EXTRA_FLAG_NORMAL;
    return v.x;
}
|
| 327 |
+
|
| 328 |
+
/**
 * \brief Return a normally distributed float from a Philox4_32_10 generator.
 *
 * Return a single normally distributed float with mean \p 0.0f and
 * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
 * increment position of generator by one.
 *
 * The implementation uses a Box-Muller transform to generate two
 * normally distributed results, then returns them one at a time.
 * See ::curand_normal2() for a more efficient version that returns
 * both results at once.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */

QUALIFIERS float curand_normal(curandStatePhilox4_32_10_t *state)
{
    /* Second value of the previous Box-Muller pair is cached in the state. */
    if(state->boxmuller_flag == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    /* Sequenced draws; order matters for reproducibility. */
    unsigned int x = curand(state);
    unsigned int y = curand(state);
    float2 v = _curand_box_muller(x, y);
    state->boxmuller_extra = v.y;
    state->boxmuller_flag = EXTRA_FLAG_NORMAL;
    return v.x;
}
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
/**
 * \brief Return a normally distributed float from an MRG32k3a generator.
 *
 * Produces one float drawn from the standard normal distribution
 * (mean \p 0.0f, standard deviation \p 1.0f) using the MRG32k3a
 * generator in \p state, advancing the generator position by one.
 *
 * Internally a Box-Muller transform yields normals in pairs; the second
 * member of each pair is cached in \p state and handed out on the next
 * call. ::curand_normal2() returns both members at once and is more
 * efficient when pairs are acceptable.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateMRG32k3a_t *state)
{
    // Consume a cached second Box-Muller output first, if present.
    if(state->boxmuller_flag == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag = 0;
        return state->boxmuller_extra;
    }
    // MRG32k3a has a dedicated Box-Muller helper that draws directly
    // from the generator state.
    float2 pair = curand_box_muller_mrg(state);
    state->boxmuller_extra = pair.y;
    state->boxmuller_flag = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 389 |
+
|
| 390 |
+
/**
 * \brief Return two normally distributed floats from an XORWOW generator.
 *
 * Produces a pair of independent floats, each drawn from the standard
 * normal distribution (mean \p 0.0f, standard deviation \p 1.0f), using
 * the XORWOW generator in \p state and advancing the generator position
 * by two.
 *
 * Both values come from a single Box-Muller transform, so nothing is
 * cached and nothing is wasted.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float2 where each element is from a
 * distribution with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float2 curand_normal2(curandStateXORWOW_t *state)
{
    float2 pair = curand_box_muller(state);
    return pair;
}
|
| 409 |
+
/**
 * \brief Return two normally distributed floats from an Philox4_32_10 generator.
 *
 * Produces a pair of independent floats, each drawn from the standard
 * normal distribution (mean \p 0.0f, standard deviation \p 1.0f), using
 * the Philox4_32_10 generator in \p state and advancing the generator
 * position by two.
 *
 * Both values come from a single Box-Muller transform, so nothing is
 * cached and nothing is wasted.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float2 where each element is from a
 * distribution with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float2 curand_normal2(curandStatePhilox4_32_10_t *state)
{
    float2 pair = curand_box_muller(state);
    return pair;
}
|
| 428 |
+
|
| 429 |
+
/**
 * \brief Return four normally distributed floats from an Philox4_32_10 generator.
 *
 * Return four normally distributed floats with mean \p 0.0f and
 * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
 * increment position of generator by four.
 *
 * The implementation uses a Box-Muller transform to generate four
 * normally distributed results.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float4 where each element is from a
 * distribution with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float4 curand_normal4(curandStatePhilox4_32_10_t *state)
{
    return curand_box_muller4(state);
}
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
/**
 * \brief Return two normally distributed floats from an MRG32k3a generator.
 *
 * Produces a pair of independent floats, each drawn from the standard
 * normal distribution (mean \p 0.0f, standard deviation \p 1.0f), using
 * the MRG32k3a generator in \p state and advancing the generator
 * position by two.
 *
 * Both values come from a single Box-Muller transform, so nothing is
 * cached and nothing is wasted.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float2 where each element is from a
 * distribution with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float2 curand_normal2(curandStateMRG32k3a_t *state)
{
    float2 pair = curand_box_muller_mrg(state);
    return pair;
}
|
| 470 |
+
|
| 471 |
+
/**
 * \brief Return a normally distributed float from a MTGP32 generator.
 *
 * Produces one float drawn from the standard normal distribution
 * (mean \p 0.0f, standard deviation \p 1.0f) using the MTGP32 generator
 * in \p state, advancing the generator position.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, so no value caching is needed.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateMtgp32_t *state)
{
    float z = _curand_normal_icdf(curand(state));
    return z;
}
|
| 489 |
+
/**
 * \brief Return a normally distributed float from a Sobol32 generator.
 *
 * Produces one float drawn from the standard normal distribution
 * (mean \p 0.0f, standard deviation \p 1.0f) using the Sobol32 generator
 * in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateSobol32_t *state)
{
    float z = _curand_normal_icdf(curand(state));
    return z;
}
|
| 507 |
+
|
| 508 |
+
/**
 * \brief Return a normally distributed float from a scrambled Sobol32 generator.
 *
 * Produces one float drawn from the standard normal distribution
 * (mean \p 0.0f, standard deviation \p 1.0f) using the scrambled Sobol32
 * generator in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateScrambledSobol32_t *state)
{
    float z = _curand_normal_icdf(curand(state));
    return z;
}
|
| 526 |
+
|
| 527 |
+
/**
 * \brief Return a normally distributed float from a Sobol64 generator.
 *
 * Produces one float drawn from the standard normal distribution
 * (mean \p 0.0f, standard deviation \p 1.0f) using the Sobol64 generator
 * in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateSobol64_t *state)
{
    float z = _curand_normal_icdf(curand(state));
    return z;
}
|
| 545 |
+
|
| 546 |
+
/**
 * \brief Return a normally distributed float from a scrambled Sobol64 generator.
 *
 * Produces one float drawn from the standard normal distribution
 * (mean \p 0.0f, standard deviation \p 1.0f) using the scrambled Sobol64
 * generator in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
 */
QUALIFIERS float curand_normal(curandStateScrambledSobol64_t *state)
{
    float z = _curand_normal_icdf(curand(state));
    return z;
}
|
| 564 |
+
|
| 565 |
+
/**
 * \brief Return a normally distributed double from an XORWOW generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the XORWOW generator
 * in \p state, advancing the generator position.
 *
 * Internally a Box-Muller transform yields normals in pairs; the second
 * member of each pair is cached in \p state and handed out on the next
 * call. ::curand_normal2_double() returns both members at once and is
 * more efficient when pairs are acceptable.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateXORWOW_t *state)
{
    // Consume the cached second Box-Muller output when one is pending.
    if(state->boxmuller_flag_double == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    // Four 32-bit draws feed one double-precision Box-Muller transform.
    unsigned int a = curand(state);
    unsigned int b = curand(state);
    unsigned int c = curand(state);
    unsigned int d = curand(state);
    double2 pair = _curand_box_muller_double(a, b, c, d);
    state->boxmuller_extra_double = pair.y;
    state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 597 |
+
|
| 598 |
+
/**
 * \brief Return a normally distributed double from an Philox4_32_10 generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the Philox4_32_10
 * generator in \p state, advancing the generator position.
 *
 * Internally a Box-Muller transform yields normals in pairs; the second
 * member of each pair is cached in \p state and handed out on the next
 * call. ::curand_normal2_double() returns both members at once and is
 * more efficient when pairs are acceptable.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStatePhilox4_32_10_t *state)
{
    // Consume the cached second Box-Muller output when one is pending.
    if(state->boxmuller_flag_double == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    // One curand4 call supplies the four 32-bit words a double-precision
    // Box-Muller transform consumes.
    uint4 w = curand4(state);
    double2 pair = _curand_box_muller_double(w.x, w.y, w.z, w.w);
    state->boxmuller_extra_double = pair.y;
    state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
/**
 * \brief Return a normally distributed double from an MRG32k3a generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the MRG32k3a generator
 * in \p state, advancing the generator position.
 *
 * Internally a Box-Muller transform yields normals in pairs; the second
 * member of each pair is cached in \p state and handed out on the next
 * call. ::curand_normal2_double() returns both members at once and is
 * more efficient when pairs are acceptable.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateMRG32k3a_t *state)
{
    // Consume the cached second Box-Muller output when one is pending.
    if(state->boxmuller_flag_double == EXTRA_FLAG_NORMAL) {
        state->boxmuller_flag_double = 0;
        return state->boxmuller_extra_double;
    }
    // MRG32k3a has a dedicated double-precision Box-Muller helper.
    double2 pair = curand_box_muller_mrg_double(state);
    state->boxmuller_extra_double = pair.y;
    state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
    return pair.x;
}
|
| 657 |
+
|
| 658 |
+
/**
 * \brief Return two normally distributed doubles from an XORWOW generator.
 *
 * Produces a pair of independent doubles, each drawn from the standard
 * normal distribution (mean \p 0.0, standard deviation \p 1.0), using
 * the XORWOW generator in \p state and advancing the generator position
 * by 2.
 *
 * Both values come from a single Box-Muller transform, so nothing is
 * cached and nothing is wasted.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double2 where each element is from a
 * distribution with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double2 curand_normal2_double(curandStateXORWOW_t *state)
{
    double2 pair = curand_box_muller_double(state);
    return pair;
}
|
| 677 |
+
|
| 678 |
+
/**
 * \brief Return two normally distributed doubles from an Philox4_32_10 generator.
 *
 * Produces a pair of independent doubles, each drawn from the standard
 * normal distribution (mean \p 0.0, standard deviation \p 1.0), using
 * the Philox4_32_10 generator in \p state and advancing the generator
 * position by 2.
 *
 * Both values come from a single Box-Muller transform, so nothing is
 * cached and nothing is wasted.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double2 where each element is from a
 * distribution with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double2 curand_normal2_double(curandStatePhilox4_32_10_t *state)
{
    // One curand4 call supplies the four 32-bit words a double-precision
    // Box-Muller pair consumes.
    uint4 w = curand4(state);
    double2 pair = _curand_box_muller_double(w.x, w.y, w.z, w.w);
    return pair;
}
|
| 705 |
+
|
| 706 |
+
// not a part of API
// Returns four standard-normal doubles from one Philox4_32_10 state.
QUALIFIERS double4 curand_normal4_double(curandStatePhilox4_32_10_t *state)
{
    // Two curand4 draws give eight 32-bit words, enough for two
    // double-precision Box-Muller pairs.
    uint4 lo = curand4(state);
    uint4 hi = curand4(state);
    double2 p0 = _curand_box_muller_double(lo.x, lo.y, lo.z, lo.w);
    double2 p1 = _curand_box_muller_double(hi.x, hi.y, hi.z, hi.w);
    double4 out;
    out.x = p0.x;
    out.y = p0.y;
    out.z = p1.x;
    out.w = p1.y;
    return out;
}
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
/**
 * \brief Return two normally distributed doubles from an MRG32k3a generator.
 *
 * Produces a pair of independent doubles, each drawn from the standard
 * normal distribution (mean \p 0.0, standard deviation \p 1.0), using
 * the MRG32k3a generator in \p state and advancing the generator
 * position.
 *
 * Both values come from a single Box-Muller transform, so nothing is
 * cached and nothing is wasted.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double2 where each element is from a
 * distribution with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double2 curand_normal2_double(curandStateMRG32k3a_t *state)
{
    double2 pair = curand_box_muller_mrg_double(state);
    return pair;
}
|
| 745 |
+
|
| 746 |
+
/**
 * \brief Return a normally distributed double from an MTGP32 generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the MTGP32 generator
 * in \p state, advancing the generator position.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, so no value caching is needed.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateMtgp32_t *state)
{
    double z = _curand_normal_icdf_double(curand(state));
    return z;
}
|
| 764 |
+
|
| 765 |
+
/**
 * \brief Return a normally distributed double from an Sobol32 generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the Sobol32 generator
 * in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateSobol32_t *state)
{
    double z = _curand_normal_icdf_double(curand(state));
    return z;
}
|
| 783 |
+
|
| 784 |
+
/**
 * \brief Return a normally distributed double from a scrambled Sobol32 generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the scrambled Sobol32
 * generator in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateScrambledSobol32_t *state)
{
    double z = _curand_normal_icdf_double(curand(state));
    return z;
}
|
| 802 |
+
|
| 803 |
+
/**
 * \brief Return a normally distributed double from a Sobol64 generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the Sobol64 generator
 * in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateSobol64_t *state)
{
    double z = _curand_normal_icdf_double(curand(state));
    return z;
}
|
| 821 |
+
|
| 822 |
+
/**
 * \brief Return a normally distributed double from a scrambled Sobol64 generator.
 *
 * Produces one double drawn from the standard normal distribution
 * (mean \p 0.0, standard deviation \p 1.0) using the scrambled Sobol64
 * generator in \p state, advancing the generator position by one.
 *
 * The raw draw is mapped through the inverse cumulative distribution
 * function of the standard normal, which preserves the low-discrepancy
 * structure of the quasirandom sequence.
 *
 * \param state - Pointer to state to update
 *
 * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
 */
QUALIFIERS double curand_normal_double(curandStateScrambledSobol64_t *state)
{
    double z = _curand_normal_icdf_double(curand(state));
    return z;
}
|
| 840 |
+
#endif // !defined(CURAND_NORMAL_H_)
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h
ADDED
|
@@ -0,0 +1,763 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_POISSON_H_)
|
| 52 |
+
#define CURAND_POISSON_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include <nv/target>
|
| 65 |
+
|
| 66 |
+
#include "curand_mrg32k3a.h"
|
| 67 |
+
#include "curand_mtgp32_kernel.h"
|
| 68 |
+
#include "curand_philox4x32_x.h"
|
| 69 |
+
|
| 70 |
+
#define CR_CUDART_PI 3.1415926535897931e+0
|
| 71 |
+
#define CR_CUDART_TWO_TO_52 4503599627370496.0
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
/* Fast approximate reciprocal square root.
   Device builds use the hardware rsqrt.approx.f32.ftz PTX instruction
   (flush-to-zero, approximate — not IEEE round-to-nearest); host builds
   fall back to 1/sqrtf. */
QUALIFIERS float __cr_rsqrt(float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                      asm ("rsqrt.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
                      ,
                      a = 1.0f / sqrtf (a);
                      )
    return a;
}
|
| 83 |
+
|
| 84 |
+
/* Fast approximate expf.
   Device builds compute 2^(a * log2(e)) with the ex2.approx PTX
   instruction; 1.4426950408889634074 is log2(e). Host builds use expf. */
QUALIFIERS float __cr_exp (float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                      a = a * 1.4426950408889634074;
                      asm ("ex2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
                      ,
                      a = expf (a);
                      )
    return a;
}
|
| 94 |
+
|
| 95 |
+
/* Fast approximate logf.
   Device builds compute log2(a) with the lg2.approx PTX instruction and
   rescale by ln(2) = 0.69314718055994530942. Host builds use logf. */
QUALIFIERS float __cr_log (float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                      asm ("lg2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
                      a = a * 0.69314718055994530942;
                      ,
                      a = logf (a);
                      )
    return a;
}
|
| 105 |
+
|
| 106 |
+
/* Fast approximate reciprocal.
   Device builds use the rcp.approx.f32.ftz PTX instruction; host builds
   fall back to an ordinary division. */
QUALIFIERS float __cr_rcp (float a)
{
    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
                      asm ("rcp.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
                      ,
                      a = 1.0f / a;
                      )
    return a;
}
|
| 115 |
+
|
| 116 |
+
/* Computes regularized gamma function: gammainc(a,x)/gamma(a).
   Uses a two-level parametrized logistic-style approximation:
   the shape parameter a fixes alpha and beta, then the result is
   1 / (1 + exp(alpha*(a-x) - beta))^2. Approximate only; intended for
   the Poisson sampling path. */
QUALIFIERS float __cr_pgammainc (float a, float x)
{
    /* First level parametrization constants */
    const float ma1 = 1.43248035075540910f;
    const float ma2 = 0.12400979329415655f;
    const float ma3 = 0.00025361074907033f;
    const float mb1 = 0.21096734870196546f;
    const float mb2 = 1.97381164089999420f;
    const float mb3 = 0.94201734077887530f;

    /* Second level parametrization constants (depend only on a) */
    float alpha = ma1 * __cr_rsqrt (a - ma2) + ma3;
    float beta  = mb1 * __cr_rsqrt (a - mb2) + mb3;

    /* Final approximation (depends on a and x) */
    float t = alpha * (a - x) - beta;
    t = 1.0f + __cr_exp (t);

    /* Negative a,x or a,x=NAN would require special handling
       (intentionally disabled in the original):
       t = !(x > 0 && a >= 0) ? 0.0 : t; */
    return __cr_rcp (t * t);
}
|
| 149 |
+
|
| 150 |
+
/* Computes inverse of pgammainc */
|
| 151 |
+
QUALIFIERS float __cr_pgammaincinv (float a, float y)
|
| 152 |
+
{
|
| 153 |
+
float t, alpha, beta;
|
| 154 |
+
|
| 155 |
+
/* First level parametrization constants */
|
| 156 |
+
|
| 157 |
+
float ma1 = 1.43248035075540910f,
|
| 158 |
+
ma2 = 0.12400979329415655f,
|
| 159 |
+
ma3 = 0.00025361074907033f,
|
| 160 |
+
mb1 = 0.21096734870196546f,
|
| 161 |
+
mb2 = 1.97381164089999420f,
|
| 162 |
+
mb3 = 0.94201734077887530f;
|
| 163 |
+
|
| 164 |
+
/* Second level parametrization constants (depends only on a) */
|
| 165 |
+
|
| 166 |
+
alpha = __cr_rsqrt (a - ma2);
|
| 167 |
+
alpha = ma1 * alpha + ma3;
|
| 168 |
+
beta = __cr_rsqrt (a - mb2);
|
| 169 |
+
beta = mb1 * beta + mb3;
|
| 170 |
+
|
| 171 |
+
/* Final approximation (depends on a and y) */
|
| 172 |
+
|
| 173 |
+
t = __cr_rsqrt (y) - 1.0f;
|
| 174 |
+
t = __cr_log (t);
|
| 175 |
+
t = beta + t;
|
| 176 |
+
t = - t * __cr_rcp (alpha) + a;
|
| 177 |
+
/* Negative a,x or a,x=NAN requires special handling */
|
| 178 |
+
//t = !(y > 0 && a >= 0) ? 0.0 : t;
|
| 179 |
+
return t;
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
#if defined(__CUDACC_RDC__) && (__cplusplus >= 201703L) && defined(__cpp_inline_variables)
|
| 183 |
+
inline __constant__ double __cr_lgamma_table [] = {
|
| 184 |
+
#else
|
| 185 |
+
static __constant__ double __cr_lgamma_table [] = {
|
| 186 |
+
#endif
|
| 187 |
+
0.000000000000000000e-1,
|
| 188 |
+
0.000000000000000000e-1,
|
| 189 |
+
6.931471805599453094e-1,
|
| 190 |
+
1.791759469228055001e0,
|
| 191 |
+
3.178053830347945620e0,
|
| 192 |
+
4.787491742782045994e0,
|
| 193 |
+
6.579251212010100995e0,
|
| 194 |
+
8.525161361065414300e0,
|
| 195 |
+
1.060460290274525023e1
|
| 196 |
+
};
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
QUALIFIERS double __cr_lgamma_integer(int a)
|
| 200 |
+
{
|
| 201 |
+
double s;
|
| 202 |
+
double t;
|
| 203 |
+
double fa = fabs((float)a);
|
| 204 |
+
double sum;
|
| 205 |
+
|
| 206 |
+
if (a > 8) {
|
| 207 |
+
/* Stirling approximation; coefficients from Hart et al, "Computer
|
| 208 |
+
* Approximations", Wiley 1968. Approximation 5404.
|
| 209 |
+
*/
|
| 210 |
+
s = 1.0 / fa;
|
| 211 |
+
t = s * s;
|
| 212 |
+
sum = -0.1633436431e-2;
|
| 213 |
+
sum = sum * t + 0.83645878922e-3;
|
| 214 |
+
sum = sum * t - 0.5951896861197e-3;
|
| 215 |
+
sum = sum * t + 0.793650576493454e-3;
|
| 216 |
+
sum = sum * t - 0.277777777735865004e-2;
|
| 217 |
+
sum = sum * t + 0.833333333333331018375e-1;
|
| 218 |
+
sum = sum * s + 0.918938533204672;
|
| 219 |
+
s = 0.5 * log (fa);
|
| 220 |
+
t = fa - 0.5;
|
| 221 |
+
s = s * t;
|
| 222 |
+
t = s - fa;
|
| 223 |
+
s = s + sum;
|
| 224 |
+
t = t + s;
|
| 225 |
+
return t;
|
| 226 |
+
} else {
|
| 227 |
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
| 228 |
+
return __cr_lgamma_table [(int) fa-1];
|
| 229 |
+
,
|
| 230 |
+
switch(a) {
|
| 231 |
+
case 1: return 0.000000000000000000e-1;
|
| 232 |
+
case 2: return 0.000000000000000000e-1;
|
| 233 |
+
case 3: return 6.931471805599453094e-1;
|
| 234 |
+
case 4: return 1.791759469228055001e0;
|
| 235 |
+
case 5: return 3.178053830347945620e0;
|
| 236 |
+
case 6: return 4.787491742782045994e0;
|
| 237 |
+
case 7: return 6.579251212010100995e0;
|
| 238 |
+
case 8: return 8.525161361065414300e0;
|
| 239 |
+
default: return 1.060460290274525023e1;
|
| 240 |
+
}
|
| 241 |
+
)
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
#define KNUTH_FLOAT_CONST 60.0
|
| 246 |
+
template <typename T>
|
| 247 |
+
// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
|
| 248 |
+
QUALIFIERS unsigned int curand_poisson_knuth(T *state, float lambda)
|
| 249 |
+
{
|
| 250 |
+
unsigned int k = 0;
|
| 251 |
+
float p = expf(lambda);
|
| 252 |
+
do{
|
| 253 |
+
k++;
|
| 254 |
+
p *= curand_uniform(state);
|
| 255 |
+
}while (p > 1.0);
|
| 256 |
+
return k-1;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
template <typename T>
|
| 260 |
+
// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
|
| 261 |
+
QUALIFIERS uint4 curand_poisson_knuth4(T *state, float lambda)
|
| 262 |
+
{
|
| 263 |
+
uint4 k = {0,0,0,0};
|
| 264 |
+
float exp_lambda = expf(lambda);
|
| 265 |
+
float4 p={ exp_lambda,exp_lambda,exp_lambda,exp_lambda };
|
| 266 |
+
do{
|
| 267 |
+
k.x++;
|
| 268 |
+
p.x *= curand_uniform(state);
|
| 269 |
+
}while (p.x > 1.0);
|
| 270 |
+
do{
|
| 271 |
+
k.y++;
|
| 272 |
+
p.y *= curand_uniform(state);
|
| 273 |
+
}while (p.y > 1.0);
|
| 274 |
+
do{
|
| 275 |
+
k.z++;
|
| 276 |
+
p.z *= curand_uniform(state);
|
| 277 |
+
}while (p.z > 1.0);
|
| 278 |
+
do{
|
| 279 |
+
k.w++;
|
| 280 |
+
p.w *= curand_uniform(state);
|
| 281 |
+
}while (p.w > 1.0);
|
| 282 |
+
|
| 283 |
+
k.x--;
|
| 284 |
+
k.y--;
|
| 285 |
+
k.z--;
|
| 286 |
+
k.w--;
|
| 287 |
+
return k;
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
template <typename T>
|
| 291 |
+
// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
|
| 292 |
+
QUALIFIERS unsigned int _curand_M2_double(T x, curandDistributionM2Shift_t distributionM2)
|
| 293 |
+
{
|
| 294 |
+
double u = _curand_uniform_double(x);
|
| 295 |
+
int j = (int) floor(distributionM2->length*u);
|
| 296 |
+
|
| 297 |
+
double histogramVj;
|
| 298 |
+
unsigned int histogramKj;
|
| 299 |
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
|
| 300 |
+
histogramVj = __ldg( &(distributionM2->histogram->V[j]));
|
| 301 |
+
histogramKj = __ldg( &(distributionM2->histogram->K[j]));
|
| 302 |
+
,
|
| 303 |
+
histogramVj = distributionM2->histogram->V[j];
|
| 304 |
+
histogramKj = distributionM2->histogram->K[j];
|
| 305 |
+
)
|
| 306 |
+
//if (u < distributionM2->histogram->V[j]) return distributionM2->shift + j;
|
| 307 |
+
//return distributionM2->shift + distributionM2->histogram->K[j];
|
| 308 |
+
if (u < histogramVj) return distributionM2->shift + j;
|
| 309 |
+
return distributionM2->shift + histogramKj;
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
template <typename T>
|
| 313 |
+
// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
|
| 314 |
+
QUALIFIERS uint4 _curand_M2_double4(T x, curandDistributionM2Shift_t distributionM2)
|
| 315 |
+
{
|
| 316 |
+
double4 u;
|
| 317 |
+
uint4 result = {0,0,0,0};
|
| 318 |
+
int4 flag = {1,1,1,1};
|
| 319 |
+
|
| 320 |
+
u.x = _curand_uniform_double(x.x);
|
| 321 |
+
u.y = _curand_uniform_double(x.y);
|
| 322 |
+
u.z = _curand_uniform_double(x.z);
|
| 323 |
+
u.w = _curand_uniform_double(x.w);
|
| 324 |
+
|
| 325 |
+
int4 j;
|
| 326 |
+
j.x = (int) floor(distributionM2->length*u.x);
|
| 327 |
+
j.y = (int) floor(distributionM2->length*u.y);
|
| 328 |
+
j.z = (int) floor(distributionM2->length*u.z);
|
| 329 |
+
j.w = (int) floor(distributionM2->length*u.w);
|
| 330 |
+
// int result;
|
| 331 |
+
|
| 332 |
+
double histogramVjx;
|
| 333 |
+
double histogramVjy;
|
| 334 |
+
double histogramVjz;
|
| 335 |
+
double histogramVjw;
|
| 336 |
+
unsigned int histogramKjx;
|
| 337 |
+
unsigned int histogramKjy;
|
| 338 |
+
unsigned int histogramKjz;
|
| 339 |
+
unsigned int histogramKjw;
|
| 340 |
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
|
| 341 |
+
histogramVjx = __ldg( &(distributionM2->histogram->V[j.x]));
|
| 342 |
+
histogramVjy = __ldg( &(distributionM2->histogram->V[j.y]));
|
| 343 |
+
histogramVjz = __ldg( &(distributionM2->histogram->V[j.z]));
|
| 344 |
+
histogramVjw = __ldg( &(distributionM2->histogram->V[j.w]));
|
| 345 |
+
|
| 346 |
+
histogramKjx = __ldg( &(distributionM2->histogram->K[j.x]));
|
| 347 |
+
histogramKjy = __ldg( &(distributionM2->histogram->K[j.y]));
|
| 348 |
+
histogramKjz = __ldg( &(distributionM2->histogram->K[j.z]));
|
| 349 |
+
histogramKjw = __ldg( &(distributionM2->histogram->K[j.w]));
|
| 350 |
+
,
|
| 351 |
+
histogramVjx = distributionM2->histogram->V[j.x];
|
| 352 |
+
histogramVjy = distributionM2->histogram->V[j.y];
|
| 353 |
+
histogramVjz = distributionM2->histogram->V[j.z];
|
| 354 |
+
histogramVjw = distributionM2->histogram->V[j.w];
|
| 355 |
+
|
| 356 |
+
histogramKjx = distributionM2->histogram->K[j.x];
|
| 357 |
+
histogramKjy = distributionM2->histogram->K[j.y];
|
| 358 |
+
histogramKjz = distributionM2->histogram->K[j.z];
|
| 359 |
+
histogramKjw = distributionM2->histogram->K[j.w];
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
if (u.x < histogramVjx){ result.x = distributionM2->shift + j.x; flag.x = 0; }
|
| 363 |
+
if (u.y < histogramVjy){ result.y = distributionM2->shift + j.y; flag.y = 0; }
|
| 364 |
+
if (u.z < histogramVjz){ result.z = distributionM2->shift + j.z; flag.z = 0; }
|
| 365 |
+
if (u.w < histogramVjw){ result.w = distributionM2->shift + j.w; flag.w = 0; }
|
| 366 |
+
//return distributionM2->shift + distributionM2->histogram->K[j];
|
| 367 |
+
|
| 368 |
+
if(flag.x) result.x = distributionM2->shift + histogramKjx;
|
| 369 |
+
if(flag.y) result.y = distributionM2->shift + histogramKjy;
|
| 370 |
+
if(flag.z) result.z = distributionM2->shift + histogramKjz;
|
| 371 |
+
if(flag.w) result.w = distributionM2->shift + histogramKjw;
|
| 372 |
+
|
| 373 |
+
return result;
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
template <typename STATE>
|
| 377 |
+
QUALIFIERS unsigned int curand_M2_double(STATE *state, curandDistributionM2Shift_t distributionM2)
|
| 378 |
+
{
|
| 379 |
+
return _curand_M2_double(curand(state), distributionM2);
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
template <typename STATE>
|
| 383 |
+
QUALIFIERS uint4 curand_M2_double4(STATE *state, curandDistributionM2Shift_t distributionM2)
|
| 384 |
+
{
|
| 385 |
+
return _curand_M2_double4(curand4(state), distributionM2);
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
template <typename T>
|
| 390 |
+
QUALIFIERS unsigned int _curand_binary_search_double(T x, curandDistributionShift_t distribution)
|
| 391 |
+
{
|
| 392 |
+
double u = _curand_uniform_double(x);
|
| 393 |
+
int min = 0;
|
| 394 |
+
int max = distribution->length-1;
|
| 395 |
+
do{
|
| 396 |
+
int mid = (max + min)/2;
|
| 397 |
+
double probability_mid;
|
| 398 |
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
|
| 399 |
+
probability_mid = __ldg( &(distribution->probability[mid]));
|
| 400 |
+
,
|
| 401 |
+
probability_mid = distribution->probability[mid];
|
| 402 |
+
)
|
| 403 |
+
if (u <= probability_mid){
|
| 404 |
+
max = mid;
|
| 405 |
+
}else{
|
| 406 |
+
min = mid+1;
|
| 407 |
+
}
|
| 408 |
+
}while (min < max);
|
| 409 |
+
return distribution->shift + min;
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
template <typename STATE>
|
| 413 |
+
QUALIFIERS unsigned int curand_binary_search_double(STATE *state, curandDistributionShift_t distribution)
|
| 414 |
+
{
|
| 415 |
+
return _curand_binary_search_double(curand(state), distribution);
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
// Generates uniformly distributed double values in range (0.0; 1.0) from uniformly distributed
|
| 419 |
+
// unsigned int. We can't use standard _curand_uniform_double since it can generate 1.0.
|
| 420 |
+
// This is required only for _curand_poisson_ITR_double.
|
| 421 |
+
QUALIFIERS double _curand_uniform_double_excluding_one(unsigned int x)
|
| 422 |
+
{
|
| 423 |
+
return x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
// Overload for unsigned long long.
|
| 427 |
+
// This is required only for _curand_poisson_ITR_double.
|
| 428 |
+
QUALIFIERS double _curand_uniform_double_excluding_one(unsigned long long x)
|
| 429 |
+
{
|
| 430 |
+
return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/4.0);
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
#define MAGIC_DOUBLE_CONST 500.0
|
| 434 |
+
template <typename T>
|
| 435 |
+
//George S. Fishman Discrete-event simulation: modeling, programming, and analysis
|
| 436 |
+
QUALIFIERS unsigned int _curand_poisson_ITR_double(T x, double lambda)
|
| 437 |
+
{
|
| 438 |
+
double L,p = 1.0;
|
| 439 |
+
double q = 1.0;
|
| 440 |
+
unsigned int k = 0;
|
| 441 |
+
int pow=0;
|
| 442 |
+
// This algorithm requires u to be in (0;1) range, however, _curand_uniform_double
|
| 443 |
+
// returns a number in range (0;1]. If u is 1.0 the inner loop never ends. The
|
| 444 |
+
// following operation transforms the range from (0;1] to (0;1).
|
| 445 |
+
double u = _curand_uniform_double_excluding_one(x);
|
| 446 |
+
do{
|
| 447 |
+
if (lambda > (double)(pow+MAGIC_DOUBLE_CONST)){
|
| 448 |
+
L = exp(-MAGIC_DOUBLE_CONST);
|
| 449 |
+
}else{
|
| 450 |
+
L = exp((double)(pow - lambda));
|
| 451 |
+
}
|
| 452 |
+
p *= L;
|
| 453 |
+
q *= L;
|
| 454 |
+
pow += (int) MAGIC_DOUBLE_CONST;
|
| 455 |
+
while (u > q){
|
| 456 |
+
k++;
|
| 457 |
+
p *= ((double)lambda / (double) k);
|
| 458 |
+
q += p;
|
| 459 |
+
}
|
| 460 |
+
}while((double)pow < lambda);
|
| 461 |
+
return k;
|
| 462 |
+
}
|
| 463 |
+
|
| 464 |
+
template <typename T>
|
| 465 |
+
/* Rejection Method for Poisson distribution based on gammainc approximation */
|
| 466 |
+
QUALIFIERS unsigned int curand_poisson_gammainc(T state, float lambda){
|
| 467 |
+
float y, x, t, z,v;
|
| 468 |
+
float logl = __cr_log (lambda);
|
| 469 |
+
while (true) {
|
| 470 |
+
y = curand_uniform (state);
|
| 471 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 472 |
+
x = floorf (x);
|
| 473 |
+
z = curand_uniform (state);
|
| 474 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 475 |
+
z = z*v;
|
| 476 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 477 |
+
if ((z < t) && (v>=1e-20))
|
| 478 |
+
break;
|
| 479 |
+
}
|
| 480 |
+
return (unsigned int)x;
|
| 481 |
+
}
|
| 482 |
+
|
| 483 |
+
template <typename T>
|
| 484 |
+
/* Rejection Method for Poisson distribution based on gammainc approximation */
|
| 485 |
+
QUALIFIERS uint4 curand_poisson_gammainc4(T state, float lambda){
|
| 486 |
+
uint4 result;
|
| 487 |
+
float y, x, t, z,v;
|
| 488 |
+
float logl = __cr_log (lambda);
|
| 489 |
+
while (true) {
|
| 490 |
+
y = curand_uniform(state);
|
| 491 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 492 |
+
x = floorf (x);
|
| 493 |
+
z = curand_uniform (state);
|
| 494 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 495 |
+
z = z*v;
|
| 496 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 497 |
+
if ((z < t) && (v>=1e-20))
|
| 498 |
+
break;
|
| 499 |
+
}
|
| 500 |
+
result.x = (unsigned int)x;
|
| 501 |
+
|
| 502 |
+
while (true) {
|
| 503 |
+
y = curand_uniform(state);
|
| 504 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 505 |
+
x = floorf (x);
|
| 506 |
+
z = curand_uniform (state);
|
| 507 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 508 |
+
z = z*v;
|
| 509 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 510 |
+
if ((z < t) && (v>=1e-20))
|
| 511 |
+
break;
|
| 512 |
+
}
|
| 513 |
+
result.y = (unsigned int)x;
|
| 514 |
+
|
| 515 |
+
while (true) {
|
| 516 |
+
y = curand_uniform(state);
|
| 517 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 518 |
+
x = floorf (x);
|
| 519 |
+
z = curand_uniform (state);
|
| 520 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 521 |
+
z = z*v;
|
| 522 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 523 |
+
if ((z < t) && (v>=1e-20))
|
| 524 |
+
break;
|
| 525 |
+
}
|
| 526 |
+
result.z = (unsigned int)x;
|
| 527 |
+
|
| 528 |
+
while (true) {
|
| 529 |
+
y = curand_uniform(state);
|
| 530 |
+
x = __cr_pgammaincinv (lambda, y);
|
| 531 |
+
x = floorf (x);
|
| 532 |
+
z = curand_uniform (state);
|
| 533 |
+
v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
|
| 534 |
+
z = z*v;
|
| 535 |
+
t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
|
| 536 |
+
if ((z < t) && (v>=1e-20))
|
| 537 |
+
break;
|
| 538 |
+
}
|
| 539 |
+
result.w = (unsigned int)x;
|
| 540 |
+
|
| 541 |
+
return result;
|
| 542 |
+
}
|
| 543 |
+
// Note below that the round to nearest integer, where needed,is done in line with code that
|
| 544 |
+
// assumes the range of values is < 2**32
|
| 545 |
+
|
| 546 |
+
template <typename T>
|
| 547 |
+
QUALIFIERS unsigned int _curand_poisson(T x, double lambda)
|
| 548 |
+
{
|
| 549 |
+
if (lambda < 1000)
|
| 550 |
+
return _curand_poisson_ITR_double(x, lambda);
|
| 551 |
+
return (unsigned int)((sqrt(lambda) * _curand_normal_icdf_double(x)) + lambda + 0.5); //Round to nearest
|
| 552 |
+
}
|
| 553 |
+
|
| 554 |
+
template <typename T>
|
| 555 |
+
QUALIFIERS unsigned int _curand_poisson_from_normal(T x, double lambda)
|
| 556 |
+
{
|
| 557 |
+
return (unsigned int)((sqrt(lambda) * _curand_normal_icdf(x)) + lambda + 0.5); //Round to nearest
|
| 558 |
+
}
|
| 559 |
+
|
| 560 |
+
template <typename STATE>
|
| 561 |
+
QUALIFIERS unsigned int curand_poisson_from_normal(STATE state, double lambda)
|
| 562 |
+
{
|
| 563 |
+
return (unsigned int)((sqrt(lambda) * curand_normal(state)) + lambda + 0.5); //Round to nearest
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
template <typename STATE>
|
| 567 |
+
QUALIFIERS uint4 curand_poisson_from_normal4(STATE state, double lambda)
|
| 568 |
+
{
|
| 569 |
+
uint4 result;
|
| 570 |
+
float4 _res;
|
| 571 |
+
|
| 572 |
+
_res = curand_normal4(state);
|
| 573 |
+
|
| 574 |
+
result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
|
| 575 |
+
result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
|
| 576 |
+
result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
|
| 577 |
+
result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
|
| 578 |
+
return result; //Round to nearest
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a XORWOW generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the XORWOW generator in \p state,
 * increment the position of the generator by a variable amount, depending
 * on the algorithm used.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStateXORWOW_t *state, double lambda)
{
    /* Algorithm selection by lambda: Knuth's product method for small lambda,
       normal approximation for very large lambda, gammainc rejection between. */
    if (lambda < 64)
        return curand_poisson_knuth(state, (float)lambda);
    if (lambda > 4000)
        return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
    return curand_poisson_gammainc(state, (float)lambda);
}
|
| 602 |
+
|
| 603 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a Philox4_32_10 generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
 * increment the position of the generator by a variable amount, depending
 * on the algorithm used.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStatePhilox4_32_10_t *state, double lambda)
{
    /* same lambda thresholds as the XORWOW overload */
    if (lambda < 64)
        return curand_poisson_knuth(state, (float)lambda);
    if (lambda > 4000)
        return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
    return curand_poisson_gammainc(state, (float)lambda);
}
|
| 624 |
+
/**
|
| 625 |
+
* \brief Return four Poisson-distributed unsigned ints from a Philox4_32_10 generator.
|
| 626 |
+
*
|
| 627 |
+
* Return a four unsigned ints from a Poisson
|
| 628 |
+
* distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
|
| 629 |
+
* increment the position of the generator by a variable amount, depending
|
| 630 |
+
* on the algorithm used.
|
| 631 |
+
*
|
| 632 |
+
* \param state - Pointer to state to update
|
| 633 |
+
* \param lambda - Lambda of the Poisson distribution
|
| 634 |
+
*
|
| 635 |
+
* \return Poisson-distributed unsigned int with lambda \p lambda
|
| 636 |
+
*/
|
| 637 |
+
QUALIFIERS uint4 curand_poisson4(curandStatePhilox4_32_10_t *state, double lambda)
|
| 638 |
+
{
|
| 639 |
+
uint4 result;
|
| 640 |
+
double4 _res;
|
| 641 |
+
if (lambda < 64)
|
| 642 |
+
return curand_poisson_knuth4(state, (float)lambda);
|
| 643 |
+
if (lambda > 4000) {
|
| 644 |
+
_res = curand_normal4_double(state);
|
| 645 |
+
result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
|
| 646 |
+
result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
|
| 647 |
+
result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
|
| 648 |
+
result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
|
| 649 |
+
return result;
|
| 650 |
+
}
|
| 651 |
+
return curand_poisson_gammainc4(state, (float)lambda);
|
| 652 |
+
}
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a MRG32k3A generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the MRG32k3a generator in \p state,
 * increment the position of the generator by a variable amount, depending
 * on the algorithm used.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStateMRG32k3a_t *state, double lambda)
{
    /* same lambda thresholds as the XORWOW overload */
    if (lambda < 64)
        return curand_poisson_knuth(state, (float)lambda);
    if (lambda > 4000)
        return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
    return curand_poisson_gammainc(state, (float)lambda);
}
|
| 677 |
+
|
| 678 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a MTGP32 generator.
 *
 * Return a single int from a Poisson
 * distribution with lambda \p lambda from the MTGP32 generator in \p state,
 * increment the position of the generator by one.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStateMtgp32_t *state, double lambda)
{
    /* single raw draw, transformed by _curand_poisson */
    return _curand_poisson(curand(state), lambda);
}
|
| 694 |
+
|
| 695 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a Sobol32 generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the Sobol32 generator in \p state,
 * increment the position of the generator by one.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */

QUALIFIERS unsigned int curand_poisson(curandStateSobol32_t *state, double lambda)
{
    /* quasi-random generators consume exactly one draw per sample */
    return _curand_poisson(curand(state), lambda);
}
|
| 712 |
+
|
| 713 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol32 generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the scrambled Sobol32 generator in \p state,
 * increment the position of the generator by one.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol32_t *state, double lambda)
{
    /* quasi-random generators consume exactly one draw per sample */
    return _curand_poisson(curand(state), lambda);
}
|
| 729 |
+
|
| 730 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a Sobol64 generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStateSobol64_t *state, double lambda)
{
    /* quasi-random generators consume exactly one draw per sample */
    return _curand_poisson(curand(state), lambda);
}
|
| 746 |
+
|
| 747 |
+
/**
 * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol64 generator.
 *
 * Return a single unsigned int from a Poisson
 * distribution with lambda \p lambda from the scrambled Sobol64 generator in \p state,
 * increment position of generator by one.
 *
 * \param state - Pointer to state to update
 * \param lambda - Lambda of the Poisson distribution
 *
 * \return Poisson-distributed unsigned int with lambda \p lambda
 */
QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol64_t *state, double lambda)
{
    /* quasi-random generators consume exactly one draw per sample */
    return _curand_poisson(curand(state), lambda);
}
|
| 763 |
+
#endif // !defined(CURAND_POISSON_H_)
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_precalc.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h
ADDED
|
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
/* Copyright 2010-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#if !defined(CURAND_UNIFORM_H_)
|
| 52 |
+
#define CURAND_UNIFORM_H_
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \defgroup DEVICE Device API
|
| 56 |
+
*
|
| 57 |
+
* @{
|
| 58 |
+
*/
|
| 59 |
+
|
| 60 |
+
#ifndef __CUDACC_RTC__
|
| 61 |
+
#include <math.h>
|
| 62 |
+
#endif // __CUDACC_RTC__
|
| 63 |
+
|
| 64 |
+
#include "curand_mrg32k3a.h"
|
| 65 |
+
#include "curand_mtgp32_kernel.h"
|
| 66 |
+
#include "curand_philox4x32_x.h"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
/* Map a raw 32-bit draw to a float in (0, 1].
 * The half-step offset keeps the smallest output strictly above zero. */
QUALIFIERS float _curand_uniform(unsigned int x)
{
    const float half_step = CURAND_2POW32_INV / 2.0f;
    return half_step + x * CURAND_2POW32_INV;
}
|
| 73 |
+
|
| 74 |
+
/* Component-wise version of _curand_uniform: four raw 32-bit draws
 * become four floats in (0, 1]. */
QUALIFIERS float4 _curand_uniform4(uint4 x)
{
    const float half_step = CURAND_2POW32_INV / 2.0f;
    float4 result;
    result.x = x.x * CURAND_2POW32_INV + half_step;
    result.y = x.y * CURAND_2POW32_INV + half_step;
    result.z = x.z * CURAND_2POW32_INV + half_step;
    result.w = x.w * CURAND_2POW32_INV + half_step;
    return result;
}
|
| 83 |
+
|
| 84 |
+
/* 64-bit overload: a float mantissa cannot hold 64 bits, so only the
 * 32 most significant bits of the draw are used. */
QUALIFIERS float _curand_uniform(unsigned long long x)
{
    const unsigned int high_bits = (unsigned int)(x >> 32);
    return high_bits * CURAND_2POW32_INV + (CURAND_2POW32_INV / 2.0f);
}
|
| 90 |
+
|
| 91 |
+
/* Map a raw 32-bit draw to a double in (0, 1].
 *
 * Fix: the centering offset must be HALF a step, (CURAND_2POW32_INV_DOUBLE/2.0),
 * matching the float variant _curand_uniform and the 64-bit double variant
 * below.  The previous code added a full step (+ CURAND_2POW32_INV_DOUBLE),
 * which shifted every output up by 2^-33 and made x == 0xffffffff map to
 * exactly 1.0 + an extra half step relative to the intended lattice.
 */
QUALIFIERS double _curand_uniform_double(unsigned int x)
{
    return x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
}
|
| 95 |
+
|
| 96 |
+
/* 64-bit overload: a double mantissa holds 53 bits, so the low 11 bits
 * of the draw are discarded before scaling into (0, 1]. */
QUALIFIERS double _curand_uniform_double(unsigned long long x)
{
    const unsigned long long top53 = x >> 11;
    return top53 * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE / 2.0);
}
|
| 100 |
+
|
| 101 |
+
/* High-quality double in (0, 1] from two 32-bit draws: x supplies the
 * low 32 mantissa bits, y the high 21, for 53 random bits total. */
QUALIFIERS double _curand_uniform_double_hq(unsigned int x, unsigned int y)
{
    const unsigned long long combined =
        ((unsigned long long)y << (53 - 32)) ^ (unsigned long long)x;
    return combined * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE / 2.0);
}
|
| 107 |
+
|
| 108 |
+
/* Uniform float in (0, 1] from the test-state generator. */
QUALIFIERS float curand_uniform(curandStateTest_t *state)
{
    const unsigned int draw = curand(state);  /* advances the generator */
    return _curand_uniform(draw);
}
|
| 112 |
+
|
| 113 |
+
/* Uniform double in (0, 1] from the test-state generator (32 random bits). */
QUALIFIERS double curand_uniform_double(curandStateTest_t *state)
{
    const unsigned int draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 117 |
+
|
| 118 |
+
/**
|
| 119 |
+
* \brief Return a uniformly distributed float from an XORWOW generator.
|
| 120 |
+
*
|
| 121 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 122 |
+
* from the XORWOW generator in \p state, increment position of generator.
|
| 123 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 124 |
+
* point outputs are never returned.
|
| 125 |
+
*
|
| 126 |
+
* The implementation may use any number of calls to \p curand() to
|
| 127 |
+
* get enough random bits to create the return value. The current
|
| 128 |
+
* implementation uses one call.
|
| 129 |
+
*
|
| 130 |
+
* \param state - Pointer to state to update
|
| 131 |
+
*
|
| 132 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 133 |
+
*/
|
| 134 |
+
QUALIFIERS float curand_uniform(curandStateXORWOW_t *state)
{
    /* A single 32-bit draw suffices for single precision. */
    const unsigned int draw = curand(state);
    return _curand_uniform(draw);
}
|
| 138 |
+
|
| 139 |
+
/**
|
| 140 |
+
* \brief Return a uniformly distributed double from an XORWOW generator.
|
| 141 |
+
*
|
| 142 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 143 |
+
* from the XORWOW generator in \p state, increment position of generator.
|
| 144 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 145 |
+
* point outputs are never returned.
|
| 146 |
+
*
|
| 147 |
+
* The implementation may use any number of calls to \p curand() to
|
| 148 |
+
* get enough random bits to create the return value. The current
|
| 149 |
+
* implementation uses exactly two calls.
|
| 150 |
+
*
|
| 151 |
+
* \param state - Pointer to state to update
|
| 152 |
+
*
|
| 153 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 154 |
+
*/
|
| 155 |
+
QUALIFIERS double curand_uniform_double(curandStateXORWOW_t *state)
{
    /* Two 32-bit draws supply the 53 random mantissa bits; the order of
     * the two curand() calls is part of the sequence and must not change. */
    const unsigned int lo = curand(state);
    const unsigned int hi = curand(state);
    return _curand_uniform_double_hq(lo, hi);
}
|
| 162 |
+
/**
|
| 163 |
+
* \brief Return a uniformly distributed float from an MRG32k3a generator.
|
| 164 |
+
*
|
| 165 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 166 |
+
* from the MRG32k3a generator in \p state, increment position of generator.
|
| 167 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 168 |
+
* point outputs are never returned.
|
| 169 |
+
*
|
| 170 |
+
* The implementation returns up to 23 bits of mantissa, with the minimum
|
| 171 |
+
* return value \f$ 2^{-32} \f$
|
| 172 |
+
*
|
| 173 |
+
* \param state - Pointer to state to update
|
| 174 |
+
*
|
| 175 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 176 |
+
*/
|
| 177 |
+
QUALIFIERS float curand_uniform(curandStateMRG32k3a_t *state)
{
    /* The MRG draw scaled by MRG32K3A_NORM already lies in (0, 1];
     * narrow it to single precision. */
    const double scaled = curand_MRG32k3a(state) * MRG32K3A_NORM;
    return (float)scaled;
}
|
| 181 |
+
|
| 182 |
+
/**
|
| 183 |
+
* \brief Return a uniformly distributed double from an MRG32k3a generator.
|
| 184 |
+
*
|
| 185 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 186 |
+
* from the MRG32k3a generator in \p state, increment position of generator.
|
| 187 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 188 |
+
* point outputs are never returned.
|
| 189 |
+
*
|
| 190 |
+
* Note the implementation returns at most 32 random bits of mantissa as
|
| 191 |
+
* outlined in the seminal paper by L'Ecuyer.
|
| 192 |
+
*
|
| 193 |
+
* \param state - Pointer to state to update
|
| 194 |
+
*
|
| 195 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 196 |
+
*/
|
| 197 |
+
QUALIFIERS double curand_uniform_double(curandStateMRG32k3a_t *state)
{
    /* Scale the raw MRG draw into (0, 1]. */
    const double draw = curand_MRG32k3a(state);
    return draw * MRG32K3A_NORM;
}
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
/**
|
| 205 |
+
 * \brief Return a uniformly distributed tuple of 2 doubles from a Philox4_32_10 generator.
|
| 206 |
+
*
|
| 207 |
+
 * Return a uniformly distributed 2 doubles (double2) between \p 0.0 and \p 1.0
|
| 208 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator by 4.
|
| 209 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 210 |
+
* point outputs are never returned.
|
| 211 |
+
*
|
| 212 |
+
* \param state - Pointer to state to update
|
| 213 |
+
*
|
| 214 |
+
* \return 2 uniformly distributed doubles between \p 0.0 and \p 1.0
|
| 215 |
+
*/
|
| 216 |
+
|
| 217 |
+
QUALIFIERS double2 curand_uniform2_double(curandStatePhilox4_32_10_t *state)
{
    /* One curand4() call yields four 32-bit words — two per double. */
    const uint4 raw = curand4(state);
    double2 out;
    out.x = _curand_uniform_double_hq(raw.x, raw.y);
    out.y = _curand_uniform_double_hq(raw.z, raw.w);
    return out;
}
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
// not a part of API
|
| 229 |
+
QUALIFIERS double4 curand_uniform4_double(curandStatePhilox4_32_10_t *state)
{
    /* Two curand4() draws (eight 32-bit words) build four 53-bit doubles.
     * Draw order is part of the sequence and must be preserved. */
    const uint4 first  = curand4(state);
    const uint4 second = curand4(state);
    double4 out;
    out.x = _curand_uniform_double_hq(first.x, first.y);
    out.y = _curand_uniform_double_hq(first.z, first.w);
    out.z = _curand_uniform_double_hq(second.x, second.y);
    out.w = _curand_uniform_double_hq(second.z, second.w);
    return out;
}
|
| 241 |
+
|
| 242 |
+
/**
|
| 243 |
+
* \brief Return a uniformly distributed float from a Philox4_32_10 generator.
|
| 244 |
+
*
|
| 245 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 246 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator.
|
| 247 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 248 |
+
* point outputs are never returned.
|
| 249 |
+
*
|
| 250 |
+
* \param state - Pointer to state to update
|
| 251 |
+
*
|
| 252 |
+
* \return uniformly distributed float between \p 0.0 and \p 1.0
|
| 253 |
+
*
|
| 254 |
+
*/
|
| 255 |
+
QUALIFIERS float curand_uniform(curandStatePhilox4_32_10_t *state)
{
    /* Single-word draw; generator position advances by one. */
    const unsigned int draw = curand(state);
    return _curand_uniform(draw);
}
|
| 259 |
+
|
| 260 |
+
/**
|
| 261 |
+
* \brief Return a uniformly distributed tuple of 4 floats from a Philox4_32_10 generator.
|
| 262 |
+
*
|
| 263 |
+
* Return a uniformly distributed 4 floats between \p 0.0f and \p 1.0f
|
| 264 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator by 4.
|
| 265 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 266 |
+
* point outputs are never returned.
|
| 267 |
+
*
|
| 268 |
+
* \param state - Pointer to state to update
|
| 269 |
+
*
|
| 270 |
+
* \return uniformly distributed float between \p 0.0 and \p 1.0
|
| 271 |
+
*
|
| 272 |
+
*/
|
| 273 |
+
QUALIFIERS float4 curand_uniform4(curandStatePhilox4_32_10_t *state)
{
    /* Four words in one draw; generator position advances by four. */
    const uint4 raw = curand4(state);
    return _curand_uniform4(raw);
}
|
| 277 |
+
|
| 278 |
+
/**
|
| 279 |
+
* \brief Return a uniformly distributed float from a MTGP32 generator.
|
| 280 |
+
*
|
| 281 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 282 |
+
* from the MTGP32 generator in \p state, increment position of generator.
|
| 283 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 284 |
+
* point outputs are never returned.
|
| 285 |
+
*
|
| 286 |
+
* \param state - Pointer to state to update
|
| 287 |
+
*
|
| 288 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 289 |
+
*/
|
| 290 |
+
QUALIFIERS float curand_uniform(curandStateMtgp32_t *state)
{
    const unsigned int draw = curand(state);
    return _curand_uniform(draw);
}
|
| 294 |
+
/**
|
| 295 |
+
* \brief Return a uniformly distributed double from a MTGP32 generator.
|
| 296 |
+
*
|
| 297 |
+
 * Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 298 |
+
* from the MTGP32 generator in \p state, increment position of generator.
|
| 299 |
+
 * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 300 |
+
* point outputs are never returned.
|
| 301 |
+
*
|
| 302 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 303 |
+
* precision value.
|
| 304 |
+
*
|
| 305 |
+
* \param state - Pointer to state to update
|
| 306 |
+
*
|
| 307 |
+
 * \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 308 |
+
*/
|
| 309 |
+
QUALIFIERS double curand_uniform_double(curandStateMtgp32_t *state)
{
    /* Only 32 random bits feed this double — see the doc comment above. */
    const unsigned int draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 313 |
+
|
| 314 |
+
/**
|
| 315 |
+
* \brief Return a uniformly distributed double from a Philox4_32_10 generator.
|
| 316 |
+
*
|
| 317 |
+
 * Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 318 |
+
* from the Philox4_32_10 generator in \p state, increment position of generator.
|
| 319 |
+
 * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 320 |
+
* point outputs are never returned.
|
| 321 |
+
*
|
| 322 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 323 |
+
* precision value.
|
| 324 |
+
*
|
| 325 |
+
* \p curand_uniform2_double() is recommended for higher quality uniformly distributed
|
| 326 |
+
* double precision values.
|
| 327 |
+
*
|
| 328 |
+
* \param state - Pointer to state to update
|
| 329 |
+
*
|
| 330 |
+
 * \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 331 |
+
*/
|
| 332 |
+
|
| 333 |
+
QUALIFIERS double curand_uniform_double(curandStatePhilox4_32_10_t *state)
{
    /* 32 random bits per double; prefer curand_uniform2_double for quality. */
    const unsigned int draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
/**
|
| 340 |
+
* \brief Return a uniformly distributed float from a Sobol32 generator.
|
| 341 |
+
*
|
| 342 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 343 |
+
* from the Sobol32 generator in \p state, increment position of generator.
|
| 344 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 345 |
+
* point outputs are never returned.
|
| 346 |
+
*
|
| 347 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 348 |
+
*
|
| 349 |
+
* \param state - Pointer to state to update
|
| 350 |
+
*
|
| 351 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 352 |
+
*/
|
| 353 |
+
QUALIFIERS float curand_uniform(curandStateSobol32_t *state)
{
    /* Exactly one draw, preserving the quasirandom sequence. */
    const unsigned int draw = curand(state);
    return _curand_uniform(draw);
}
|
| 357 |
+
|
| 358 |
+
/**
|
| 359 |
+
* \brief Return a uniformly distributed double from a Sobol32 generator.
|
| 360 |
+
*
|
| 361 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 362 |
+
* from the Sobol32 generator in \p state, increment position of generator.
|
| 363 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 364 |
+
* point outputs are never returned.
|
| 365 |
+
*
|
| 366 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 367 |
+
* to preserve the quasirandom properties of the sequence.
|
| 368 |
+
*
|
| 369 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 370 |
+
* precision value.
|
| 371 |
+
*
|
| 372 |
+
* \param state - Pointer to state to update
|
| 373 |
+
*
|
| 374 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 375 |
+
*/
|
| 376 |
+
QUALIFIERS double curand_uniform_double(curandStateSobol32_t *state)
{
    /* One draw only (quasirandomness); 32 random bits per double. */
    const unsigned int draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 380 |
+
/**
|
| 381 |
+
* \brief Return a uniformly distributed float from a scrambled Sobol32 generator.
|
| 382 |
+
*
|
| 383 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 384 |
+
* from the scrambled Sobol32 generator in \p state, increment position of generator.
|
| 385 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 386 |
+
* point outputs are never returned.
|
| 387 |
+
*
|
| 388 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 389 |
+
*
|
| 390 |
+
* \param state - Pointer to state to update
|
| 391 |
+
*
|
| 392 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 393 |
+
*/
|
| 394 |
+
QUALIFIERS float curand_uniform(curandStateScrambledSobol32_t *state)
{
    const unsigned int draw = curand(state);
    return _curand_uniform(draw);
}
|
| 398 |
+
|
| 399 |
+
/**
|
| 400 |
+
* \brief Return a uniformly distributed double from a scrambled Sobol32 generator.
|
| 401 |
+
*
|
| 402 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 403 |
+
* from the scrambled Sobol32 generator in \p state, increment position of generator.
|
| 404 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 405 |
+
* point outputs are never returned.
|
| 406 |
+
*
|
| 407 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 408 |
+
* to preserve the quasirandom properties of the sequence.
|
| 409 |
+
*
|
| 410 |
+
* Note that the implementation uses only 32 random bits to generate a single double
|
| 411 |
+
* precision value.
|
| 412 |
+
*
|
| 413 |
+
* \param state - Pointer to state to update
|
| 414 |
+
*
|
| 415 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 416 |
+
*/
|
| 417 |
+
QUALIFIERS double curand_uniform_double(curandStateScrambledSobol32_t *state)
{
    /* One draw only (quasirandomness); 32 random bits per double. */
    const unsigned int draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 421 |
+
/**
|
| 422 |
+
* \brief Return a uniformly distributed float from a Sobol64 generator.
|
| 423 |
+
*
|
| 424 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 425 |
+
* from the Sobol64 generator in \p state, increment position of generator.
|
| 426 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 427 |
+
* point outputs are never returned.
|
| 428 |
+
*
|
| 429 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 430 |
+
*
|
| 431 |
+
* \param state - Pointer to state to update
|
| 432 |
+
*
|
| 433 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 434 |
+
*/
|
| 435 |
+
QUALIFIERS float curand_uniform(curandStateSobol64_t *state)
{
    /* 64-bit draw; the unsigned long long overload keeps the top 32 bits. */
    const unsigned long long draw = curand(state);
    return _curand_uniform(draw);
}
|
| 439 |
+
|
| 440 |
+
/**
|
| 441 |
+
* \brief Return a uniformly distributed double from a Sobol64 generator.
|
| 442 |
+
*
|
| 443 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 444 |
+
* from the Sobol64 generator in \p state, increment position of generator.
|
| 445 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 446 |
+
* point outputs are never returned.
|
| 447 |
+
*
|
| 448 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 449 |
+
* to preserve the quasirandom properties of the sequence.
|
| 450 |
+
*
|
| 451 |
+
* \param state - Pointer to state to update
|
| 452 |
+
*
|
| 453 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 454 |
+
*/
|
| 455 |
+
QUALIFIERS double curand_uniform_double(curandStateSobol64_t *state)
{
    /* 64-bit draw; the overload keeps the top 53 bits for the mantissa. */
    const unsigned long long draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 459 |
+
/**
|
| 460 |
+
* \brief Return a uniformly distributed float from a scrambled Sobol64 generator.
|
| 461 |
+
*
|
| 462 |
+
* Return a uniformly distributed float between \p 0.0f and \p 1.0f
|
| 463 |
+
* from the scrambled Sobol64 generator in \p state, increment position of generator.
|
| 464 |
+
* Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
|
| 465 |
+
* point outputs are never returned.
|
| 466 |
+
*
|
| 467 |
+
* The implementation is guaranteed to use a single call to \p curand().
|
| 468 |
+
*
|
| 469 |
+
* \param state - Pointer to state to update
|
| 470 |
+
*
|
| 471 |
+
* \return uniformly distributed float between \p 0.0f and \p 1.0f
|
| 472 |
+
*/
|
| 473 |
+
QUALIFIERS float curand_uniform(curandStateScrambledSobol64_t *state)
{
    const unsigned long long draw = curand(state);
    return _curand_uniform(draw);
}
|
| 477 |
+
|
| 478 |
+
/**
|
| 479 |
+
* \brief Return a uniformly distributed double from a scrambled Sobol64 generator.
|
| 480 |
+
*
|
| 481 |
+
* Return a uniformly distributed double between \p 0.0 and \p 1.0
|
| 482 |
+
* from the scrambled Sobol64 generator in \p state, increment position of generator.
|
| 483 |
+
* Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
|
| 484 |
+
* point outputs are never returned.
|
| 485 |
+
*
|
| 486 |
+
* The implementation is guaranteed to use a single call to \p curand()
|
| 487 |
+
* to preserve the quasirandom properties of the sequence.
|
| 488 |
+
*
|
| 489 |
+
* \param state - Pointer to state to update
|
| 490 |
+
*
|
| 491 |
+
* \return uniformly distributed double between \p 0.0 and \p 1.0
|
| 492 |
+
*/
|
| 493 |
+
QUALIFIERS double curand_uniform_double(curandStateScrambledSobol64_t *state)
{
    const unsigned long long draw = curand(state);
    return _curand_uniform_double(draw);
}
|
| 497 |
+
|
| 498 |
+
#endif // !defined(CURAND_UNIFORM_H_)
|
infer_4_37_2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dab8074b610b82a863a42eceda788e9b08364b545bab948509306b48c46018cf
|
| 3 |
+
size 96525744
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e428449e985790af8faa2227d6c2c989c54cc093110f476200d68f36b4a524df
|
| 3 |
+
size 2194056
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7d4536852551fda02d7d1b2d2f4aaf559877db06a6a40e8d5fa73bc0e1d048c
|
| 3 |
+
size 133184
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/arrays.pyi
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Sequence
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from pandas._typing import (
|
| 6 |
+
AxisInt,
|
| 7 |
+
DtypeObj,
|
| 8 |
+
Self,
|
| 9 |
+
Shape,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
class NDArrayBacked:
    """Typing stub for ``NDArrayBacked`` (implemented in the compiled
    ``pandas._libs.arrays`` extension): an object wrapping a single
    backing ``np.ndarray`` plus its pandas dtype."""

    # Backing storage declared by the stub: a dtype object and one ndarray.
    _dtype: DtypeObj
    _ndarray: np.ndarray
    def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ...
    # Alternate constructor; presumably skips validation done by __init__ —
    # TODO confirm against the Cython implementation.
    @classmethod
    def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ...
    # Build a new instance around a replacement backing array.
    def _from_backing_data(self, values: np.ndarray): ...
    def __setstate__(self, state): ...
    def __len__(self) -> int: ...
    # ndarray-like introspection properties, delegated to the backing array
    # per their names — verify in the compiled source.
    @property
    def shape(self) -> Shape: ...
    @property
    def ndim(self) -> int: ...
    @property
    def size(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    # ndarray-like manipulation methods; untyped returns in the stub.
    def copy(self, order=...): ...
    def delete(self, loc, axis=...): ...
    def swapaxes(self, axis1, axis2): ...
    def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ...
    def reshape(self, *args, **kwargs): ...
    def ravel(self, order=...): ...
    @property
    def T(self): ...
    # Concatenate several instances of the same concrete type along `axis`.
    @classmethod
    def _concat_same_type(
        cls, to_concat: Sequence[Self], axis: AxisInt = ...
    ) -> Self: ...
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (61.7 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/hashing.pyi
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from pandas._typing import npt
|
| 4 |
+
|
| 5 |
+
# Stub for the compiled pandas._libs.hashing.hash_object_array: hashes an
# object-dtype array into unsigned 64-bit values using the given string key
# (and, per the signature, an optional encoding for text elements).
def hash_object_array(
    arr: npt.NDArray[np.object_],
    key: str,
    encoding: str = ...,
) -> npt.NDArray[np.uint64]: ...
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/lib.pyi
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Typing stub for the compiled ``pandas._libs.lib`` extension module."""
# TODO(npdtypes): Many types specified here can be made more specific/accurate;
# the more specific versions are specified in comments
from decimal import Decimal
from typing import (
    Any,
    Callable,
    Final,
    Generator,
    Hashable,
    Literal,
    TypeAlias,
    overload,
)

import numpy as np

from pandas._libs.interval import Interval
from pandas._libs.tslibs import Period
from pandas._typing import (
    ArrayLike,
    DtypeObj,
    TypeGuard,
    npt,
)

# placeholder until we can specify np.ndarray[object, ndim=2]
ndarray_obj_2d = np.ndarray

from enum import Enum

# Sentinel enum backing the `no_default` singleton used as a "no argument
# supplied" marker throughout pandas.
class _NoDefault(Enum):
    no_default = ...

no_default: Final = _NoDefault.no_default
NoDefault: TypeAlias = Literal[_NoDefault.no_default]

# Module-level integer constants (max int64 / uint64 per their names).
i8max: int
u8max: int

# ---- scalar / array type-inspection predicates -------------------------
def is_np_dtype(dtype: object, kinds: str | None = ...) -> TypeGuard[np.dtype]: ...
def item_from_zerodim(val: object) -> object: ...
def infer_dtype(value: object, skipna: bool = ...) -> str: ...
def is_iterator(obj: object) -> bool: ...
def is_scalar(val: object) -> bool: ...
def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ...
def is_pyarrow_array(obj: object) -> bool: ...
def is_period(val: object) -> TypeGuard[Period]: ...
def is_interval(obj: object) -> TypeGuard[Interval]: ...
def is_decimal(obj: object) -> TypeGuard[Decimal]: ...
def is_complex(obj: object) -> TypeGuard[complex]: ...
def is_bool(obj: object) -> TypeGuard[bool | np.bool_]: ...
def is_integer(obj: object) -> TypeGuard[int | np.integer]: ...
def is_int_or_none(obj) -> bool: ...
def is_float(obj: object) -> TypeGuard[float]: ...
def is_interval_array(values: np.ndarray) -> bool: ...
def is_datetime64_array(values: np.ndarray, skipna: bool = True) -> bool: ...
def is_timedelta_or_timedelta64_array(
    values: np.ndarray, skipna: bool = True
) -> bool: ...
def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ...
def is_time_array(values: np.ndarray, skipna: bool = ...): ...
def is_date_array(values: np.ndarray, skipna: bool = ...): ...
def is_datetime_array(values: np.ndarray, skipna: bool = ...): ...
def is_string_array(values: np.ndarray, skipna: bool = ...): ...
def is_float_array(values: np.ndarray): ...
def is_integer_array(values: np.ndarray, skipna: bool = ...): ...
def is_bool_array(values: np.ndarray, skipna: bool = ...): ...

# ---- conversion / construction helpers ---------------------------------
def fast_multiget(
    mapping: dict,
    keys: np.ndarray, # object[:]
    default=...,
) -> np.ndarray: ...
def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
def map_infer(
    arr: np.ndarray,
    f: Callable[[Any], Any],
    convert: bool = ...,
    ignore_na: bool = ...,
) -> np.ndarray: ...
@overload
def maybe_convert_objects(
    objects: npt.NDArray[np.object_],
    *,
    try_float: bool = ...,
    safe: bool = ...,
    convert_numeric: bool = ...,
    convert_non_numeric: Literal[False] = ...,
    convert_to_nullable_dtype: Literal[False] = ...,
    dtype_if_all_nat: DtypeObj | None = ...,
) -> npt.NDArray[np.object_ | np.number]: ...
@overload
def maybe_convert_objects(
    objects: npt.NDArray[np.object_],
    *,
    try_float: bool = ...,
    safe: bool = ...,
    convert_numeric: bool = ...,
    convert_non_numeric: bool = ...,
    convert_to_nullable_dtype: Literal[True] = ...,
    dtype_if_all_nat: DtypeObj | None = ...,
) -> ArrayLike: ...
@overload
def maybe_convert_objects(
    objects: npt.NDArray[np.object_],
    *,
    try_float: bool = ...,
    safe: bool = ...,
    convert_numeric: bool = ...,
    convert_non_numeric: bool = ...,
    convert_to_nullable_dtype: bool = ...,
    dtype_if_all_nat: DtypeObj | None = ...,
) -> ArrayLike: ...
@overload
def maybe_convert_numeric(
    values: npt.NDArray[np.object_],
    na_values: set,
    convert_empty: bool = ...,
    coerce_numeric: bool = ...,
    convert_to_masked_nullable: Literal[False] = ...,
) -> tuple[np.ndarray, None]: ...
@overload
def maybe_convert_numeric(
    values: npt.NDArray[np.object_],
    na_values: set,
    convert_empty: bool = ...,
    coerce_numeric: bool = ...,
    *,
    convert_to_masked_nullable: Literal[True],
) -> tuple[np.ndarray, np.ndarray]: ...

# TODO: restrict `arr`?
def ensure_string_array(
    arr,
    na_value: object = ...,
    convert_na_value: bool = ...,
    copy: bool = ...,
    skipna: bool = ...,
) -> npt.NDArray[np.object_]: ...
def convert_nans_to_NA(
    arr: npt.NDArray[np.object_],
) -> npt.NDArray[np.object_]: ...
def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ...

# TODO: can we be more specific about rows?
def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ...
def tuples_to_object_array(
    tuples: npt.NDArray[np.object_],
) -> ndarray_obj_2d: ...

# TODO: can we be more specific about rows?
def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ...
def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ...
def maybe_booleans_to_slice(
    mask: npt.NDArray[np.uint8],
) -> slice | npt.NDArray[np.uint8]: ...
def maybe_indices_to_slice(
    indices: npt.NDArray[np.intp],
    max_len: int,
) -> slice | npt.NDArray[np.intp]: ...
def is_all_arraylike(obj: list) -> bool: ...

# -----------------------------------------------------------------
# Functions which in reality take memoryviews

def memory_usage_of_objects(arr: np.ndarray) -> int: ... # object[:] # np.int64
def map_infer_mask(
    arr: np.ndarray,
    f: Callable[[Any], Any],
    mask: np.ndarray, # const uint8_t[:]
    convert: bool = ...,
    na_value: Any = ...,
    dtype: np.dtype = ...,
) -> np.ndarray: ...
def indices_fast(
    index: npt.NDArray[np.intp],
    labels: np.ndarray, # const int64_t[:]
    keys: list,
    sorted_labels: list[npt.NDArray[np.int64]],
) -> dict[Hashable, npt.NDArray[np.intp]]: ...
def generate_slices(
    labels: np.ndarray, ngroups: int # const intp_t[:]
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
def count_level_2d(
    mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True],
    labels: np.ndarray, # const intp_t[:]
    max_bin: int,
) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2]
def get_level_sorter(
    codes: np.ndarray, # const int64_t[:]
    starts: np.ndarray, # const intp_t[:]
) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1]
def generate_bins_dt64(
    values: npt.NDArray[np.int64],
    binner: np.ndarray, # const int64_t[:]
    closed: object = ...,
    hasnans: bool = ...,
) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1]
def array_equivalent_object(
    left: npt.NDArray[np.object_],
    right: npt.NDArray[np.object_],
) -> bool: ...
def has_infs(arr: np.ndarray) -> bool: ... # const floating[:]
def has_only_ints_or_nan(arr: np.ndarray) -> bool: ... # const floating[:]
def get_reverse_indexer(
    indexer: np.ndarray, # const intp_t[:]
    length: int,
) -> npt.NDArray[np.intp]: ...
def is_bool_list(obj: list) -> bool: ...
def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
def is_range_indexer(
    left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1]
) -> bool: ...
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c9e1a4899b63e3a9a6431882897273d1797a9c2d2baff27406c8b8211eceaf34
|
| 3 |
+
size 594760
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/properties.pyi
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Stub declarations for pandas._libs.properties (Cython extension).
from typing import (
    Sequence,
    overload,
)

from pandas._typing import (
    AnyArrayLike,
    DataFrame,
    Index,
    Series,
)

# note: this is a lie to make type checkers happy (they special
# case property). cache_readonly uses attribute names similar to
# property (fget) but it does not provide fset and fdel.
cache_readonly = property

class AxisProperty:
    # Descriptor used for DataFrame/Series axis attributes (index/columns).
    axis: int
    def __init__(self, axis: int = ..., doc: str = ...) -> None: ...
    # Accessed on an instance -> the axis Index; on the class -> the descriptor.
    @overload
    def __get__(self, obj: DataFrame | Series, type) -> Index: ...
    @overload
    def __get__(self, obj: None, type) -> AxisProperty: ...
    def __set__(
        self, obj: DataFrame | Series, value: AnyArrayLike | Sequence
    ) -> None: ...
|
infer_4_37_2/lib/python3.10/site-packages/pandas/_libs/tslib.pyi
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Stub declarations for pandas._libs.tslib (Cython extension).
from datetime import tzinfo

import numpy as np

from pandas._typing import npt

def format_array_from_datetime(
    values: npt.NDArray[np.int64],
    tz: tzinfo | None = ...,
    format: str | None = ...,
    na_rep: str | float = ...,
    reso: int = ...,  # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def array_with_unit_to_datetime(
    values: npt.NDArray[np.object_],
    unit: str,
    errors: str = ...,
) -> tuple[np.ndarray, tzinfo | None]: ...
def first_non_null(values: np.ndarray) -> int: ...
def array_to_datetime(
    values: npt.NDArray[np.object_],
    errors: str = ...,
    dayfirst: bool = ...,
    yearfirst: bool = ...,
    utc: bool = ...,
    creso: int = ...,
) -> tuple[np.ndarray, tzinfo | None]: ...

# returned ndarray may be object dtype or datetime64[ns]

def array_to_datetime_with_tz(
    values: npt.NDArray[np.object_],
    tz: tzinfo,
    dayfirst: bool,
    yearfirst: bool,
    creso: int,
) -> npt.NDArray[np.int64]: ...
|
janus/lib/python3.10/site-packages/sympy/codegen/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" The ``sympy.codegen`` module contains classes and functions for building
|
| 2 |
+
abstract syntax trees of algorithms. These trees may then be printed by the
|
| 3 |
+
code-printers in ``sympy.printing``.
|
| 4 |
+
|
| 5 |
+
There are several submodules available:
|
| 6 |
+
- ``sympy.codegen.ast``: AST nodes useful across multiple languages.
|
| 7 |
+
- ``sympy.codegen.cnodes``: AST nodes useful for the C family of languages.
|
| 8 |
+
- ``sympy.codegen.fnodes``: AST nodes useful for Fortran.
|
| 9 |
+
- ``sympy.codegen.cfunctions``: functions specific to C (C99 math functions)
|
| 10 |
+
- ``sympy.codegen.ffunctions``: functions specific to Fortran (e.g. ``kind``).
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
from .ast import (
|
| 16 |
+
Assignment, aug_assign, CodeBlock, For, Attribute, Variable, Declaration,
|
| 17 |
+
While, Scope, Print, FunctionPrototype, FunctionDefinition, FunctionCall
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
'Assignment', 'aug_assign', 'CodeBlock', 'For', 'Attribute', 'Variable',
|
| 22 |
+
'Declaration', 'While', 'Scope', 'Print', 'FunctionPrototype',
|
| 23 |
+
'FunctionDefinition', 'FunctionCall',
|
| 24 |
+
]
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/abstract_nodes.cpython-310.pyc
ADDED
|
Binary file (936 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/approximations.cpython-310.pyc
ADDED
|
Binary file (6.32 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cfunctions.cpython-310.pyc
ADDED
|
Binary file (14.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cnodes.cpython-310.pyc
ADDED
|
Binary file (4.88 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cutils.cpython-310.pyc
ADDED
|
Binary file (703 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/cxxnodes.cpython-310.pyc
ADDED
|
Binary file (687 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/fnodes.cpython-310.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/futils.cpython-310.pyc
ADDED
|
Binary file (2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/__pycache__/pynodes.cpython-310.pyc
ADDED
|
Binary file (655 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/abstract_nodes.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module provides containers for python objects that are valid
|
| 2 |
+
printing targets but are not a subclass of SymPy's Printable.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
from sympy.core.containers import Tuple
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class List(Tuple):
    """Represents a (frozen) (Python) list (for code printing purposes)."""
    def __eq__(self, other):
        if isinstance(other, list):
            # Lift plain Python lists into ``List`` so they compare
            # element-wise against us.
            return self == List(*other)
        else:
            # NOTE(review): this compares our args *tuple* against ``other``,
            # which is False for another ``List`` instance with equal
            # elements -- confirm this asymmetry is intended.
            return self.args == other

    def __hash__(self):
        # ``__eq__`` is overridden, so hashability must be restored
        # explicitly; delegate to Tuple's hash.
        return super().__hash__()
|
janus/lib/python3.10/site-packages/sympy/codegen/approximations.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from sympy.sets.sets import Interval
|
| 3 |
+
from sympy.calculus.singularities import is_increasing, is_decreasing
|
| 4 |
+
from sympy.codegen.rewriting import Optimization
|
| 5 |
+
from sympy.core.function import UndefinedFunction
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
This module collects classes useful for approximate rewriting of expressions.
|
| 9 |
+
This can be beneficial when generating numeric code for which performance is
|
| 10 |
+
of greater importance than precision (e.g. for preconditioners used in iterative
|
| 11 |
+
methods).
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
class SumApprox(Optimization):
    """
    Approximates sum by neglecting small terms.

    Explanation
    ===========

    If terms are expressions which can be determined to be monotonic, then
    bounds for those expressions are added.

    Parameters
    ==========

    bounds : dict
        Mapping expressions to length 2 tuple of bounds (low, high).
    reltol : number
        Threshold for when to ignore a term. Taken relative to the largest
        lower bound among bounds.

    Examples
    ========

    >>> from sympy import exp
    >>> from sympy.abc import x, y, z
    >>> from sympy.codegen.rewriting import optimize
    >>> from sympy.codegen.approximations import SumApprox
    >>> bounds = {x: (-1, 1), y: (1000, 2000), z: (-10, 3)}
    >>> sum_approx3 = SumApprox(bounds, reltol=1e-3)
    >>> sum_approx2 = SumApprox(bounds, reltol=1e-2)
    >>> sum_approx1 = SumApprox(bounds, reltol=1e-1)
    >>> expr = 3*(x + y + exp(z))
    >>> optimize(expr, [sum_approx3])
    3*(x + y + exp(z))
    >>> optimize(expr, [sum_approx2])
    3*y + 3*exp(z)
    >>> optimize(expr, [sum_approx1])
    3*y

    """

    def __init__(self, bounds, reltol, **kwargs):
        super().__init__(**kwargs)
        # NOTE: ``bounds`` is kept by reference and extended in ``value``
        # (it doubles as a cache of derived per-term bounds).
        self.bounds = bounds
        self.reltol = reltol

    def __call__(self, expr):
        # Factor first so sums nested inside products are exposed to
        # the Add-replacement below.
        return expr.factor().replace(self.query, lambda arg: self.value(arg))

    def query(self, expr):
        # Only sums are candidates for term-dropping.
        return expr.is_Add

    def value(self, add):
        # First pass: derive bounds for univariate monotonic terms from the
        # bounds of their single free symbol.
        for term in add.args:
            if term.is_number or term in self.bounds or len(term.free_symbols) != 1:
                continue
            fs, = term.free_symbols
            if fs not in self.bounds:
                continue
            intrvl = Interval(*self.bounds[fs])
            if is_increasing(term, intrvl, fs):
                # Increasing: low/high of the symbol map to low/high of term.
                self.bounds[term] = (
                    term.subs({fs: self.bounds[fs][0]}),
                    term.subs({fs: self.bounds[fs][1]})
                )
            elif is_decreasing(term, intrvl, fs):
                # Decreasing: bounds swap.
                self.bounds[term] = (
                    term.subs({fs: self.bounds[fs][1]}),
                    term.subs({fs: self.bounds[fs][0]})
                )
            else:
                # Non-monotonic term: give up on this sum entirely.
                return add

        if all(term.is_number or term in self.bounds for term in add.args):
            bounds = [(term, term) if term.is_number else self.bounds[term] for term in add.args]
            # The largest magnitude any term is *guaranteed* to reach;
            # terms whose interval straddles zero guarantee nothing.
            largest_abs_guarantee = 0
            for lo, hi in bounds:
                if lo <= 0 <= hi:
                    continue
                largest_abs_guarantee = max(largest_abs_guarantee,
                                            min(abs(lo), abs(hi)))
            # Keep only terms whose possible magnitude is non-negligible
            # relative to that guarantee.
            new_terms = []
            for term, (lo, hi) in zip(add.args, bounds):
                if max(abs(lo), abs(hi)) >= largest_abs_guarantee*self.reltol:
                    new_terms.append(term)
            return add.func(*new_terms)
        else:
            # Some term has no usable bounds: leave the sum unchanged.
            return add
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class SeriesApprox(Optimization):
    """ Approximates functions by expanding them as a series.

    Parameters
    ==========

    bounds : dict
        Mapping expressions to length 2 tuple of bounds (low, high).
    reltol : number
        Threshold for when to ignore a term. Taken relative to the largest
        lower bound among bounds.
    max_order : int
        Largest order to include in series expansion
    n_point_checks : int (even)
        The validity of an expansion (with respect to reltol) is checked at
        discrete points (linearly spaced over the bounds of the variable). The
        number of points used in this numerical check is given by this number.

    Examples
    ========

    >>> from sympy import sin, pi
    >>> from sympy.abc import x, y
    >>> from sympy.codegen.rewriting import optimize
    >>> from sympy.codegen.approximations import SeriesApprox
    >>> bounds = {x: (-.1, .1), y: (pi-1, pi+1)}
    >>> series_approx2 = SeriesApprox(bounds, reltol=1e-2)
    >>> series_approx3 = SeriesApprox(bounds, reltol=1e-3)
    >>> series_approx8 = SeriesApprox(bounds, reltol=1e-8)
    >>> expr = sin(x)*sin(y)
    >>> optimize(expr, [series_approx2])
    x*(-y + (y - pi)**3/6 + pi)
    >>> optimize(expr, [series_approx3])
    (-x**3/6 + x)*sin(y)
    >>> optimize(expr, [series_approx8])
    sin(x)*sin(y)

    """
    def __init__(self, bounds, reltol, max_order=4, n_point_checks=4, **kwargs):
        super().__init__(**kwargs)
        self.bounds = bounds
        self.reltol = reltol
        self.max_order = max_order
        # An odd count would place a check point exactly at the expansion
        # point, where the series is trivially accurate.
        if n_point_checks % 2 == 1:
            raise ValueError("Checking the solution at expansion point is not helpful")
        self.n_point_checks = n_point_checks
        # Evaluation precision (decimal digits) matched to reltol.
        self._prec = math.ceil(-math.log10(self.reltol))

    def __call__(self, expr):
        return expr.factor().replace(self.query, lambda arg: self.value(arg))

    def query(self, expr):
        # Candidates: concrete (not user-defined) functions of one argument.
        return (expr.is_Function and not isinstance(expr, UndefinedFunction)
                and len(expr.args) == 1)

    def value(self, fexpr):
        free_symbols = fexpr.free_symbols
        if len(free_symbols) != 1:
            return fexpr
        symb, = free_symbols
        if symb not in self.bounds:
            return fexpr
        lo, hi = self.bounds[symb]
        # Expand about the midpoint of the variable's interval.
        x0 = (lo + hi)/2
        cheapest = None
        # Try successively *lower* orders; stop at the first order that
        # fails the accuracy check, keeping the last one that passed.
        for n in range(self.max_order+1, 0, -1):
            fseri = fexpr.series(symb, x0=x0, n=n).removeO()
            n_ok = True
            for idx in range(self.n_point_checks):
                # Linearly spaced check points over [lo, hi].
                x = lo + idx*(hi - lo)/(self.n_point_checks - 1)
                val = fseri.xreplace({symb: x})
                ref = fexpr.xreplace({symb: x})
                # Relative error of the truncated series at this point.
                if abs((1 - val/ref).evalf(self._prec)) > self.reltol:
                    n_ok = False
                    break

            if n_ok:
                cheapest = fseri
            else:
                break

        if cheapest is None:
            # Even the highest order failed: keep the original function.
            return fexpr
        else:
            return cheapest
|
janus/lib/python3.10/site-packages/sympy/codegen/cfunctions.py
ADDED
|
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains SymPy functions corresponding to special math functions in the
|
| 3 |
+
C standard library (since C99, also available in C++11).
|
| 4 |
+
|
| 5 |
+
The functions defined in this module allows the user to express functions such as ``expm1``
|
| 6 |
+
as a SymPy function for symbolic manipulation.
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
from sympy.core.function import ArgumentIndexError, Function
|
| 10 |
+
from sympy.core.numbers import Rational
|
| 11 |
+
from sympy.core.power import Pow
|
| 12 |
+
from sympy.core.singleton import S
|
| 13 |
+
from sympy.functions.elementary.exponential import exp, log
|
| 14 |
+
from sympy.functions.elementary.miscellaneous import sqrt
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _expm1(x):
    # Expanded (naive) form of expm1.
    return exp(x) - S.One


class expm1(Function):
    """
    Represents the exponential function minus one.

    Explanation
    ===========

    The benefit of using ``expm1(x)`` over ``exp(x) - 1``
    is that the latter is prone to cancellation under finite precision
    arithmetic when x is close to zero.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import expm1
    >>> '%.0e' % expm1(1e-99).evalf()
    '1e-99'
    >>> from math import exp
    >>> exp(1e-99) - 1
    0.0
    >>> expm1(x).diff(x)
    exp(x)

    See Also
    ========

    log1p
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        # d/dx (exp(x) - 1) == exp(x)
        return exp(*self.args)

    def _eval_expand_func(self, **hints):
        return _expm1(*self.args)

    def _eval_rewrite_as_exp(self, arg, **kwargs):
        return _expm1(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_exp

    @classmethod
    def eval(cls, arg):
        # Auto-evaluate exactly when exp() itself would; then shift by one.
        evaluated_exp = exp.eval(arg)
        if evaluated_exp is not None:
            return evaluated_exp - S.One

    def _eval_is_real(self):
        return self.args[0].is_real

    def _eval_is_finite(self):
        return self.args[0].is_finite
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def _log1p(x):
    # Expanded (naive) form of log1p.
    return log(x + S.One)


class log1p(Function):
    """
    Represents the natural logarithm of a number plus one.

    Explanation
    ===========

    The benefit of using ``log1p(x)`` over ``log(x + 1)``
    is that the latter is prone to cancellation under finite precision
    arithmetic when x is close to zero.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import log1p
    >>> from sympy import expand_log
    >>> '%.0e' % expand_log(log1p(1e-99)).evalf()
    '1e-99'
    >>> from math import log
    >>> log(1 + 1e-99)
    0.0
    >>> log1p(x).diff(x)
    1/(x + 1)

    See Also
    ========

    expm1
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex == 1:
            # d/dx log(1 + x) == 1/(1 + x)
            return S.One/(self.args[0] + S.One)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_expand_func(self, **hints):
        return _log1p(*self.args)

    def _eval_rewrite_as_log(self, arg, **kwargs):
        return _log1p(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_log

    @classmethod
    def eval(cls, arg):
        if arg.is_Rational:
            # Exact arithmetic: adding one loses nothing.
            return log(arg + S.One)
        elif not arg.is_Float:  # not safe to add 1 to Float
            # Delegate: evaluate only when log() itself would.
            return log.eval(arg + S.One)
        elif arg.is_number:
            # Convert the Float exactly to a Rational before shifting by one.
            return log(Rational(arg) + S.One)

    def _eval_is_real(self):
        return (self.args[0] + S.One).is_nonnegative

    def _eval_is_finite(self):
        # log1p(-1) == log(0) == -oo
        if (self.args[0] + S.One).is_zero:
            return False
        return self.args[0].is_finite

    def _eval_is_positive(self):
        return self.args[0].is_positive

    def _eval_is_zero(self):
        return self.args[0].is_zero

    def _eval_is_nonnegative(self):
        return self.args[0].is_nonnegative
|
| 162 |
+
|
| 163 |
+
# Shared exact integer two (used by several functions below).
_Two = S(2)

def _exp2(x):
    # Expanded form of exp2.
    return Pow(_Two, x)

class exp2(Function):
    """
    Represents the exponential function with base two.

    Explanation
    ===========

    The benefit of using ``exp2(x)`` over ``2**x``
    is that the latter is not as efficient under finite precision
    arithmetic.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import exp2
    >>> exp2(2).evalf() == 4.0
    True
    >>> exp2(x).diff(x)
    log(2)*exp2(x)

    See Also
    ========

    log2
    """
    nargs = 1


    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex == 1:
            # d/dx 2**x == 2**x * log(2)
            return self*log(_Two)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        return _exp2(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow

    def _eval_expand_func(self, **hints):
        return _exp2(*self.args)

    @classmethod
    def eval(cls, arg):
        # Auto-evaluate for any numeric argument via the Pow form.
        if arg.is_number:
            return _exp2(arg)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def _log2(x):
    # Expanded form of log2 (change of base).
    return log(x)/log(_Two)


class log2(Function):
    """
    Represents the logarithm function with base two.

    Explanation
    ===========

    The benefit of using ``log2(x)`` over ``log(x)/log(2)``
    is that the latter is not as efficient under finite precision
    arithmetic.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import log2
    >>> log2(4).evalf() == 2.0
    True
    >>> log2(x).diff(x)
    1/(x*log(2))

    See Also
    ========

    exp2
    log10
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex == 1:
            # d/dx log2(x) == 1/(x*log(2))
            return S.One/(log(_Two)*self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)


    @classmethod
    def eval(cls, arg):
        if arg.is_number:
            result = log.eval(arg, base=_Two)
            # NOTE(review): ``log.eval`` may return None (unevaluated);
            # ``result.is_Atom`` would then raise AttributeError -- confirm
            # numeric args always evaluate here.
            if result.is_Atom:
                return result
        elif arg.is_Pow and arg.base == _Two:
            # log2(2**e) == e
            return arg.exp

    def _eval_evalf(self, *args, **kwargs):
        # Numeric evaluation goes through the log(x)/log(2) form.
        return self.rewrite(log).evalf(*args, **kwargs)

    def _eval_expand_func(self, **hints):
        return _log2(*self.args)

    def _eval_rewrite_as_log(self, arg, **kwargs):
        return _log2(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_log
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _fma(x, y, z):
    # Expanded (non-fused) form: x*y + z.
    return x*y + z


class fma(Function):
    """
    Represents "fused multiply add".

    Explanation
    ===========

    The benefit of using ``fma(x, y, z)`` over ``x*y + z``
    is that, under finite precision arithmetic, the former is
    supported by special instructions on some CPUs.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.codegen.cfunctions import fma
    >>> fma(x, y, z).diff(x)
    y

    """
    nargs = 3

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex in (1, 2):
            # d/dx == y (args[1]) and d/dy == x (args[0]).
            return self.args[2 - argindex]
        elif argindex == 3:
            # d/dz == 1.
            return S.One
        else:
            raise ArgumentIndexError(self, argindex)


    def _eval_expand_func(self, **hints):
        return _fma(*self.args)

    def _eval_rewrite_as_tractable(self, x, y, z, limitvar=None, **kwargs):
        # BUGFIX: rewrite hooks are invoked with all of ``self.args``
        # (nargs == 3). The previous signature accepted a single ``arg``
        # and called ``_fma(arg)``, which always raised TypeError since
        # ``_fma`` requires three arguments.
        return _fma(x, y, z)
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
# Shared exact integer ten.
_Ten = S(10)


def _log10(x):
    # Expanded form of log10 (change of base).
    return log(x)/log(_Ten)


class log10(Function):
    """
    Represents the logarithm function with base ten.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import log10
    >>> log10(100).evalf() == 2.0
    True
    >>> log10(x).diff(x)
    1/(x*log(10))

    See Also
    ========

    log2
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex == 1:
            # d/dx log10(x) == 1/(x*log(10))
            return S.One/(log(_Ten)*self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)


    @classmethod
    def eval(cls, arg):
        if arg.is_number:
            result = log.eval(arg, base=_Ten)
            # NOTE(review): ``log.eval`` may return None (unevaluated);
            # ``result.is_Atom`` would then raise AttributeError -- confirm
            # numeric args always evaluate here.
            if result.is_Atom:
                return result
        elif arg.is_Pow and arg.base == _Ten:
            # log10(10**e) == e
            return arg.exp

    def _eval_expand_func(self, **hints):
        return _log10(*self.args)

    def _eval_rewrite_as_log(self, arg, **kwargs):
        return _log10(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_log
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def _Sqrt(x):
    # Plain Pow form of the square root.
    return Pow(x, S.Half)


class Sqrt(Function):  # 'sqrt' already defined in sympy.functions.elementary.miscellaneous
    """
    Represents the square root function.

    Explanation
    ===========

    The reason why one would use ``Sqrt(x)`` over ``sqrt(x)``
    is that the latter is internally represented as ``Pow(x, S.Half)`` which
    may not be what one wants when doing code-generation.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import Sqrt
    >>> Sqrt(x)
    Sqrt(x)
    >>> Sqrt(x).diff(x)
    1/(2*sqrt(x))

    See Also
    ========

    Cbrt
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        # d/dx sqrt(x) == x**(-1/2)/2
        return Pow(self.args[0], Rational(-1, 2))/_Two

    def _eval_expand_func(self, **hints):
        return _Sqrt(*self.args)

    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        return _Sqrt(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def _Cbrt(x):
    """Cube root of ``x`` expressed as a ``Pow`` with exponent 1/3."""
    exponent = Rational(1, 3)
    return Pow(x, exponent)
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
class Cbrt(Function):  # 'cbrt' already defined in sympy.functions.elementary.miscellaneous
    """
    Represents the cube root function.

    Explanation
    ===========

    In contrast to ``cbrt(x)`` — which is immediately turned into
    ``Pow(x, Rational(1, 3))`` — ``Cbrt(x)`` stays an unevaluated function
    node, which may be preferable when doing code-generation.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import Cbrt
    >>> Cbrt(x)
    Cbrt(x)
    >>> Cbrt(x).diff(x)
    1/(3*x**(2/3))

    See Also
    ========

    Sqrt
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        # d/dx x**(1/3) == x**(-2/3) / 3
        return Pow(self.args[0], Rational(-_Two/3))/3

    def _eval_expand_func(self, **hints):
        # Expand back into the evaluated Pow form.
        return _Cbrt(*self.args)

    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        return _Cbrt(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def _hypot(x, y):
    """Euclidean distance sqrt(x**2 + y**2) as an ordinary expression."""
    sum_of_squares = Pow(x, 2) + Pow(y, 2)
    return sqrt(sum_of_squares)
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
class hypot(Function):
    """
    Represents the hypotenuse function.

    Explanation
    ===========

    The hypotenuse function is provided by e.g. the math library
    in the C99 standard, hence one may want to represent the function
    symbolically when doing code-generation.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.codegen.cfunctions import hypot
    >>> hypot(3, 4).evalf() == 5.0
    True
    >>> hypot(x, y)
    hypot(x, y)
    >>> hypot(x, y).diff(x)
    x/hypot(x, y)

    """
    nargs = 2

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex in (1, 2):
            # d/dx hypot(x, y) == x/hypot(x, y) (the 2/_Two pair cancels
            # symbolically; kept for clarity of the chain-rule derivation).
            return 2*self.args[argindex-1]/(_Two*self.func(*self.args))
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_expand_func(self, **hints):
        return _hypot(*self.args)

    def _eval_rewrite_as_Pow(self, *args, **kwargs):
        # BUGFIX: hypot has nargs == 2, and the rewrite machinery calls this
        # hook with *self.args* unpacked (two positional arguments).  The old
        # signature ``(self, arg, **kwargs)`` raised TypeError on the second
        # argument and would have forwarded only one argument to the
        # two-parameter ``_hypot``.  Accept and forward both arguments.
        return _hypot(*args)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
class isnan(Function):
|
| 536 |
+
nargs = 1
|
janus/lib/python3.10/site-packages/sympy/codegen/cnodes.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AST nodes specific to the C family of languages
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from sympy.codegen.ast import (
|
| 6 |
+
Attribute, Declaration, Node, String, Token, Type, none,
|
| 7 |
+
FunctionCall, CodeBlock
|
| 8 |
+
)
|
| 9 |
+
from sympy.core.basic import Basic
|
| 10 |
+
from sympy.core.containers import Tuple
|
| 11 |
+
from sympy.core.sympify import sympify
|
| 12 |
+
|
| 13 |
+
# Commonly used C type / storage-qualifier singletons.
void = Type('void')  # the C 'void' type

restrict = Attribute('restrict')  # guarantees no pointer aliasing
volatile = Attribute('volatile')  # value may change outside program control
static = Attribute('static')  # internal linkage / static storage duration
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def alignof(arg):
    """ Generate a FunctionCall instance for calling 'alignof'. """
    target = String(arg) if isinstance(arg, str) else arg
    return FunctionCall('alignof', [target])
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def sizeof(arg):
    """ Generate a FunctionCall instance for calling 'sizeof'.

    Plain strings are wrapped in a ``String`` node; anything else (e.g. a
    ``Type``) is passed through unchanged.

    Examples
    ========

    >>> from sympy.codegen.ast import real
    >>> from sympy.codegen.cnodes import sizeof
    >>> from sympy import ccode
    >>> ccode(sizeof(real))
    'sizeof(double)'
    """
    target = String(arg) if isinstance(arg, str) else arg
    return FunctionCall('sizeof', [target])
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class CommaOperator(Basic):
    """ Represents the comma operator in C """
    def __new__(cls, *args):
        # Sympify every operand so plain Python values become sympy objects.
        sympified = [sympify(arg) for arg in args]
        return Basic.__new__(cls, *sympified)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class Label(Node):
    """ Label for use with e.g. goto statement.

    A label has a ``name`` and an optional ``body`` (statements rendered
    after the label).

    Examples
    ========

    >>> from sympy import ccode, Symbol
    >>> from sympy.codegen.cnodes import Label, PreIncrement
    >>> print(ccode(Label('foo')))
    foo:
    >>> print(ccode(Label('bar', [PreIncrement(Symbol('a'))])))
    bar:
    ++(a);

    """
    __slots__ = _fields = ('name', 'body')
    defaults = {'body': none}
    _construct_name = String

    @classmethod
    def _construct_body(cls, itr):
        # Accept either a ready-made CodeBlock or any iterable of statements.
        return itr if isinstance(itr, CodeBlock) else CodeBlock(*itr)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class goto(Token):
    """ Represents goto in C """
    # Single field: the jump target.
    __slots__ = _fields = ('label',)
    # Coerce the constructor argument into a Label node.
    _construct_label = Label
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class PreDecrement(Basic):
    """ Represents the pre-decrement operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PreDecrement
    >>> from sympy import ccode
    >>> ccode(PreDecrement(x))
    '--(x)'

    """
    nargs = 1  # unary operator: exactly one operand
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class PostDecrement(Basic):
    """ Represents the post-decrement operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PostDecrement
    >>> from sympy import ccode
    >>> ccode(PostDecrement(x))
    '(x)--'

    """
    nargs = 1  # unary operator: exactly one operand
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class PreIncrement(Basic):
    """ Represents the pre-increment operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PreIncrement
    >>> from sympy import ccode
    >>> ccode(PreIncrement(x))
    '++(x)'

    """
    nargs = 1  # unary operator: exactly one operand
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class PostIncrement(Basic):
    """ Represents the post-increment operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PostIncrement
    >>> from sympy import ccode
    >>> ccode(PostIncrement(x))
    '(x)++'

    """
    nargs = 1  # unary operator: exactly one operand
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class struct(Node):
    """ Represents a struct in C """
    __slots__ = _fields = ('name', 'declarations')
    defaults = {'name': none}
    _construct_name = String

    @classmethod
    def _construct_declarations(cls, args):
        # Wrap each member (e.g. a Variable) in a Declaration node.
        return Tuple(*map(Declaration, args))
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class union(struct):
    """ Represents a union in C """
    # Identical structure to struct; only the printed keyword differs.
    __slots__ = ()
|
janus/lib/python3.10/site-packages/sympy/codegen/cxxnodes.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AST nodes specific to C++.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from sympy.codegen.ast import Attribute, String, Token, Type, none
|
| 6 |
+
|
| 7 |
+
class using(Token):
    """ Represents a 'using' statement in C++ """
    # 'type' is what is imported; 'alias' is the optional new name,
    # i.e. "using T;" vs "using alias = T;".
    __slots__ = _fields = ('type', 'alias')
    defaults = {'alias': none}
    _construct_type = Type
    _construct_alias = String
|
| 13 |
+
|
| 14 |
+
constexpr = Attribute('constexpr')
|
janus/lib/python3.10/site-packages/sympy/codegen/futils.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import chain
|
| 2 |
+
from sympy.codegen.fnodes import Module
|
| 3 |
+
from sympy.core.symbol import Dummy
|
| 4 |
+
from sympy.printing.fortran import FCodePrinter
|
| 5 |
+
|
| 6 |
+
""" This module collects utilities for rendering Fortran code. """
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def render_as_module(definitions, name, declarations=(), printer_settings=None):
    """ Creates a ``Module`` instance and renders it as a string.

    This generates Fortran source code for a module with the correct ``use`` statements.

    Parameters
    ==========

    definitions : iterable
        Passed to :class:`sympy.codegen.fnodes.Module`.
    name : str
        Passed to :class:`sympy.codegen.fnodes.Module`.
    declarations : iterable
        Passed to :class:`sympy.codegen.fnodes.Module`. It will be extended with
        use statements, 'implicit none' and public list generated from ``definitions``.
    printer_settings : dict
        Passed to ``FCodePrinter`` (default: ``{'standard': 2003, 'source_format': 'free'}``).

    """
    settings = printer_settings or {'standard': 2003, 'source_format': 'free'}
    printer = FCodePrinter(settings)
    # A Dummy symbol is inserted among the declarations as a placeholder; the
    # printer records which modules get used while printing, and afterwards
    # the placeholder is replaced by the generated use/implicit/public lines.
    placeholder = Dummy()
    if isinstance(definitions, Module):
        raise ValueError("This function expects to construct a module on its own.")
    mod = Module(name, chain(declarations, [placeholder]), definitions)
    rendered = printer.doprint(mod)
    use_lines = ['use %s, only: %s' % (mod_name, ', '.join(only))
                 for mod_name, only in printer.module_uses.items()]
    header = ' %s\n' % ' \n'.join(use_lines)
    header += ' implicit none\n'
    header += ' private\n'
    public_names = [str(node.name) for node in definitions
                    if getattr(node, 'name', None)]
    header += ' public %s\n' % ', '.join(public_names)
    return rendered.replace(printer.doprint(placeholder), header)
|
janus/lib/python3.10/site-packages/sympy/codegen/pynodes.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .abstract_nodes import List as AbstractList
|
| 2 |
+
from .ast import Token
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class List(AbstractList):
    """Python-target list node; behavior inherited from AbstractList."""
    pass
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class NumExprEvaluate(Token):
    """Represents a call to ``numexpr``'s :func:`evaluate` function."""
    # Single field: the expression handed to numexpr.evaluate.
    __slots__ = _fields = ('expr',)
|
janus/lib/python3.10/site-packages/sympy/codegen/tests/__init__.py
ADDED
|
File without changes
|
janus/lib/python3.10/site-packages/sympy/codegen/tests/__pycache__/test_abstract_nodes.cpython-310.pyc
ADDED
|
Binary file (856 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/codegen/tests/test_ast.py
ADDED
|
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from sympy.core.containers import Tuple
|
| 3 |
+
from sympy.core.numbers import nan, oo, Float, Integer
|
| 4 |
+
from sympy.core.relational import Lt
|
| 5 |
+
from sympy.core.symbol import symbols, Symbol
|
| 6 |
+
from sympy.functions.elementary.trigonometric import sin
|
| 7 |
+
from sympy.matrices.dense import Matrix
|
| 8 |
+
from sympy.matrices.expressions.matexpr import MatrixSymbol
|
| 9 |
+
from sympy.sets.fancysets import Range
|
| 10 |
+
from sympy.tensor.indexed import Idx, IndexedBase
|
| 11 |
+
from sympy.testing.pytest import raises
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
from sympy.codegen.ast import (
|
| 15 |
+
Assignment, Attribute, aug_assign, CodeBlock, For, Type, Variable, Pointer, Declaration,
|
| 16 |
+
AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment,
|
| 17 |
+
DivAugmentedAssignment, ModAugmentedAssignment, value_const, pointer_const,
|
| 18 |
+
integer, real, complex_, int8, uint8, float16 as f16, float32 as f32,
|
| 19 |
+
float64 as f64, float80 as f80, float128 as f128, complex64 as c64, complex128 as c128,
|
| 20 |
+
While, Scope, String, Print, QuotedString, FunctionPrototype, FunctionDefinition, Return,
|
| 21 |
+
FunctionCall, untyped, IntBaseType, intc, Node, none, NoneToken, Token, Comment
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
# Shared fixtures used by the tests below.
x, y, z, t, x0, x1, x2, a, b = symbols("x, y, z, t, x0, x1, x2, a, b")
n = symbols("n", integer=True)  # integer symbol, used as index/shape
A = MatrixSymbol('A', 3, 1)
mat = Matrix([1, 2, 3])
B = IndexedBase('B')
i = Idx("i", n)
A22 = MatrixSymbol('A22',2,2)
B22 = MatrixSymbol('B22',2,2)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_Assignment():
    """Valid/invalid lhs-rhs combinations for Assignment."""
    # Here we just do things to show they don't error
    Assignment(x, y)
    Assignment(x, 0)
    Assignment(A, mat)
    Assignment(A[1,0], 0)
    Assignment(A[1,0], x)
    Assignment(B[i], x)
    Assignment(B[i], 0)
    a = Assignment(x, y)
    assert a.func(*a.args) == a
    assert a.op == ':='
    # Here we test things to show that they error
    # Matrix to scalar
    raises(ValueError, lambda: Assignment(B[i], A))
    raises(ValueError, lambda: Assignment(B[i], mat))
    raises(ValueError, lambda: Assignment(x, mat))
    raises(ValueError, lambda: Assignment(x, A))
    raises(ValueError, lambda: Assignment(A[1,0], mat))
    # Scalar to matrix
    raises(ValueError, lambda: Assignment(A, x))
    raises(ValueError, lambda: Assignment(A, 0))
    # Non-atomic lhs
    raises(TypeError, lambda: Assignment(mat, A))
    raises(TypeError, lambda: Assignment(0, x))
    raises(TypeError, lambda: Assignment(x*x, 1))
    raises(TypeError, lambda: Assignment(A + A, mat))
    raises(TypeError, lambda: Assignment(B, 0))
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def test_AugAssign():
    """aug_assign factory vs the *AugmentedAssignment classes."""
    # Here we just do things to show they don't error
    aug_assign(x, '+', y)
    aug_assign(x, '+', 0)
    aug_assign(A, '+', mat)
    aug_assign(A[1, 0], '+', 0)
    aug_assign(A[1, 0], '+', x)
    aug_assign(B[i], '+', x)
    aug_assign(B[i], '+', 0)

    # Check creation via aug_assign vs constructor
    for binop, cls in [
            ('+', AddAugmentedAssignment),
            ('-', SubAugmentedAssignment),
            ('*', MulAugmentedAssignment),
            ('/', DivAugmentedAssignment),
            ('%', ModAugmentedAssignment),
            ]:
        a = aug_assign(x, binop, y)
        b = cls(x, y)
        assert a.func(*a.args) == a == b
        assert a.binop == binop
        assert a.op == binop + '='

    # Here we test things to show that they error
    # Matrix to scalar
    raises(ValueError, lambda: aug_assign(B[i], '+', A))
    raises(ValueError, lambda: aug_assign(B[i], '+', mat))
    raises(ValueError, lambda: aug_assign(x, '+', mat))
    raises(ValueError, lambda: aug_assign(x, '+', A))
    raises(ValueError, lambda: aug_assign(A[1, 0], '+', mat))
    # Scalar to matrix
    raises(ValueError, lambda: aug_assign(A, '+', x))
    raises(ValueError, lambda: aug_assign(A, '+', 0))
    # Non-atomic lhs
    raises(TypeError, lambda: aug_assign(mat, '+', A))
    raises(TypeError, lambda: aug_assign(0, '+', x))
    raises(TypeError, lambda: aug_assign(x * x, '+', 1))
    raises(TypeError, lambda: aug_assign(A + A, '+', mat))
    raises(TypeError, lambda: aug_assign(B, '+', 0))
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def test_Assignment_printing():
    """repr of every assignment class follows Cls(lhs, rhs)."""
    assignment_classes = [
        Assignment,
        AddAugmentedAssignment,
        SubAugmentedAssignment,
        MulAugmentedAssignment,
        DivAugmentedAssignment,
        ModAugmentedAssignment,
    ]
    pairs = [
        (x, 2 * y + 2),
        (B[i], x),
        (A22, B22),
        (A[0, 0], x),
    ]

    for cls in assignment_classes:
        for lhs, rhs in pairs:
            a = cls(lhs, rhs)
            assert repr(a) == '%s(%s, %s)' % (cls.__name__, repr(lhs), repr(rhs))
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def test_CodeBlock():
    """Round-trip and lhs/rhs accessors of CodeBlock."""
    c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1))
    assert c.func(*c.args) == c

    assert c.left_hand_sides == Tuple(x, y)
    assert c.right_hand_sides == Tuple(1, x + 1)
|
| 134 |
+
|
| 135 |
+
def test_CodeBlock_topological_sort():
    """Dependency-respecting ordering, cycle detection, free symbols."""
    assignments = [
        Assignment(x, y + z),
        Assignment(z, 1),
        Assignment(t, x),
        Assignment(y, 2),
    ]

    ordered_assignments = [
        # Note that the unrelated z=1 and y=2 are kept in that order
        Assignment(z, 1),
        Assignment(y, 2),
        Assignment(x, y + z),
        Assignment(t, x),
    ]
    c1 = CodeBlock.topological_sort(assignments)
    assert c1 == CodeBlock(*ordered_assignments)

    # Cycle
    invalid_assignments = [
        Assignment(x, y + z),
        Assignment(z, 1),
        Assignment(y, x),
        Assignment(y, 2),
    ]

    raises(ValueError, lambda: CodeBlock.topological_sort(invalid_assignments))

    # Free symbols
    free_assignments = [
        Assignment(x, y + z),
        Assignment(z, a * b),
        Assignment(t, x),
        Assignment(y, b + 3),
    ]

    free_assignments_ordered = [
        Assignment(z, a * b),
        Assignment(y, b + 3),
        Assignment(x, y + z),
        Assignment(t, x),
    ]

    c2 = CodeBlock.topological_sort(free_assignments)
    assert c2 == CodeBlock(*free_assignments_ordered)
|
| 180 |
+
|
| 181 |
+
def test_CodeBlock_free_symbols():
    """free_symbols excludes symbols assigned within the block."""
    c1 = CodeBlock(
        Assignment(x, y + z),
        Assignment(z, 1),
        Assignment(t, x),
        Assignment(y, 2),
    )
    assert c1.free_symbols == set()

    c2 = CodeBlock(
        Assignment(x, y + z),
        Assignment(z, a * b),
        Assignment(t, x),
        Assignment(y, b + 3),
    )
    assert c2.free_symbols == {a, b}
|
| 197 |
+
|
| 198 |
+
def test_CodeBlock_cse():
    """Common-subexpression elimination over a CodeBlock."""
    c1 = CodeBlock(
        Assignment(y, 1),
        Assignment(x, sin(y)),
        Assignment(z, sin(y)),
        Assignment(t, x*z),
    )
    assert c1.cse() == CodeBlock(
        Assignment(y, 1),
        Assignment(x0, sin(y)),
        Assignment(x, x0),
        Assignment(z, x0),
        Assignment(t, x*z),
    )

    # Multiple assignments to same symbol not supported
    raises(NotImplementedError, lambda: CodeBlock(
        Assignment(x, 1),
        Assignment(y, 1), Assignment(y, 2)
    ).cse())

    # Check auto-generated symbols do not collide with existing ones
    c2 = CodeBlock(
        Assignment(x0, sin(y) + 1),
        Assignment(x1, 2 * sin(y)),
        Assignment(z, x * y),
    )
    assert c2.cse() == CodeBlock(
        Assignment(x2, sin(y)),
        Assignment(x0, x2 + 1),
        Assignment(x1, 2 * x2),
        Assignment(z, x * y),
    )
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def test_CodeBlock_cse__issue_14118():
    """CSE works across matrix assignments (regression test)."""
    # see https://github.com/sympy/sympy/issues/14118
    c = CodeBlock(
        Assignment(A22, Matrix([[x, sin(y)],[3, 4]])),
        Assignment(B22, Matrix([[sin(y), 2*sin(y)], [sin(y)**2, 7]]))
    )
    assert c.cse() == CodeBlock(
        Assignment(x0, sin(y)),
        Assignment(A22, Matrix([[x, x0],[3, 4]])),
        Assignment(B22, Matrix([[x0, 2*x0], [x0**2, 7]]))
    )
|
| 244 |
+
|
| 245 |
+
def test_For():
    """For accepts Range or iterable; rejects a bare symbol."""
    f = For(n, Range(0, 3), (Assignment(A[n, 0], x + n), aug_assign(x, '+', y)))
    f = For(n, (1, 2, 3, 4, 5), (Assignment(A[n, 0], x + n),))
    assert f.func(*f.args) == f
    raises(TypeError, lambda: For(n, x, (x + y,)))
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def test_none():
    """The NoneToken singleton and its equality semantics."""
    assert none.is_Atom
    assert none == none
    class Foo(Token):
        pass
    foo = Foo()
    assert foo != none
    # Deliberate == (not `is`): NoneToken compares equal to Python's None.
    assert none == None
    assert none == NoneToken()
    assert none.func(*none.args) == none
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def test_String():
    """String node: atomicity, equality, subclass distinctness, repr."""
    st = String('foobar')
    assert st.is_Atom
    assert st == String('foobar')
    assert st.text == 'foobar'
    assert st.func(**st.kwargs()) == st
    assert st.func(*st.args) == st


    class Signifier(String):
        pass

    # A subclass with the same text is NOT equal to the base-class instance.
    si = Signifier('foobar')
    assert si != st
    assert si.text == st.text
    s = String('foo')
    assert str(s) == 'foo'
    assert repr(s) == "String('foo')"
|
| 282 |
+
|
| 283 |
+
def test_Comment():
    """Comment stores and stringifies its text verbatim."""
    c = Comment('foobar')
    assert c.text == 'foobar'
    assert str(c) == 'foobar'
|
| 287 |
+
|
| 288 |
+
def test_Node():
    """Bare Node equality and round-trip (locally shadows module-level n)."""
    n = Node()
    assert n == Node()
    assert n.func(*n.args) == n
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def test_Type():
    """Type construction, name/str/repr, idempotence and equality."""
    t = Type('MyType')
    assert len(t.args) == 1
    assert t.name == String('MyType')
    assert str(t) == 'MyType'
    assert repr(t) == "Type(String('MyType'))"
    assert Type(t) == t
    assert t.func(*t.args) == t
    t1 = Type('t1')
    t2 = Type('t2')
    assert t1 != t2
    assert t1 == t1 and t2 == t2
    t1b = Type('t1')
    assert t1 == t1b
    assert t2 != t1b
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def test_Type__from_expr():
    """Type deduction from symbols and Python literals."""
    assert Type.from_expr(i) == integer
    u = symbols('u', real=True)
    assert Type.from_expr(u) == real
    assert Type.from_expr(n) == integer
    assert Type.from_expr(3) == integer
    assert Type.from_expr(3.0) == real
    assert Type.from_expr(3+1j) == complex_
    raises(ValueError, lambda: Type.from_expr(sum))
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def test_Type__cast_check__integers():
    """cast_check: rounding rejection and signed/unsigned range limits."""
    # Rounding
    raises(ValueError, lambda: integer.cast_check(3.5))
    assert integer.cast_check('3') == 3
    assert integer.cast_check(Float('3.0000000000000000000')) == 3
    assert integer.cast_check(Float('3.0000000000000000001')) == 3  # unintuitive maybe?

    # Range
    assert int8.cast_check(127.0) == 127
    raises(ValueError, lambda: int8.cast_check(128))
    assert int8.cast_check(-128) == -128
    raises(ValueError, lambda: int8.cast_check(-129))

    assert uint8.cast_check(0) == 0
    assert uint8.cast_check(128) == 128
    raises(ValueError, lambda: uint8.cast_check(256.0))
    raises(ValueError, lambda: uint8.cast_check(-1))
|
| 339 |
+
|
| 340 |
+
def test_Attribute():
    """Attribute equality depends on both name and parameters."""
    noexcept = Attribute('noexcept')
    assert noexcept == Attribute('noexcept')
    alignas16 = Attribute('alignas', [16])
    alignas32 = Attribute('alignas', [32])
    assert alignas16 != alignas32
    assert alignas16.func(*alignas16.args) == alignas16
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def test_Variable():
    """Variable construction, attrs, and type deduction via .deduced()."""
    v = Variable(x, type=real)
    assert v == Variable(v)
    assert v == Variable('x', type=real)
    assert v.symbol == x
    assert v.type == real
    assert value_const not in v.attrs
    assert v.func(*v.args) == v
    assert str(v) == 'Variable(x, type=real)'

    w = Variable(y, f32, attrs={value_const})
    assert w.symbol == y
    assert w.type == f32
    assert value_const in w.attrs
    assert w.func(*w.args) == w

    v_n = Variable(n, type=Type.from_expr(n))
    assert v_n.type == integer
    assert v_n.func(*v_n.args) == v_n
    v_i = Variable(i, type=Type.from_expr(n))
    assert v_i.type == integer
    assert v_i != v_n

    a_i = Variable.deduced(i)
    assert a_i.type == integer
    assert Variable.deduced(Symbol('x', real=True)).type == real
    assert a_i.func(*a_i.args) == a_i

    # cast_check=False keeps the non-integer value on an integer variable
    v_n2 = Variable.deduced(n, value=3.5, cast_check=False)
    assert v_n2.func(*v_n2.args) == v_n2
    assert abs(v_n2.value - 3.5) < 1e-15
    raises(ValueError, lambda: Variable.deduced(n, value=3.5, cast_check=True))

    v_n3 = Variable.deduced(n)
    assert v_n3.type == integer
    assert str(v_n3) == 'Variable(n, type=integer)'
    assert Variable.deduced(z, value=3).type == integer
    assert Variable.deduced(z, value=3.0).type == real
    assert Variable.deduced(z, value=3.0+1j).type == complex_
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def test_Pointer():
|
| 391 |
+
p = Pointer(x)
|
| 392 |
+
assert p.symbol == x
|
| 393 |
+
assert p.type == untyped
|
| 394 |
+
assert value_const not in p.attrs
|
| 395 |
+
assert pointer_const not in p.attrs
|
| 396 |
+
assert p.func(*p.args) == p
|
| 397 |
+
|
| 398 |
+
u = symbols('u', real=True)
|
| 399 |
+
pu = Pointer(u, type=Type.from_expr(u), attrs={value_const, pointer_const})
|
| 400 |
+
assert pu.symbol is u
|
| 401 |
+
assert pu.type == real
|
| 402 |
+
assert value_const in pu.attrs
|
| 403 |
+
assert pointer_const in pu.attrs
|
| 404 |
+
assert pu.func(*pu.args) == pu
|
| 405 |
+
|
| 406 |
+
i = symbols('i', integer=True)
|
| 407 |
+
deref = pu[i]
|
| 408 |
+
assert deref.indices == (i,)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def test_Declaration():
|
| 412 |
+
u = symbols('u', real=True)
|
| 413 |
+
vu = Variable(u, type=Type.from_expr(u))
|
| 414 |
+
assert Declaration(vu).variable.type == real
|
| 415 |
+
vn = Variable(n, type=Type.from_expr(n))
|
| 416 |
+
assert Declaration(vn).variable.type == integer
|
| 417 |
+
|
| 418 |
+
# PR 19107, does not allow comparison between expressions and Basic
|
| 419 |
+
# lt = StrictLessThan(vu, vn)
|
| 420 |
+
# assert isinstance(lt, StrictLessThan)
|
| 421 |
+
|
| 422 |
+
vuc = Variable(u, Type.from_expr(u), value=3.0, attrs={value_const})
|
| 423 |
+
assert value_const in vuc.attrs
|
| 424 |
+
assert pointer_const not in vuc.attrs
|
| 425 |
+
decl = Declaration(vuc)
|
| 426 |
+
assert decl.variable == vuc
|
| 427 |
+
assert isinstance(decl.variable.value, Float)
|
| 428 |
+
assert decl.variable.value == 3.0
|
| 429 |
+
assert decl.func(*decl.args) == decl
|
| 430 |
+
assert vuc.as_Declaration() == decl
|
| 431 |
+
assert vuc.as_Declaration(value=None, attrs=None) == Declaration(vu)
|
| 432 |
+
|
| 433 |
+
vy = Variable(y, type=integer, value=3)
|
| 434 |
+
decl2 = Declaration(vy)
|
| 435 |
+
assert decl2.variable == vy
|
| 436 |
+
assert decl2.variable.value == Integer(3)
|
| 437 |
+
|
| 438 |
+
vi = Variable(i, type=Type.from_expr(i), value=3.0)
|
| 439 |
+
decl3 = Declaration(vi)
|
| 440 |
+
assert decl3.variable.type == integer
|
| 441 |
+
assert decl3.variable.value == 3.0
|
| 442 |
+
|
| 443 |
+
raises(ValueError, lambda: Declaration(vi, 42))
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
def test_IntBaseType():
|
| 447 |
+
assert intc.name == String('intc')
|
| 448 |
+
assert intc.args == (intc.name,)
|
| 449 |
+
assert str(IntBaseType('a').name) == 'a'
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def test_FloatType():
|
| 453 |
+
assert f16.dig == 3
|
| 454 |
+
assert f32.dig == 6
|
| 455 |
+
assert f64.dig == 15
|
| 456 |
+
assert f80.dig == 18
|
| 457 |
+
assert f128.dig == 33
|
| 458 |
+
|
| 459 |
+
assert f16.decimal_dig == 5
|
| 460 |
+
assert f32.decimal_dig == 9
|
| 461 |
+
assert f64.decimal_dig == 17
|
| 462 |
+
assert f80.decimal_dig == 21
|
| 463 |
+
assert f128.decimal_dig == 36
|
| 464 |
+
|
| 465 |
+
assert f16.max_exponent == 16
|
| 466 |
+
assert f32.max_exponent == 128
|
| 467 |
+
assert f64.max_exponent == 1024
|
| 468 |
+
assert f80.max_exponent == 16384
|
| 469 |
+
assert f128.max_exponent == 16384
|
| 470 |
+
|
| 471 |
+
assert f16.min_exponent == -13
|
| 472 |
+
assert f32.min_exponent == -125
|
| 473 |
+
assert f64.min_exponent == -1021
|
| 474 |
+
assert f80.min_exponent == -16381
|
| 475 |
+
assert f128.min_exponent == -16381
|
| 476 |
+
|
| 477 |
+
assert abs(f16.eps / Float('0.00097656', precision=16) - 1) < 0.1*10**-f16.dig
|
| 478 |
+
assert abs(f32.eps / Float('1.1920929e-07', precision=32) - 1) < 0.1*10**-f32.dig
|
| 479 |
+
assert abs(f64.eps / Float('2.2204460492503131e-16', precision=64) - 1) < 0.1*10**-f64.dig
|
| 480 |
+
assert abs(f80.eps / Float('1.08420217248550443401e-19', precision=80) - 1) < 0.1*10**-f80.dig
|
| 481 |
+
assert abs(f128.eps / Float(' 1.92592994438723585305597794258492732e-34', precision=128) - 1) < 0.1*10**-f128.dig
|
| 482 |
+
|
| 483 |
+
assert abs(f16.max / Float('65504', precision=16) - 1) < .1*10**-f16.dig
|
| 484 |
+
assert abs(f32.max / Float('3.40282347e+38', precision=32) - 1) < 0.1*10**-f32.dig
|
| 485 |
+
assert abs(f64.max / Float('1.79769313486231571e+308', precision=64) - 1) < 0.1*10**-f64.dig # cf. np.finfo(np.float64).max
|
| 486 |
+
assert abs(f80.max / Float('1.18973149535723176502e+4932', precision=80) - 1) < 0.1*10**-f80.dig
|
| 487 |
+
assert abs(f128.max / Float('1.18973149535723176508575932662800702e+4932', precision=128) - 1) < 0.1*10**-f128.dig
|
| 488 |
+
|
| 489 |
+
# cf. np.finfo(np.float32).tiny
|
| 490 |
+
assert abs(f16.tiny / Float('6.1035e-05', precision=16) - 1) < 0.1*10**-f16.dig
|
| 491 |
+
assert abs(f32.tiny / Float('1.17549435e-38', precision=32) - 1) < 0.1*10**-f32.dig
|
| 492 |
+
assert abs(f64.tiny / Float('2.22507385850720138e-308', precision=64) - 1) < 0.1*10**-f64.dig
|
| 493 |
+
assert abs(f80.tiny / Float('3.36210314311209350626e-4932', precision=80) - 1) < 0.1*10**-f80.dig
|
| 494 |
+
assert abs(f128.tiny / Float('3.3621031431120935062626778173217526e-4932', precision=128) - 1) < 0.1*10**-f128.dig
|
| 495 |
+
|
| 496 |
+
assert f64.cast_check(0.5) == Float(0.5, 17)
|
| 497 |
+
assert abs(f64.cast_check(3.7) - 3.7) < 3e-17
|
| 498 |
+
assert isinstance(f64.cast_check(3), (Float, float))
|
| 499 |
+
|
| 500 |
+
assert f64.cast_nocheck(oo) == float('inf')
|
| 501 |
+
assert f64.cast_nocheck(-oo) == float('-inf')
|
| 502 |
+
assert f64.cast_nocheck(float(oo)) == float('inf')
|
| 503 |
+
assert f64.cast_nocheck(float(-oo)) == float('-inf')
|
| 504 |
+
assert math.isnan(f64.cast_nocheck(nan))
|
| 505 |
+
|
| 506 |
+
assert f32 != f64
|
| 507 |
+
assert f64 == f64.func(*f64.args)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def test_Type__cast_check__floating_point():
|
| 511 |
+
raises(ValueError, lambda: f32.cast_check(123.45678949))
|
| 512 |
+
raises(ValueError, lambda: f32.cast_check(12.345678949))
|
| 513 |
+
raises(ValueError, lambda: f32.cast_check(1.2345678949))
|
| 514 |
+
raises(ValueError, lambda: f32.cast_check(.12345678949))
|
| 515 |
+
assert abs(123.456789049 - f32.cast_check(123.456789049) - 4.9e-8) < 1e-8
|
| 516 |
+
assert abs(0.12345678904 - f32.cast_check(0.12345678904) - 4e-11) < 1e-11
|
| 517 |
+
|
| 518 |
+
dcm21 = Float('0.123456789012345670499') # 21 decimals
|
| 519 |
+
assert abs(dcm21 - f64.cast_check(dcm21) - 4.99e-19) < 1e-19
|
| 520 |
+
|
| 521 |
+
f80.cast_check(Float('0.12345678901234567890103', precision=88))
|
| 522 |
+
raises(ValueError, lambda: f80.cast_check(Float('0.12345678901234567890149', precision=88)))
|
| 523 |
+
|
| 524 |
+
v10 = 12345.67894
|
| 525 |
+
raises(ValueError, lambda: f32.cast_check(v10))
|
| 526 |
+
assert abs(Float(str(v10), precision=64+8) - f64.cast_check(v10)) < v10*1e-16
|
| 527 |
+
|
| 528 |
+
assert abs(f32.cast_check(2147483647) - 2147483650) < 1
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def test_Type__cast_check__complex_floating_point():
|
| 532 |
+
val9_11 = 123.456789049 + 0.123456789049j
|
| 533 |
+
raises(ValueError, lambda: c64.cast_check(.12345678949 + .12345678949j))
|
| 534 |
+
assert abs(val9_11 - c64.cast_check(val9_11) - 4.9e-8) < 1e-8
|
| 535 |
+
|
| 536 |
+
dcm21 = Float('0.123456789012345670499') + 1e-20j # 21 decimals
|
| 537 |
+
assert abs(dcm21 - c128.cast_check(dcm21) - 4.99e-19) < 1e-19
|
| 538 |
+
v19 = Float('0.1234567890123456749') + 1j*Float('0.1234567890123456749')
|
| 539 |
+
raises(ValueError, lambda: c128.cast_check(v19))
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def test_While():
|
| 543 |
+
xpp = AddAugmentedAssignment(x, 1)
|
| 544 |
+
whl1 = While(x < 2, [xpp])
|
| 545 |
+
assert whl1.condition.args[0] == x
|
| 546 |
+
assert whl1.condition.args[1] == 2
|
| 547 |
+
assert whl1.condition == Lt(x, 2, evaluate=False)
|
| 548 |
+
assert whl1.body.args == (xpp,)
|
| 549 |
+
assert whl1.func(*whl1.args) == whl1
|
| 550 |
+
|
| 551 |
+
cblk = CodeBlock(AddAugmentedAssignment(x, 1))
|
| 552 |
+
whl2 = While(x < 2, cblk)
|
| 553 |
+
assert whl1 == whl2
|
| 554 |
+
assert whl1 != While(x < 3, [xpp])
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
def test_Scope():
|
| 558 |
+
assign = Assignment(x, y)
|
| 559 |
+
incr = AddAugmentedAssignment(x, 1)
|
| 560 |
+
scp = Scope([assign, incr])
|
| 561 |
+
cblk = CodeBlock(assign, incr)
|
| 562 |
+
assert scp.body == cblk
|
| 563 |
+
assert scp == Scope(cblk)
|
| 564 |
+
assert scp != Scope([incr, assign])
|
| 565 |
+
assert scp.func(*scp.args) == scp
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def test_Print():
|
| 569 |
+
fmt = "%d %.3f"
|
| 570 |
+
ps = Print([n, x], fmt)
|
| 571 |
+
assert str(ps.format_string) == fmt
|
| 572 |
+
assert ps.print_args == Tuple(n, x)
|
| 573 |
+
assert ps.args == (Tuple(n, x), QuotedString(fmt), none)
|
| 574 |
+
assert ps == Print((n, x), fmt)
|
| 575 |
+
assert ps != Print([x, n], fmt)
|
| 576 |
+
assert ps.func(*ps.args) == ps
|
| 577 |
+
|
| 578 |
+
ps2 = Print([n, x])
|
| 579 |
+
assert ps2 == Print([n, x])
|
| 580 |
+
assert ps2 != ps
|
| 581 |
+
assert ps2.format_string == None
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
def test_FunctionPrototype_and_FunctionDefinition():
|
| 585 |
+
vx = Variable(x, type=real)
|
| 586 |
+
vn = Variable(n, type=integer)
|
| 587 |
+
fp1 = FunctionPrototype(real, 'power', [vx, vn])
|
| 588 |
+
assert fp1.return_type == real
|
| 589 |
+
assert fp1.name == String('power')
|
| 590 |
+
assert fp1.parameters == Tuple(vx, vn)
|
| 591 |
+
assert fp1 == FunctionPrototype(real, 'power', [vx, vn])
|
| 592 |
+
assert fp1 != FunctionPrototype(real, 'power', [vn, vx])
|
| 593 |
+
assert fp1.func(*fp1.args) == fp1
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
body = [Assignment(x, x**n), Return(x)]
|
| 597 |
+
fd1 = FunctionDefinition(real, 'power', [vx, vn], body)
|
| 598 |
+
assert fd1.return_type == real
|
| 599 |
+
assert str(fd1.name) == 'power'
|
| 600 |
+
assert fd1.parameters == Tuple(vx, vn)
|
| 601 |
+
assert fd1.body == CodeBlock(*body)
|
| 602 |
+
assert fd1 == FunctionDefinition(real, 'power', [vx, vn], body)
|
| 603 |
+
assert fd1 != FunctionDefinition(real, 'power', [vx, vn], body[::-1])
|
| 604 |
+
assert fd1.func(*fd1.args) == fd1
|
| 605 |
+
|
| 606 |
+
fp2 = FunctionPrototype.from_FunctionDefinition(fd1)
|
| 607 |
+
assert fp2 == fp1
|
| 608 |
+
|
| 609 |
+
fd2 = FunctionDefinition.from_FunctionPrototype(fp1, body)
|
| 610 |
+
assert fd2 == fd1
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
def test_Return():
|
| 614 |
+
rs = Return(x)
|
| 615 |
+
assert rs.args == (x,)
|
| 616 |
+
assert rs == Return(x)
|
| 617 |
+
assert rs != Return(y)
|
| 618 |
+
assert rs.func(*rs.args) == rs
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def test_FunctionCall():
|
| 622 |
+
fc = FunctionCall('power', (x, 3))
|
| 623 |
+
assert fc.function_args[0] == x
|
| 624 |
+
assert fc.function_args[1] == 3
|
| 625 |
+
assert len(fc.function_args) == 2
|
| 626 |
+
assert isinstance(fc.function_args[1], Integer)
|
| 627 |
+
assert fc == FunctionCall('power', (x, 3))
|
| 628 |
+
assert fc != FunctionCall('power', (3, x))
|
| 629 |
+
assert fc != FunctionCall('Power', (x, 3))
|
| 630 |
+
assert fc.func(*fc.args) == fc
|
| 631 |
+
|
| 632 |
+
fc2 = FunctionCall('fma', [2, 3, 4])
|
| 633 |
+
assert len(fc2.function_args) == 3
|
| 634 |
+
assert fc2.function_args[0] == 2
|
| 635 |
+
assert fc2.function_args[1] == 3
|
| 636 |
+
assert fc2.function_args[2] == 4
|
| 637 |
+
assert str(fc2) in ( # not sure if QuotedString is a better default...
|
| 638 |
+
'FunctionCall(fma, function_args=(2, 3, 4))',
|
| 639 |
+
'FunctionCall("fma", function_args=(2, 3, 4))',
|
| 640 |
+
)
|
| 641 |
+
|
| 642 |
+
def test_ast_replace():
|
| 643 |
+
x = Variable('x', real)
|
| 644 |
+
y = Variable('y', real)
|
| 645 |
+
n = Variable('n', integer)
|
| 646 |
+
|
| 647 |
+
pwer = FunctionDefinition(real, 'pwer', [x, n], [pow(x.symbol, n.symbol)])
|
| 648 |
+
pname = pwer.name
|
| 649 |
+
pcall = FunctionCall('pwer', [y, 3])
|
| 650 |
+
|
| 651 |
+
tree1 = CodeBlock(pwer, pcall)
|
| 652 |
+
assert str(tree1.args[0].name) == 'pwer'
|
| 653 |
+
assert str(tree1.args[1].name) == 'pwer'
|
| 654 |
+
for a, b in zip(tree1, [pwer, pcall]):
|
| 655 |
+
assert a == b
|
| 656 |
+
|
| 657 |
+
tree2 = tree1.replace(pname, String('power'))
|
| 658 |
+
assert str(tree1.args[0].name) == 'pwer'
|
| 659 |
+
assert str(tree1.args[1].name) == 'pwer'
|
| 660 |
+
assert str(tree2.args[0].name) == 'power'
|
| 661 |
+
assert str(tree2.args[1].name) == 'power'
|
janus/lib/python3.10/site-packages/sympy/codegen/tests/test_cfunctions.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sympy.core.numbers import (Rational, pi)
|
| 2 |
+
from sympy.core.singleton import S
|
| 3 |
+
from sympy.core.symbol import (Symbol, symbols)
|
| 4 |
+
from sympy.functions.elementary.exponential import (exp, log)
|
| 5 |
+
from sympy.codegen.cfunctions import (
|
| 6 |
+
expm1, log1p, exp2, log2, fma, log10, Sqrt, Cbrt, hypot
|
| 7 |
+
)
|
| 8 |
+
from sympy.core.function import expand_log
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def test_expm1():
|
| 12 |
+
# Eval
|
| 13 |
+
assert expm1(0) == 0
|
| 14 |
+
|
| 15 |
+
x = Symbol('x', real=True)
|
| 16 |
+
|
| 17 |
+
# Expand and rewrite
|
| 18 |
+
assert expm1(x).expand(func=True) - exp(x) == -1
|
| 19 |
+
assert expm1(x).rewrite('tractable') - exp(x) == -1
|
| 20 |
+
assert expm1(x).rewrite('exp') - exp(x) == -1
|
| 21 |
+
|
| 22 |
+
# Precision
|
| 23 |
+
assert not ((exp(1e-10).evalf() - 1) - 1e-10 - 5e-21) < 1e-22 # for comparison
|
| 24 |
+
assert abs(expm1(1e-10).evalf() - 1e-10 - 5e-21) < 1e-22
|
| 25 |
+
|
| 26 |
+
# Properties
|
| 27 |
+
assert expm1(x).is_real
|
| 28 |
+
assert expm1(x).is_finite
|
| 29 |
+
|
| 30 |
+
# Diff
|
| 31 |
+
assert expm1(42*x).diff(x) - 42*exp(42*x) == 0
|
| 32 |
+
assert expm1(42*x).diff(x) - expm1(42*x).expand(func=True).diff(x) == 0
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def test_log1p():
|
| 36 |
+
# Eval
|
| 37 |
+
assert log1p(0) == 0
|
| 38 |
+
d = S(10)
|
| 39 |
+
assert expand_log(log1p(d**-1000) - log(d**1000 + 1) + log(d**1000)) == 0
|
| 40 |
+
|
| 41 |
+
x = Symbol('x', real=True)
|
| 42 |
+
|
| 43 |
+
# Expand and rewrite
|
| 44 |
+
assert log1p(x).expand(func=True) - log(x + 1) == 0
|
| 45 |
+
assert log1p(x).rewrite('tractable') - log(x + 1) == 0
|
| 46 |
+
assert log1p(x).rewrite('log') - log(x + 1) == 0
|
| 47 |
+
|
| 48 |
+
# Precision
|
| 49 |
+
assert not abs(log(1e-99 + 1).evalf() - 1e-99) < 1e-100 # for comparison
|
| 50 |
+
assert abs(expand_log(log1p(1e-99)).evalf() - 1e-99) < 1e-100
|
| 51 |
+
|
| 52 |
+
# Properties
|
| 53 |
+
assert log1p(-2**Rational(-1, 2)).is_real
|
| 54 |
+
|
| 55 |
+
assert not log1p(-1).is_finite
|
| 56 |
+
assert log1p(pi).is_finite
|
| 57 |
+
|
| 58 |
+
assert not log1p(x).is_positive
|
| 59 |
+
assert log1p(Symbol('y', positive=True)).is_positive
|
| 60 |
+
|
| 61 |
+
assert not log1p(x).is_zero
|
| 62 |
+
assert log1p(Symbol('z', zero=True)).is_zero
|
| 63 |
+
|
| 64 |
+
assert not log1p(x).is_nonnegative
|
| 65 |
+
assert log1p(Symbol('o', nonnegative=True)).is_nonnegative
|
| 66 |
+
|
| 67 |
+
# Diff
|
| 68 |
+
assert log1p(42*x).diff(x) - 42/(42*x + 1) == 0
|
| 69 |
+
assert log1p(42*x).diff(x) - log1p(42*x).expand(func=True).diff(x) == 0
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def test_exp2():
|
| 73 |
+
# Eval
|
| 74 |
+
assert exp2(2) == 4
|
| 75 |
+
|
| 76 |
+
x = Symbol('x', real=True)
|
| 77 |
+
|
| 78 |
+
# Expand
|
| 79 |
+
assert exp2(x).expand(func=True) - 2**x == 0
|
| 80 |
+
|
| 81 |
+
# Diff
|
| 82 |
+
assert exp2(42*x).diff(x) - 42*exp2(42*x)*log(2) == 0
|
| 83 |
+
assert exp2(42*x).diff(x) - exp2(42*x).diff(x) == 0
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def test_log2():
|
| 87 |
+
# Eval
|
| 88 |
+
assert log2(8) == 3
|
| 89 |
+
assert log2(pi) != log(pi)/log(2) # log2 should *save* (CPU) instructions
|
| 90 |
+
|
| 91 |
+
x = Symbol('x', real=True)
|
| 92 |
+
assert log2(x) != log(x)/log(2)
|
| 93 |
+
assert log2(2**x) == x
|
| 94 |
+
|
| 95 |
+
# Expand
|
| 96 |
+
assert log2(x).expand(func=True) - log(x)/log(2) == 0
|
| 97 |
+
|
| 98 |
+
# Diff
|
| 99 |
+
assert log2(42*x).diff() - 1/(log(2)*x) == 0
|
| 100 |
+
assert log2(42*x).diff() - log2(42*x).expand(func=True).diff(x) == 0
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_fma():
|
| 104 |
+
x, y, z = symbols('x y z')
|
| 105 |
+
|
| 106 |
+
# Expand
|
| 107 |
+
assert fma(x, y, z).expand(func=True) - x*y - z == 0
|
| 108 |
+
|
| 109 |
+
expr = fma(17*x, 42*y, 101*z)
|
| 110 |
+
|
| 111 |
+
# Diff
|
| 112 |
+
assert expr.diff(x) - expr.expand(func=True).diff(x) == 0
|
| 113 |
+
assert expr.diff(y) - expr.expand(func=True).diff(y) == 0
|
| 114 |
+
assert expr.diff(z) - expr.expand(func=True).diff(z) == 0
|
| 115 |
+
|
| 116 |
+
assert expr.diff(x) - 17*42*y == 0
|
| 117 |
+
assert expr.diff(y) - 17*42*x == 0
|
| 118 |
+
assert expr.diff(z) - 101 == 0
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def test_log10():
|
| 122 |
+
x = Symbol('x')
|
| 123 |
+
|
| 124 |
+
# Expand
|
| 125 |
+
assert log10(x).expand(func=True) - log(x)/log(10) == 0
|
| 126 |
+
|
| 127 |
+
# Diff
|
| 128 |
+
assert log10(42*x).diff(x) - 1/(log(10)*x) == 0
|
| 129 |
+
assert log10(42*x).diff(x) - log10(42*x).expand(func=True).diff(x) == 0
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def test_Cbrt():
|
| 133 |
+
x = Symbol('x')
|
| 134 |
+
|
| 135 |
+
# Expand
|
| 136 |
+
assert Cbrt(x).expand(func=True) - x**Rational(1, 3) == 0
|
| 137 |
+
|
| 138 |
+
# Diff
|
| 139 |
+
assert Cbrt(42*x).diff(x) - 42*(42*x)**(Rational(1, 3) - 1)/3 == 0
|
| 140 |
+
assert Cbrt(42*x).diff(x) - Cbrt(42*x).expand(func=True).diff(x) == 0
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def test_Sqrt():
|
| 144 |
+
x = Symbol('x')
|
| 145 |
+
|
| 146 |
+
# Expand
|
| 147 |
+
assert Sqrt(x).expand(func=True) - x**S.Half == 0
|
| 148 |
+
|
| 149 |
+
# Diff
|
| 150 |
+
assert Sqrt(42*x).diff(x) - 42*(42*x)**(S.Half - 1)/2 == 0
|
| 151 |
+
assert Sqrt(42*x).diff(x) - Sqrt(42*x).expand(func=True).diff(x) == 0
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def test_hypot():
|
| 155 |
+
x, y = symbols('x y')
|
| 156 |
+
|
| 157 |
+
# Expand
|
| 158 |
+
assert hypot(x, y).expand(func=True) - (x**2 + y**2)**S.Half == 0
|
| 159 |
+
|
| 160 |
+
# Diff
|
| 161 |
+
assert hypot(17*x, 42*y).diff(x).expand(func=True) - hypot(17*x, 42*y).expand(func=True).diff(x) == 0
|
| 162 |
+
assert hypot(17*x, 42*y).diff(y).expand(func=True) - hypot(17*x, 42*y).expand(func=True).diff(y) == 0
|
| 163 |
+
|
| 164 |
+
assert hypot(17*x, 42*y).diff(x).expand(func=True) - 2*17*17*x*((17*x)**2 + (42*y)**2)**Rational(-1, 2)/2 == 0
|
| 165 |
+
assert hypot(17*x, 42*y).diff(y).expand(func=True) - 2*42*42*y*((17*x)**2 + (42*y)**2)**Rational(-1, 2)/2 == 0
|
janus/lib/python3.10/site-packages/sympy/codegen/tests/test_cnodes.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sympy.core.symbol import symbols
|
| 2 |
+
from sympy.printing.codeprinter import ccode
|
| 3 |
+
from sympy.codegen.ast import Declaration, Variable, float64, int64, String, CodeBlock
|
| 4 |
+
from sympy.codegen.cnodes import (
|
| 5 |
+
alignof, CommaOperator, goto, Label, PreDecrement, PostDecrement, PreIncrement, PostIncrement,
|
| 6 |
+
sizeof, union, struct
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
x, y = symbols('x y')
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_alignof():
|
| 13 |
+
ax = alignof(x)
|
| 14 |
+
assert ccode(ax) == 'alignof(x)'
|
| 15 |
+
assert ax.func(*ax.args) == ax
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def test_CommaOperator():
|
| 19 |
+
expr = CommaOperator(PreIncrement(x), 2*x)
|
| 20 |
+
assert ccode(expr) == '(++(x), 2*x)'
|
| 21 |
+
assert expr.func(*expr.args) == expr
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_goto_Label():
|
| 25 |
+
s = 'early_exit'
|
| 26 |
+
g = goto(s)
|
| 27 |
+
assert g.func(*g.args) == g
|
| 28 |
+
assert g != goto('foobar')
|
| 29 |
+
assert ccode(g) == 'goto early_exit'
|
| 30 |
+
|
| 31 |
+
l1 = Label(s)
|
| 32 |
+
assert ccode(l1) == 'early_exit:'
|
| 33 |
+
assert l1 == Label('early_exit')
|
| 34 |
+
assert l1 != Label('foobar')
|
| 35 |
+
|
| 36 |
+
body = [PreIncrement(x)]
|
| 37 |
+
l2 = Label(s, body)
|
| 38 |
+
assert l2.name == String("early_exit")
|
| 39 |
+
assert l2.body == CodeBlock(PreIncrement(x))
|
| 40 |
+
assert ccode(l2) == ("early_exit:\n"
|
| 41 |
+
"++(x);")
|
| 42 |
+
|
| 43 |
+
body = [PreIncrement(x), PreDecrement(y)]
|
| 44 |
+
l2 = Label(s, body)
|
| 45 |
+
assert l2.name == String("early_exit")
|
| 46 |
+
assert l2.body == CodeBlock(PreIncrement(x), PreDecrement(y))
|
| 47 |
+
assert ccode(l2) == ("early_exit:\n"
|
| 48 |
+
"{\n ++(x);\n --(y);\n}")
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def test_PreDecrement():
|
| 52 |
+
p = PreDecrement(x)
|
| 53 |
+
assert p.func(*p.args) == p
|
| 54 |
+
assert ccode(p) == '--(x)'
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def test_PostDecrement():
|
| 58 |
+
p = PostDecrement(x)
|
| 59 |
+
assert p.func(*p.args) == p
|
| 60 |
+
assert ccode(p) == '(x)--'
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def test_PreIncrement():
|
| 64 |
+
p = PreIncrement(x)
|
| 65 |
+
assert p.func(*p.args) == p
|
| 66 |
+
assert ccode(p) == '++(x)'
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def test_PostIncrement():
|
| 70 |
+
p = PostIncrement(x)
|
| 71 |
+
assert p.func(*p.args) == p
|
| 72 |
+
assert ccode(p) == '(x)++'
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def test_sizeof():
|
| 76 |
+
typename = 'unsigned int'
|
| 77 |
+
sz = sizeof(typename)
|
| 78 |
+
assert ccode(sz) == 'sizeof(%s)' % typename
|
| 79 |
+
assert sz.func(*sz.args) == sz
|
| 80 |
+
assert not sz.is_Atom
|
| 81 |
+
assert sz.atoms() == {String('unsigned int'), String('sizeof')}
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test_struct():
|
| 85 |
+
vx, vy = Variable(x, type=float64), Variable(y, type=float64)
|
| 86 |
+
s = struct('vec2', [vx, vy])
|
| 87 |
+
assert s.func(*s.args) == s
|
| 88 |
+
assert s == struct('vec2', (vx, vy))
|
| 89 |
+
assert s != struct('vec2', (vy, vx))
|
| 90 |
+
assert str(s.name) == 'vec2'
|
| 91 |
+
assert len(s.declarations) == 2
|
| 92 |
+
assert all(isinstance(arg, Declaration) for arg in s.declarations)
|
| 93 |
+
assert ccode(s) == (
|
| 94 |
+
"struct vec2 {\n"
|
| 95 |
+
" double x;\n"
|
| 96 |
+
" double y;\n"
|
| 97 |
+
"}")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def test_union():
|
| 101 |
+
vx, vy = Variable(x, type=float64), Variable(y, type=int64)
|
| 102 |
+
u = union('dualuse', [vx, vy])
|
| 103 |
+
assert u.func(*u.args) == u
|
| 104 |
+
assert u == union('dualuse', (vx, vy))
|
| 105 |
+
assert str(u.name) == 'dualuse'
|
| 106 |
+
assert len(u.declarations) == 2
|
| 107 |
+
assert all(isinstance(arg, Declaration) for arg in u.declarations)
|
| 108 |
+
assert ccode(u) == (
|
| 109 |
+
"union dualuse {\n"
|
| 110 |
+
" double x;\n"
|
| 111 |
+
" int64_t y;\n"
|
| 112 |
+
"}")
|
janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc
ADDED
|
Binary file (34.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc
ADDED
|
Binary file (38.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/generators.cpython-310.pyc
ADDED
|
Binary file (8.27 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc
ADDED
|
Binary file (2.12 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc
ADDED
|
Binary file (16.5 kB). View file
|
|
|
janus/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc
ADDED
|
Binary file (8.13 kB). View file
|
|
|