Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so +3 -0
- lib/python3.10/site-packages/numba/cuda/__init__.py +22 -0
- lib/python3.10/site-packages/numba/cuda/api.py +525 -0
- lib/python3.10/site-packages/numba/cuda/api_util.py +30 -0
- lib/python3.10/site-packages/numba/cuda/args.py +77 -0
- lib/python3.10/site-packages/numba/cuda/cg.py +62 -0
- lib/python3.10/site-packages/numba/cuda/codegen.py +378 -0
- lib/python3.10/site-packages/numba/cuda/compiler.py +422 -0
- lib/python3.10/site-packages/numba/cuda/cpp_function_wrappers.cu +47 -0
- lib/python3.10/site-packages/numba/cuda/cuda_fp16.h +0 -0
- lib/python3.10/site-packages/numba/cuda/cuda_fp16.hpp +2465 -0
- lib/python3.10/site-packages/numba/cuda/cuda_paths.py +258 -0
- lib/python3.10/site-packages/numba/cuda/cudadecl.py +806 -0
- lib/python3.10/site-packages/numba/cuda/cudaimpl.py +1055 -0
- lib/python3.10/site-packages/numba/cuda/cudamath.py +140 -0
- lib/python3.10/site-packages/numba/cuda/decorators.py +189 -0
- lib/python3.10/site-packages/numba/cuda/descriptor.py +33 -0
- lib/python3.10/site-packages/numba/cuda/device_init.py +89 -0
- lib/python3.10/site-packages/numba/cuda/deviceufunc.py +908 -0
- lib/python3.10/site-packages/numba/cuda/dispatcher.py +1057 -0
- lib/python3.10/site-packages/numba/cuda/errors.py +59 -0
- lib/python3.10/site-packages/numba/cuda/extending.py +7 -0
- lib/python3.10/site-packages/numba/cuda/initialize.py +13 -0
- lib/python3.10/site-packages/numba/cuda/intrinsic_wrapper.py +77 -0
- lib/python3.10/site-packages/numba/cuda/intrinsics.py +198 -0
- lib/python3.10/site-packages/numba/cuda/libdevice.py +3382 -0
- lib/python3.10/site-packages/numba/cuda/libdevicedecl.py +17 -0
- lib/python3.10/site-packages/numba/cuda/libdevicefuncs.py +1057 -0
- lib/python3.10/site-packages/numba/cuda/libdeviceimpl.py +83 -0
- lib/python3.10/site-packages/numba/cuda/mathimpl.py +448 -0
- lib/python3.10/site-packages/numba/cuda/models.py +48 -0
- lib/python3.10/site-packages/numba/cuda/nvvmutils.py +235 -0
- lib/python3.10/site-packages/numba/cuda/printimpl.py +86 -0
- lib/python3.10/site-packages/numba/cuda/random.py +292 -0
- lib/python3.10/site-packages/numba/cuda/simulator_init.py +17 -0
- lib/python3.10/site-packages/numba/cuda/stubs.py +902 -0
- lib/python3.10/site-packages/numba/cuda/target.py +440 -0
- lib/python3.10/site-packages/numba/cuda/testing.py +202 -0
- lib/python3.10/site-packages/numba/cuda/tests/__init__.py +24 -0
- lib/python3.10/site-packages/numba/cuda/tests/data/__init__.py +0 -0
- lib/python3.10/site-packages/numba/cuda/tests/data/cuda_include.cu +5 -0
- lib/python3.10/site-packages/numba/cuda/tests/data/error.cu +7 -0
- lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.cu +23 -0
- lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.ptx +51 -0
- lib/python3.10/site-packages/numba/cuda/tests/data/warn.cu +7 -0
- lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +76 -0
- lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_montecarlo.py +109 -0
- lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_ufunc.py +50 -0
- lib/python3.10/site-packages/numba/cuda/tests/nocuda/__init__.py +8 -0
.gitattributes
CHANGED
|
@@ -91,3 +91,4 @@ lib/python3.10/site-packages/av/container/streams.cpython-310-x86_64-linux-gnu.s
|
|
| 91 |
lib/python3.10/site-packages/av/data/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 92 |
lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 93 |
lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 91 |
lib/python3.10/site-packages/av/data/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 92 |
lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 93 |
lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 94 |
+
lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:08baa2d768f5ab5f417106d6d927bffd6d7f46d05db674e4db0cb0ba38ce2027
|
| 3 |
+
size 753129
|
lib/python3.10/site-packages/numba/cuda/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba import runtests
from numba.core import config

# Select the backend: the pure-Python CUDA simulator, or the real device
# implementation, depending on the NUMBA_ENABLE_CUDASIM configuration.
if config.ENABLE_CUDASIM:
    from .simulator_init import *
else:
    from .device_init import *
    from .device_init import _auto_device

from numba.cuda.compiler import (compile, compile_for_current_device,
                                 compile_ptx, compile_ptx_for_current_device)

# Are we the numba.cuda built in to upstream Numba, or the out-of-tree
# NVIDIA-maintained target?
implementation = "Built-in"


def test(*args, **kwargs):
    # Run the numba.cuda test suite. `is_available` and `cuda_error` come
    # from the star-import above (simulator or device flavour).
    if not is_available():
        raise cuda_error()

    return runtests.main("numba.cuda.tests", *args, **kwargs)
|
lib/python3.10/site-packages/numba/cuda/api.py
ADDED
|
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
API that are reported to numba.cuda
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import contextlib
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from .cudadrv import devicearray, devices, driver
|
| 12 |
+
from numba.core import config
|
| 13 |
+
from numba.cuda.api_util import prepare_shape_strides_dtype
|
| 14 |
+
|
| 15 |
+
# NDarray device helper
|
| 16 |
+
|
| 17 |
+
require_context = devices.require_context
|
| 18 |
+
current_context = devices.get_context
|
| 19 |
+
gpus = devices.gpus
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@require_context
def from_cuda_array_interface(desc, owner=None, sync=True):
    """Create a DeviceNDArray from a cuda-array-interface description.

    :param desc: a dict conforming to the CUDA Array Interface.
    :param owner: the owner of the underlying memory. The resulting
                  DeviceNDArray will acquire a reference from it.
    :param sync: if ``True``, then the imported stream (if present) will be
                 synchronized.
    """
    version = desc.get('version')
    # Mask introduced in version 1. Version 0 descriptions carry no
    # 'version' key, in which case desc.get() returns None; comparing
    # None with an int would raise TypeError, so guard explicitly.
    if version is not None and version >= 1:
        mask = desc.get('mask')
        # Would ideally be better to detect if the mask is all valid
        if mask is not None:
            raise NotImplementedError('Masked arrays are not supported')

    shape = desc['shape']
    strides = desc.get('strides')
    dtype = np.dtype(desc['typestr'])

    shape, strides, dtype = prepare_shape_strides_dtype(
        shape, strides, dtype, order='C')
    size = driver.memory_size_from_info(shape, strides, dtype.itemsize)

    devptr = driver.get_devptr_for_active_ctx(desc['data'][0])
    data = driver.MemoryPointer(
        current_context(), devptr, size=size, owner=owner)
    stream_ptr = desc.get('stream', None)
    if stream_ptr is not None:
        stream = external_stream(stream_ptr)
        # Synchronize only when both the caller and the global config ask
        # for it; the exporter's stream carries pending work on the data.
        if sync and config.CUDA_ARRAY_INTERFACE_SYNC:
            stream.synchronize()
    else:
        stream = 0  # No "Numba default stream", not the CUDA default stream
    da = devicearray.DeviceNDArray(shape=shape, strides=strides,
                                   dtype=dtype, gpu_data=data,
                                   stream=stream)
    return da
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def as_cuda_array(obj, sync=True):
    """Create a DeviceNDArray from any object that implements
    the :ref:`cuda array interface <cuda-array-interface>`.

    A view of the underlying GPU buffer is created. No copying of the data
    is done. The resulting DeviceNDArray will acquire a reference from `obj`.

    If ``sync`` is ``True``, then the imported stream (if present) will be
    synchronized.
    """
    # Guard clause: reject objects that do not expose the interface.
    if is_cuda_array(obj):
        return from_cuda_array_interface(obj.__cuda_array_interface__,
                                         owner=obj, sync=sync)
    raise TypeError("*obj* doesn't implement the cuda array interface.")
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def is_cuda_array(obj):
    """Test if the object has defined the `__cuda_array_interface__` attribute.

    Does not verify the validity of the interface.
    """
    # EAFP form of hasattr(): only the attribute's presence matters.
    try:
        obj.__cuda_array_interface__
    except AttributeError:
        return False
    return True
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def is_float16_supported():
    """Whether 16-bit floats are supported.

    float16 is always supported in current versions of Numba - returns True.
    """
    # Kept as a callable (rather than a constant) for API compatibility.
    return True
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@require_context
def to_device(obj, stream=0, copy=True, to=None):
    """to_device(obj, stream=0, copy=True, to=None)

    Allocate and transfer a numpy ndarray or structured scalar to the device.

    To copy host->device a numpy array::

        ary = np.arange(10)
        d_ary = cuda.to_device(ary)

    To enqueue the transfer to a stream::

        stream = cuda.stream()
        d_ary = cuda.to_device(ary, stream=stream)

    The resulting ``d_ary`` is a ``DeviceNDArray``.

    To copy device->host::

        hary = d_ary.copy_to_host()

    To copy device->host to an existing array::

        ary = np.empty(shape=d_ary.shape, dtype=d_ary.dtype)
        d_ary.copy_to_host(ary)

    To enqueue the transfer to a stream::

        hary = d_ary.copy_to_host(stream=stream)
    """
    if to is not None:
        # Caller supplied a destination array: optionally fill it, then
        # hand it straight back.
        if copy:
            to.copy_to_device(obj, stream=stream)
        return to
    # No destination: let the device-array machinery allocate one (and
    # populate it when ``copy`` is requested).
    devary, _ = devicearray.auto_device(obj, stream=stream, copy=copy,
                                        user_explicit=True)
    return devary
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@require_context
def device_array(shape, dtype=np.float64, strides=None, order='C', stream=0):
    """device_array(shape, dtype=np.float64, strides=None, order='C', stream=0)

    Allocate an empty device ndarray. Similar to :meth:`numpy.empty`.
    """
    # Normalise the requested layout, then build the device array directly.
    shape, strides, dtype = prepare_shape_strides_dtype(
        shape, strides, dtype, order)
    return devicearray.DeviceNDArray(
        shape=shape, strides=strides, dtype=dtype, stream=stream)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
@require_context
def managed_array(shape, dtype=np.float64, strides=None, order='C', stream=0,
                  attach_global=True):
    """managed_array(shape, dtype=np.float64, strides=None, order='C', stream=0,
                     attach_global=True)

    Allocate a np.ndarray with a buffer that is managed.
    Similar to np.empty().

    Managed memory is supported on Linux / x86 and PowerPC, and is considered
    experimental on Windows and Linux / AArch64.

    :param attach_global: A flag indicating whether to attach globally. Global
                          attachment implies that the memory is accessible from
                          any stream on any device. If ``False``, attachment is
                          *host*, and memory is only accessible by devices
                          with Compute Capability 6.0 and later.
    """
    shape, strides, dtype = prepare_shape_strides_dtype(
        shape, strides, dtype, order)
    nbytes = driver.memory_size_from_info(shape, strides, dtype.itemsize)
    buffer = current_context().memallocmanaged(nbytes,
                                               attach_global=attach_global)
    # Build a host ndarray over the managed buffer, then re-view it as the
    # managed device-array type so it gains device-side behaviour.
    host_view = np.ndarray(shape=shape, strides=strides, dtype=dtype,
                           order=order, buffer=buffer)
    managed_view = host_view.view(type=devicearray.ManagedNDArray)
    managed_view.device_setup(buffer, stream=stream)
    return managed_view
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@require_context
def pinned_array(shape, dtype=np.float64, strides=None, order='C'):
    """pinned_array(shape, dtype=np.float64, strides=None, order='C')

    Allocate an :class:`ndarray <numpy.ndarray>` with a buffer that is pinned
    (pagelocked). Similar to :func:`np.empty() <numpy.empty>`.
    """
    shape, strides, dtype = prepare_shape_strides_dtype(
        shape, strides, dtype, order)
    nbytes = driver.memory_size_from_info(shape, strides, dtype.itemsize)
    # Page-locked host allocation backing an ordinary ndarray.
    buffer = current_context().memhostalloc(nbytes)
    return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
                      buffer=buffer)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@require_context
def mapped_array(shape, dtype=np.float64, strides=None, order='C', stream=0,
                 portable=False, wc=False):
    """mapped_array(shape, dtype=np.float64, strides=None, order='C', stream=0,
                    portable=False, wc=False)

    Allocate a mapped ndarray with a buffer that is pinned and mapped on
    to the device. Similar to np.empty()

    :param portable: a boolean flag to allow the allocated device memory to be
                     usable in multiple devices.
    :param wc: a boolean flag to enable writecombined allocation which is faster
               to write by the host and to read by the device, but slower to
               write by the host and slower to write by the device.
    """
    shape, strides, dtype = prepare_shape_strides_dtype(shape, strides, dtype,
                                                        order)
    bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
    # Forward the documented ``portable`` and ``wc`` flags to the allocator;
    # previously they were accepted by this function but silently ignored.
    buffer = current_context().memhostalloc(bytesize, mapped=True,
                                            portable=portable, wc=wc)
    npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
                       buffer=buffer)
    # Re-view the host array as a mapped device array and attach the buffer.
    mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)
    mappedview.device_setup(buffer, stream=stream)
    return mappedview
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@contextlib.contextmanager
@require_context
def open_ipc_array(handle, shape, dtype, strides=None, offset=0):
    """
    A context manager that opens a IPC *handle* (*CUipcMemHandle*) that is
    represented as a sequence of bytes (e.g. *bytes*, tuple of int)
    and represent it as an array of the given *shape*, *strides* and *dtype*.
    The *strides* can be omitted. In that case, it is assumed to be a 1D
    C contiguous array.

    Yields a device array.

    The IPC handle is closed automatically when context manager exits.
    """
    dtype = np.dtype(dtype)
    # compute size
    size = np.prod(shape) * dtype.itemsize
    # manually recreate the IPC mem handle
    if driver.USE_NV_BINDING:
        driver_handle = driver.binding.CUipcMemHandle()
        driver_handle.reserved = handle
    else:
        driver_handle = driver.drvapi.cu_ipc_mem_handle(*handle)
    # use *IpcHandle* to open the IPC memory
    ipchandle = driver.IpcHandle(None, driver_handle, size, offset=offset)
    try:
        yield ipchandle.open_array(current_context(), shape=shape,
                                   strides=strides, dtype=dtype)
    finally:
        # Close even when the body of the ``with`` block raises; without the
        # try/finally an exception would leak the opened IPC mapping.
        ipchandle.close()
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def synchronize():
    "Synchronize the current context."
    # Delegate to the context's own synchronisation primitive.
    ctx = current_context()
    return ctx.synchronize()
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def _contiguous_strides_like_array(ary):
    """
    Given an array, compute strides for a new contiguous array of the same
    shape.
    """
    # When default strides already produce a contiguous array (either-order
    # contiguous input, or 0/1-D), signal "use defaults" with None.
    flags = ary.flags
    if flags['C_CONTIGUOUS'] or flags['F_CONTIGUOUS'] or ary.ndim <= 1:
        return None

    # Otherwise compute new strides with an algorithm adapted from NumPy
    # v1.17.4's PyArray_NewLikeArrayWithShape in core/src/multiarray/ctors.c:
    # visit the axes in order of increasing stride and give each one the
    # running element count.
    axis_order = sorted(range(ary.ndim), key=lambda axis: ary.strides[axis])

    new_strides = [0] * ary.ndim
    running = ary.dtype.itemsize
    for axis in axis_order:
        new_strides[axis] = running
        running *= ary.shape[axis]
    return tuple(new_strides)
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _order_like_array(ary):
    # Fortran order only when the array is exclusively F-contiguous;
    # anything else (including 1-D arrays, which are both) maps to 'C'.
    f_only = ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']
    return 'F' if f_only else 'C'
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def device_array_like(ary, stream=0):
    """
    Call :func:`device_array() <numba.cuda.device_array>` with information from
    the array.
    """
    # Mirror the input's shape, dtype and memory layout.
    return device_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary), stream=stream)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def mapped_array_like(ary, stream=0, portable=False, wc=False):
    """
    Call :func:`mapped_array() <numba.cuda.mapped_array>` with the information
    from the array.
    """
    # Mirror the input's shape, dtype and memory layout.
    return mapped_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary), stream=stream,
                        portable=portable, wc=wc)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def pinned_array_like(ary):
    """
    Call :func:`pinned_array() <numba.cuda.pinned_array>` with the information
    from the array.
    """
    # Mirror the input's shape, dtype and memory layout.
    return pinned_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary))
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
# Stream helper
|
| 325 |
+
@require_context
def stream():
    """
    Create a CUDA stream that represents a command queue for the device.
    """
    ctx = current_context()
    return ctx.create_stream()


@require_context
def default_stream():
    """
    Get the default CUDA stream. CUDA semantics in general are that the default
    stream is either the legacy default stream or the per-thread default stream
    depending on which CUDA APIs are in use. In Numba, the APIs for the legacy
    default stream are always the ones in use, but an option to use APIs for
    the per-thread default stream may be provided in future.
    """
    ctx = current_context()
    return ctx.get_default_stream()


@require_context
def legacy_default_stream():
    """
    Get the legacy default CUDA stream.
    """
    ctx = current_context()
    return ctx.get_legacy_default_stream()


@require_context
def per_thread_default_stream():
    """
    Get the per-thread default CUDA stream.
    """
    ctx = current_context()
    return ctx.get_per_thread_default_stream()


@require_context
def external_stream(ptr):
    """Create a Numba stream object for a stream allocated outside Numba.

    :param ptr: Pointer to the external stream to wrap in a Numba Stream
    :type ptr: int
    """
    ctx = current_context()
    return ctx.create_external_stream(ptr)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# Page lock
|
| 372 |
+
@require_context
@contextlib.contextmanager
def pinned(*arylist):
    """A context manager for temporary pinning a sequence of host ndarrays.
    """
    # Pin every array up front; the returned pin objects are kept alive in
    # this list for the duration of the ``with`` block and are released
    # when the generator frame is discarded afterwards.
    pins = [
        current_context().mempin(ary, driver.host_pointer(ary),
                                 driver.host_memory_size(ary),
                                 mapped=False)
        for ary in arylist
    ]
    yield
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
@require_context
@contextlib.contextmanager
def mapped(*arylist, **kws):
    """A context manager for temporarily mapping a sequence of host ndarrays.
    """
    assert not kws or 'stream' in kws, "Only accept 'stream' as keyword."
    stream = kws.get('stream', 0)
    pins = []
    device_views = []
    for ary in arylist:
        # Pin with mapping enabled, then expose the pinned buffer as a
        # device array view.
        pm = current_context().mempin(ary, driver.host_pointer(ary),
                                      driver.host_memory_size(ary),
                                      mapped=True)
        pins.append(pm)
        device_views.append(
            devicearray.from_array_like(ary, gpu_data=pm, stream=stream))
    try:
        yield device_views[0] if len(device_views) == 1 else device_views
    finally:
        # When exiting from `with cuda.mapped(*arrs) as mapped_arrs:`, the name
        # `mapped_arrs` stays in scope, blocking automatic unmapping based on
        # reference count. We therefore invoke the finalizer manually.
        for pm in pins:
            pm.free()
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def event(timing=True):
    """
    Create a CUDA event. Timing data is only recorded by the event if it is
    created with ``timing=True``.
    """
    return current_context().create_event(timing=timing)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
event_elapsed_time = driver.event_elapsed_time
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
# Device selection
|
| 428 |
+
|
| 429 |
+
def select_device(device_id):
    """
    Make the context associated with device *device_id* the current context.

    Returns a Device instance.

    Raises exception on error.
    """
    # Acquiring the context has the side effect of making it current.
    return devices.get_context(device_id).device
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def get_current_device():
    "Get current device associated with the current thread"
    return current_context().device


def list_devices():
    "Return a list of all detected devices"
    return devices.gpus


def close():
    """
    Explicitly clears all contexts in the current thread, and destroys all
    contexts if the current thread is the main thread.
    """
    devices.reset()


def _auto_device(ary, stream=0, copy=True):
    # Thin forwarding wrapper kept for the public numba.cuda namespace.
    return devicearray.auto_device(ary, stream=stream, copy=copy)
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def detect():
    """
    Detect supported CUDA hardware and print a summary of the detected hardware.

    Returns a boolean indicating whether any supported devices were detected.
    """
    devlist = list_devices()
    print('Found %d CUDA devices' % len(devlist))
    supported_count = 0
    for dev in devlist:
        # Query the attributes up front, in a fixed order.
        cc = dev.compute_capability
        kernel_timeout = dev.KERNEL_EXEC_TIMEOUT
        tcc = dev.TCC_DRIVER
        fp32_to_fp64_ratio = dev.SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO
        attrs = [
            ('Compute Capability', '%d.%d' % cc),
            ('PCI Device ID', dev.PCI_DEVICE_ID),
            ('PCI Bus ID', dev.PCI_BUS_ID),
            ('UUID', dev.uuid),
            ('Watchdog', 'Enabled' if kernel_timeout else 'Disabled'),
        ]
        # The TCC/WDDM distinction only exists on Windows.
        if os.name == "nt":
            attrs.append(('Compute Mode', 'TCC' if tcc else 'WDDM'))
        attrs.append(('FP32/FP64 Performance Ratio', fp32_to_fp64_ratio))
        if cc < (3, 5):
            support = '[NOT SUPPORTED: CC < 3.5]'
        elif cc < (5, 0):
            support = '[SUPPORTED (DEPRECATED)]'
            supported_count += 1
        else:
            support = '[SUPPORTED]'
            supported_count += 1

        print('id %d %20s %40s' % (dev.id, dev.name, support))
        for key, val in attrs:
            print('%40s: %s' % (key, val))

    print('Summary:')
    print('\t%d/%d devices are supported' % (supported_count, len(devlist)))
    return supported_count > 0
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
@contextlib.contextmanager
def defer_cleanup():
    """
    Temporarily disable memory deallocation.
    Use this to prevent resource deallocation breaking asynchronous execution.

    For example::

        with defer_cleanup():
            # all cleanup is deferred in here
            do_speed_critical_code()
        # cleanup can occur here

    Note: this context manager can be nested.
    """
    # Delegate entirely to the context's own deferral machinery.
    ctx = current_context()
    with ctx.defer_cleanup():
        yield
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
profiling = require_context(driver.profiling)
|
| 524 |
+
profile_start = require_context(driver.profile_start)
|
| 525 |
+
profile_stop = require_context(driver.profile_stop)
|
lib/python3.10/site-packages/numba/cuda/api_util.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def prepare_shape_strides_dtype(shape, strides, dtype, order):
    """Normalise *shape*, *strides* and *dtype* into tuple/dtype form,
    deriving default contiguous strides for *order* when none are given.
    """
    dtype = np.dtype(dtype)
    if isinstance(shape, int):
        shape = (shape,)
    if isinstance(strides, int):
        strides = (strides,)
    else:
        # ``strides`` may be None (or empty): fall back to contiguous
        # strides computed for the requested memory order.
        strides = strides or _fill_stride_by_order(shape, dtype, order)
    return shape, strides, dtype


def _fill_stride_by_order(shape, dtype, order):
    """Compute contiguous strides for *shape* in 'C' or 'F' *order*."""
    ndim = len(shape)
    if ndim == 0:
        return ()
    strides = [0] * ndim
    if order == 'C':
        # Last axis is densest; work outwards from it.
        strides[-1] = dtype.itemsize
        for axis in range(ndim - 2, -1, -1):
            strides[axis] = strides[axis + 1] * shape[axis + 1]
    elif order == 'F':
        # First axis is densest; accumulate forwards.
        strides[0] = dtype.itemsize
        for axis in range(1, ndim):
            strides[axis] = strides[axis - 1] * shape[axis - 1]
    else:
        raise ValueError('must be either C/F order')
    return tuple(strides)
|
lib/python3.10/site-packages/numba/cuda/args.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Hints to wrap Kernel arguments to indicate how to manage host-device
|
| 3 |
+
memory transfers before & after the kernel call.
|
| 4 |
+
"""
|
| 5 |
+
import abc
|
| 6 |
+
|
| 7 |
+
from numba.core.typing.typeof import typeof, Purpose
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ArgHint(metaclass=abc.ABCMeta):
    """Base class for kernel-argument transfer hints.

    Wraps a host value and describes how it is copied to the device
    before a kernel launch and (optionally) copied back afterwards.
    """

    def __init__(self, value):
        self.value = value

    @abc.abstractmethod
    def to_device(self, retr, stream=0):
        """
        :param stream: a stream to use when copying data
        :param retr:
            a list of clean-up work to do after the kernel's been run.
            Append 0-arg lambdas to it!
        :return: a value (usually an `DeviceNDArray`) to be passed to
            the kernel
        """
        pass

    @property
    def _numba_type_(self):
        # Typing of the hint delegates to the wrapped host value.
        return typeof(self.value, Purpose.argument)


class In(ArgHint):
    """Hint: copy the value to the device only; never copy back."""

    def to_device(self, retr, stream=0):
        from .cudadrv.devicearray import auto_device
        devary, _ = auto_device(self.value, stream=stream)
        # A dummy writeback functor to keep devary alive until the kernel
        # is called.
        retr.append(lambda: devary)
        return devary


class Out(ArgHint):
    """Hint: allocate on the device (no copy in); copy back after the run."""

    def to_device(self, retr, stream=0):
        from .cudadrv.devicearray import auto_device
        devary, converted = auto_device(self.value, copy=False, stream=stream)
        if converted:
            # Only schedule a writeback if a device copy was actually made.
            retr.append(lambda: devary.copy_to_host(self.value, stream=stream))
        return devary


class InOut(ArgHint):
    """Hint: copy the value to the device and copy it back afterwards."""

    def to_device(self, retr, stream=0):
        from .cudadrv.devicearray import auto_device
        devary, converted = auto_device(self.value, stream=stream)
        if converted:
            retr.append(lambda: devary.copy_to_host(self.value, stream=stream))
        return devary


def wrap_arg(value, default=InOut):
    """Wrap ``value`` in ``default`` unless it is already an ArgHint."""
    if isinstance(value, ArgHint):
        return value
    return default(value)


__all__ = [
    'In',
    'Out',
    'InOut',

    'ArgHint',
    'wrap_arg',
]
|
lib/python3.10/site-packages/numba/cuda/cg.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.core import types
|
| 2 |
+
from numba.core.extending import overload, overload_method
|
| 3 |
+
from numba.core.typing import signature
|
| 4 |
+
from numba.cuda import nvvmutils
|
| 5 |
+
from numba.cuda.extending import intrinsic
|
| 6 |
+
from numba.cuda.types import grid_group, GridGroup as GridGroupClass
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class GridGroup:
    """A cooperative group representing the entire grid"""

    # NOTE: intentionally has no ``self`` - this is a typing stub only; the
    # real implementation is supplied by the overloads below.
    def sync() -> None:
        """Synchronize this grid group"""


def this_grid() -> GridGroup:
    """Get the current grid group."""
    return GridGroup()


@intrinsic
def _this_grid(typingctx):
    sig = signature(grid_group)

    def codegen(context, builder, sig, args):
        # Handle kind 1 requests the grid-group intrinsic handle.
        handle_kind = context.get_constant(types.int32, 1)
        fn = nvvmutils.declare_cudaCGGetIntrinsicHandle(builder.module)
        return builder.call(fn, (handle_kind,))

    return sig, codegen


@overload(this_grid, target='cuda')
def _ol_this_grid():
    def impl():
        return _this_grid()

    return impl


@intrinsic
def _grid_group_sync(typingctx, group):
    sig = signature(types.int32, group)

    def codegen(context, builder, sig, args):
        # No special synchronization flags are requested.
        flags = context.get_constant(types.int32, 0)
        fn = nvvmutils.declare_cudaCGSynchronize(builder.module)
        return builder.call(fn, (*args, flags))

    return sig, codegen


@overload_method(GridGroupClass, 'sync', target='cuda')
def _ol_grid_group_sync(group):
    def impl(group):
        return _grid_group_sync(group)

    return impl
|
lib/python3.10/site-packages/numba/cuda/codegen.py
ADDED
|
@@ -0,0 +1,378 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llvmlite import ir
|
| 2 |
+
|
| 3 |
+
from numba.core import config, serialize
|
| 4 |
+
from numba.core.codegen import Codegen, CodeLibrary
|
| 5 |
+
from .cudadrv import devices, driver, nvvm, runtime
|
| 6 |
+
from numba.cuda.cudadrv.libs import get_cudalib
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import subprocess
|
| 10 |
+
import tempfile
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
CUDA_TRIPLE = 'nvptx64-nvidia-cuda'


def run_nvdisasm(cubin, flags):
    """Disassemble ``cubin`` by invoking the ``nvdisasm`` tool with ``flags``.

    nvdisasm only accepts input from a file, so the cubin is written to a
    temporary file which is removed again afterwards. Raises RuntimeError
    when the tool is not on PATH.
    """
    tmp_fd = None
    tmp_name = None
    try:
        tmp_fd, tmp_name = tempfile.mkstemp()
        with open(tmp_name, 'wb') as out:
            out.write(cubin)

        try:
            proc = subprocess.run(['nvdisasm', *flags, tmp_name], check=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        except FileNotFoundError as e:
            msg = ("nvdisasm has not been found. You may need "
                   "to install the CUDA toolkit and ensure that "
                   "it is available on your PATH.\n")
            raise RuntimeError(msg) from e
        return proc.stdout.decode('utf-8')
    finally:
        # Clean up the temp file regardless of how disassembly went.
        if tmp_fd is not None:
            os.close(tmp_fd)
        if tmp_name is not None:
            os.unlink(tmp_name)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def disassemble_cubin(cubin):
    """Return the plain disassembly of ``cubin``, with line info included."""
    # -gi requests line info in the disassembly.
    return run_nvdisasm(cubin, ['-gi'])


def disassemble_cubin_for_cfg(cubin):
    """Return the disassembly of ``cubin`` as a control flow graph."""
    # -cfg requests control-flow-graph output instead of a plain listing.
    return run_nvdisasm(cubin, ['-cfg'])
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class CUDACodeLibrary(serialize.ReduceMixin, CodeLibrary):
    """
    The CUDACodeLibrary generates PTX, SASS, cubins for multiple different
    compute capabilities. It also loads cubins to multiple devices (via
    get_cufunc), which may be of different compute capabilities.
    """

    def __init__(self, codegen, name, entry_name=None, max_registers=None,
                 nvvm_options=None):
        """
        codegen:
            Codegen object.
        name:
            Name of the function in the source.
        entry_name:
            Name of the kernel function in the binary, if this is a global
            kernel and not a device function.
        max_registers:
            The maximum register usage to aim for when linking.
        nvvm_options:
            Dict of options to pass to NVVM.
        """
        super().__init__(codegen, name)

        # The llvmlite module for this library.
        self._module = None
        # CodeLibrary objects "linked" into this library. Their modules are
        # compiled from NVVM IR to PTX together with the IR from this module,
        # so they are linked by NVVM at PTX generation time rather than at
        # link time.
        self._linking_libraries = set()
        # Files linked with the generated PTX using the Driver API at link
        # time.
        self._linking_files = set()
        # Whether libcudadevrt must be linked in.
        self.needs_cudadevrt = False

        # Cached LLVM IR strings for all modules.
        self._llvm_strs = None
        # Per-compute-capability caches:
        self._ptx_cache = {}          # CC -> PTX string
        self._ltoir_cache = {}        # CC -> LTO-IR
        self._cubin_cache = {}        # CC -> cubin
        self._linkerinfo_cache = {}   # CC -> linker info output for cubin
        # Device numeric ID -> loaded cufunc.
        self._cufunc_cache = {}

        self._max_registers = max_registers
        self._nvvm_options = {} if nvvm_options is None else nvvm_options
        self._entry_name = entry_name

    @property
    def llvm_strs(self):
        # Stringify the modules lazily and cache the result.
        if self._llvm_strs is None:
            self._llvm_strs = [str(mod) for mod in self.modules]
        return self._llvm_strs

    def get_llvm_str(self):
        return "\n\n".join(self.llvm_strs)

    def _ensure_cc(self, cc):
        # Default to the compute capability of the current context's device.
        if cc is not None:
            return cc
        return devices.get_context().device.compute_capability

    def get_asm_str(self, cc=None):
        """Return (and cache) PTX compiled for the given compute capability."""
        cc = self._ensure_cc(cc)

        cached = self._ptx_cache.get(cc, None)
        if cached:
            return cached

        options = self._nvvm_options.copy()
        options['arch'] = nvvm.get_arch_option(*cc)

        ptx = nvvm.compile_ir(self.llvm_strs, **options)

        # Sometimes the result from NVVM contains trailing whitespace and
        # nulls, which we strip so that the assembly dump looks a little
        # tidier.
        ptx = ptx.decode().strip('\x00').strip()

        if config.DUMP_ASSEMBLY:
            print(("ASSEMBLY %s" % self._name).center(80, '-'))
            print(ptx)
            print('=' * 80)

        self._ptx_cache[cc] = ptx
        return ptx

    def get_ltoir(self, cc=None):
        """Return (and cache) LTO-IR compiled for the given CC."""
        cc = self._ensure_cc(cc)

        cached = self._ltoir_cache.get(cc, None)
        if cached is not None:
            return cached

        options = self._nvvm_options.copy()
        options['arch'] = nvvm.get_arch_option(*cc)
        options['gen-lto'] = None

        ltoir = nvvm.compile_ir(self.llvm_strs, **options)
        self._ltoir_cache[cc] = ltoir
        return ltoir

    def get_cubin(self, cc=None):
        """Return (and cache) a cubin linked for the given CC."""
        cc = self._ensure_cc(cc)

        cached = self._cubin_cache.get(cc, None)
        if cached:
            return cached

        linker = driver.Linker.new(max_registers=self._max_registers, cc=cc)

        if linker.lto:
            linker.add_ltoir(self.get_ltoir(cc=cc))
        else:
            linker.add_ptx(self.get_asm_str(cc=cc).encode())

        for path in self._linking_files:
            linker.add_file_guess_ext(path)
        if self.needs_cudadevrt:
            linker.add_file_guess_ext(get_cudalib('cudadevrt', static=True))

        cubin = linker.complete()
        self._cubin_cache[cc] = cubin
        self._linkerinfo_cache[cc] = linker.info_log
        return cubin

    def get_cufunc(self):
        """Load this library's kernel into the current context's device and
        return the resulting cufunc (cached per device)."""
        if self._entry_name is None:
            msg = "Missing entry_name - are you trying to get the cufunc " \
                  "for a device function?"
            raise RuntimeError(msg)

        ctx = devices.get_context()
        device = ctx.device

        cached = self._cufunc_cache.get(device.id, None)
        if cached:
            return cached

        cubin = self.get_cubin(cc=device.compute_capability)
        module = ctx.create_module_image(cubin)

        # Load the kernel and populate the per-device cache.
        cufunc = module.get_function(self._entry_name)
        self._cufunc_cache[device.id] = cufunc
        return cufunc

    def get_linkerinfo(self, cc):
        try:
            return self._linkerinfo_cache[cc]
        except KeyError:
            raise KeyError(f'No linkerinfo for CC {cc}')

    def get_sass(self, cc=None):
        return disassemble_cubin(self.get_cubin(cc=cc))

    def get_sass_cfg(self, cc=None):
        return disassemble_cubin_for_cfg(self.get_cubin(cc=cc))

    def add_ir_module(self, mod):
        self._raise_if_finalized()
        if self._module is not None:
            raise RuntimeError('CUDACodeLibrary only supports one module')
        self._module = mod

    def add_linking_library(self, library):
        library._ensure_finalized()

        # We don't want to allow linking more libraries in after finalization
        # because our linked libraries are modified by the finalization, and we
        # won't be able to finalize again after adding new ones
        self._raise_if_finalized()

        self._linking_libraries.add(library)

    def add_linking_file(self, filepath):
        self._linking_files.add(filepath)

    def get_function(self, name):
        for fn in self._module.functions:
            if fn.name == name:
                return fn
        raise KeyError(f'Function {name} not found')

    @property
    def modules(self):
        mods = [self._module]
        for lib in self._linking_libraries:
            mods.extend(lib.modules)
        return mods

    @property
    def linking_libraries(self):
        # Libraries we link to may link to other libraries, so we recursively
        # traverse the linking libraries property to build up a list of all
        # linked libraries.
        libs = []
        for lib in self._linking_libraries:
            libs.extend(lib.linking_libraries)
            libs.append(lib)
        return libs

    def finalize(self):
        # Unlike the CPUCodeLibrary, we don't invoke the binding layer here -
        # we only adjust the linkage of functions. Global kernels (with
        # external linkage) have their linkage untouched. Device functions are
        # set linkonce_odr to prevent them appearing in the PTX.

        self._raise_if_finalized()

        # Note in-place modification of the linkage of functions in linked
        # libraries. This presently causes no issues as only device functions
        # are shared across code libraries, so they would always need their
        # linkage set to linkonce_odr. If a future scenario requires different
        # linkages in different code libraries, each library will need its own
        # private copy of its linked modules.
        #
        # See also discussion on PR #890:
        # https://github.com/numba/numba/pull/890
        for library in self._linking_libraries:
            for mod in library.modules:
                for fn in mod.functions:
                    if not fn.is_declaration:
                        fn.linkage = 'linkonce_odr'

        self._finalized = True

    def _reduce_states(self):
        """
        Reduce the instance for serialization. We retain the PTX and cubins,
        but loaded functions are discarded. They are recreated when needed
        after deserialization.
        """
        if self._linking_files:
            msg = 'Cannot pickle CUDACodeLibrary with linking files'
            raise RuntimeError(msg)
        if not self._finalized:
            raise RuntimeError('Cannot pickle unfinalized CUDACodeLibrary')
        return dict(
            codegen=None,
            name=self.name,
            entry_name=self._entry_name,
            llvm_strs=self.llvm_strs,
            ptx_cache=self._ptx_cache,
            cubin_cache=self._cubin_cache,
            linkerinfo_cache=self._linkerinfo_cache,
            max_registers=self._max_registers,
            nvvm_options=self._nvvm_options,
            needs_cudadevrt=self.needs_cudadevrt
        )

    @classmethod
    def _rebuild(cls, codegen, name, entry_name, llvm_strs, ptx_cache,
                 cubin_cache, linkerinfo_cache, max_registers, nvvm_options,
                 needs_cudadevrt):
        """
        Rebuild an instance.
        """
        instance = cls(codegen, name, entry_name=entry_name)

        instance._llvm_strs = llvm_strs
        instance._ptx_cache = ptx_cache
        instance._cubin_cache = cubin_cache
        instance._linkerinfo_cache = linkerinfo_cache

        instance._max_registers = max_registers
        instance._nvvm_options = nvvm_options
        instance.needs_cudadevrt = needs_cudadevrt

        instance._finalized = True

        return instance
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
class JITCUDACodegen(Codegen):
    """
    This codegen implementation for CUDA only generates optimized LLVM IR.
    Generation of PTX code is done separately (see numba.cuda.compiler).
    """

    _library_class = CUDACodeLibrary

    def __init__(self, module_name):
        # No per-codegen state is needed; libraries carry everything.
        pass

    def _create_empty_module(self, name):
        # A fresh module targeting the NVPTX triple/data layout, stamped
        # with the NVVM IR version.
        mod = ir.Module(name)
        mod.triple = CUDA_TRIPLE
        mod.data_layout = nvvm.NVVM().data_layout
        nvvm.add_ir_version(mod)
        return mod

    def _add_module(self, module):
        pass

    def magic_tuple(self):
        """
        Return a tuple unambiguously describing the codegen behaviour.
        """
        ctx = devices.get_context()
        return (runtime.runtime.get_version(), ctx.device.compute_capability)
|
lib/python3.10/site-packages/numba/cuda/compiler.py
ADDED
|
@@ -0,0 +1,422 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llvmlite import ir
|
| 2 |
+
from numba.core.typing.templates import ConcreteTemplate
|
| 3 |
+
from numba.core import types, typing, funcdesc, config, compiler, sigutils
|
| 4 |
+
from numba.core.compiler import (sanitize_compile_result_entries, CompilerBase,
|
| 5 |
+
DefaultPassBuilder, Flags, Option,
|
| 6 |
+
CompileResult)
|
| 7 |
+
from numba.core.compiler_lock import global_compiler_lock
|
| 8 |
+
from numba.core.compiler_machinery import (LoweringPass,
|
| 9 |
+
PassManager, register_pass)
|
| 10 |
+
from numba.core.errors import NumbaInvalidConfigWarning
|
| 11 |
+
from numba.core.typed_passes import (IRLegalization, NativeLowering,
|
| 12 |
+
AnnotateTypes)
|
| 13 |
+
from warnings import warn
|
| 14 |
+
from numba.cuda.api import get_current_device
|
| 15 |
+
from numba.cuda.target import CUDACABICallConv
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _nvvm_options_type(x):
|
| 19 |
+
if x is None:
|
| 20 |
+
return None
|
| 21 |
+
|
| 22 |
+
else:
|
| 23 |
+
assert isinstance(x, dict)
|
| 24 |
+
return x
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class CUDAFlags(Flags):
    """Compiler flags specific to the CUDA target."""

    nvvm_options = Option(
        type=_nvvm_options_type,
        default=None,
        doc="NVVM options",
    )
    compute_capability = Option(
        type=tuple,
        default=None,
        doc="Compute Capability",
    )
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# The CUDACompileResult (CCR) has a specially-defined entry point equal to its
|
| 41 |
+
# id. This is because the entry point is used as a key into a dict of
|
| 42 |
+
# overloads by the base dispatcher. The id of the CCR is the only small and
|
| 43 |
+
# unique property of a CompileResult in the CUDA target (cf. the CPU target,
|
| 44 |
+
# which uses its entry_point, which is a pointer value).
|
| 45 |
+
#
|
| 46 |
+
# This does feel a little hackish, and there are two ways in which this could
|
| 47 |
+
# be improved:
|
| 48 |
+
#
|
| 49 |
+
# 1. We could change the core of Numba so that each CompileResult has its own
|
| 50 |
+
# unique ID that can be used as a key - e.g. a count, similar to the way in
|
| 51 |
+
# which types have unique counts.
|
| 52 |
+
# 2. At some future time when kernel launch uses a compiled function, the entry
|
| 53 |
+
# point will no longer need to be a synthetic value, but will instead be a
|
| 54 |
+
# pointer to the compiled function as in the CPU target.
|
| 55 |
+
|
| 56 |
+
class CUDACompileResult(CompileResult):
    """CompileResult whose entry point is its own id.

    The base dispatcher keys overloads by entry point; on the CUDA target
    the id of the result object is the only small, unique property
    available (cf. the CPU target, which uses a real function pointer).
    """

    @property
    def entry_point(self):
        return id(self)


def cuda_compile_result(**entries):
    """Build a CUDACompileResult from sanitized compile-result entries."""
    return CUDACompileResult(**sanitize_compile_result_entries(entries))
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@register_pass(mutates_CFG=True, analysis_only=False)
class CUDABackend(LoweringPass):
    """Back-end pass: packages lowering output in a compile result."""

    _name = "cuda_backend"

    def __init__(self):
        LoweringPass.__init__(self)

    def run_pass(self, state):
        """
        Back-end: Packages lowering output in a compile result
        """
        lowered = state['cr']
        sig = typing.signature(state.return_type, *state.args)

        state.cr = cuda_compile_result(
            typing_context=state.typingctx,
            target_context=state.targetctx,
            typing_error=state.status.fail_reason,
            type_annotation=state.type_annotation,
            library=state.library,
            call_helper=lowered.call_helper,
            signature=sig,
            fndesc=lowered.fndesc,
        )
        return True
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
@register_pass(mutates_CFG=False, analysis_only=False)
class CreateLibrary(LoweringPass):
    """
    Create a CUDACodeLibrary for the NativeLowering pass to populate. The
    NativeLowering pass will create a code library if none exists, but we need
    to set it up with nvvm_options from the flags if they are present.
    """

    _name = "create_library"

    def __init__(self):
        LoweringPass.__init__(self)

    def run_pass(self, state):
        codegen = state.targetctx.codegen()
        name = state.func_id.func_qualname
        state.library = codegen.create_library(
            name, nvvm_options=state.flags.nvvm_options)
        # Enable object caching upfront so that the library can be serialized.
        state.library.enable_object_caching()
        return True
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class CUDACompiler(CompilerBase):
    """Compilation pipeline for the CUDA target: the default untyped and
    typed stages followed by a CUDA-specific lowering stage."""

    def define_pipelines(self):
        builder = DefaultPassBuilder
        pm = PassManager('cuda')

        # Reuse the stock front/middle-end passes and append CUDA lowering.
        stages = (
            builder.define_untyped_pipeline(self.state),
            builder.define_typed_pipeline(self.state),
            self.define_cuda_lowering_pipeline(self.state),
        )
        for stage in stages:
            pm.passes.extend(stage.passes)

        pm.finalize()
        return [pm]

    def define_cuda_lowering_pipeline(self, state):
        pm = PassManager('cuda_lowering')
        # legalise
        pm.add_pass(IRLegalization,
                    "ensure IR is legal prior to lowering")
        pm.add_pass(AnnotateTypes, "annotate types")

        # lower
        pm.add_pass(CreateLibrary, "create library")
        pm.add_pass(NativeLowering, "native lowering")
        pm.add_pass(CUDABackend, "cuda backend")

        pm.finalize()
        return pm
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@global_compiler_lock
def compile_cuda(pyfunc, return_type, args, debug=False, lineinfo=False,
                 inline=False, fastmath=False, nvvm_options=None,
                 cc=None):
    """Compile ``pyfunc`` for the CUDA target and return the finalized
    compile result.

    Raises ValueError if ``cc`` (compute capability) is not supplied.
    """
    if cc is None:
        raise ValueError('Compute Capability must be supplied')

    from .descriptor import cuda_target
    typingctx = cuda_target.typing_context
    targetctx = cuda_target.target_context

    flags = CUDAFlags()
    # Do not compile (generate native code), just lower (to LLVM)
    flags.no_compile = True
    flags.no_cpython_wrapper = True
    flags.no_cfunc_wrapper = True

    # Both debug and lineinfo turn on debug information in the compiled code,
    # but we keep them separate arguments in case we later want to overload
    # some other behavior on the debug flag. In particular, -opt=3 is not
    # supported with debug enabled, and enabling only lineinfo should not
    # affect the error model.
    if debug or lineinfo:
        flags.debuginfo = True
    if lineinfo:
        flags.dbg_directives_only = True

    flags.error_model = 'python' if debug else 'numpy'

    if inline:
        flags.forceinline = True
    if fastmath:
        flags.fastmath = True
    if nvvm_options:
        flags.nvvm_options = nvvm_options
    flags.compute_capability = cc

    # Run compilation pipeline
    from numba.core.target_extension import target_override
    with target_override('cuda'):
        cres = compiler.compile_extra(typingctx=typingctx,
                                      targetctx=targetctx,
                                      func=pyfunc,
                                      args=args,
                                      return_type=return_type,
                                      flags=flags,
                                      locals={},
                                      pipeline_class=CUDACompiler)

    library = cres.library
    library.finalize()

    return cres
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def cabi_wrap_function(context, lib, fndesc, wrapper_function_name,
                       nvvm_options):
    """
    Wrap a Numba ABI function in a C ABI wrapper at the NVVM IR level.

    The C ABI wrapper will have the same name as the source Python function.

    :param context: The CUDA target context used to create modules, argument
                    packers, and to obtain the Numba calling convention.
    :param lib: The library containing the already-compiled Numba ABI
                function to be wrapped.
    :param fndesc: The function descriptor of the wrapped function; supplies
                   ``argtypes``, ``restype`` and ``llvm_func_name``.
    :param wrapper_function_name: Name given to the generated C ABI wrapper
                                  function.
    :param nvvm_options: NVVM options dict forwarded to the new library.
    :return: A new, finalized library containing the wrapper, linked against
             ``lib``.
    """
    # The wrapper will be contained in a new library that links to the wrapped
    # function's library
    library = lib.codegen.create_library(f'{lib.name}_function_',
                                         entry_name=wrapper_function_name,
                                         nvvm_options=nvvm_options)
    library.add_linking_library(lib)

    # Determine the caller (C ABI) and wrapper (Numba ABI) function types
    argtypes = fndesc.argtypes
    restype = fndesc.restype
    c_call_conv = CUDACABICallConv(context)
    wrapfnty = c_call_conv.get_function_type(restype, argtypes)
    fnty = context.call_conv.get_function_type(fndesc.restype, argtypes)

    # Create a new module and declare the callee
    wrapper_module = context.create_module("cuda.cabi.wrapper")
    func = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name)

    # Define the caller - populate it with a call to the callee and return
    # its return value

    wrapfn = ir.Function(wrapper_module, wrapfnty, wrapper_function_name)
    builder = ir.IRBuilder(wrapfn.append_basic_block(''))

    # Repack the C ABI arguments into the form the Numba ABI callee expects.
    arginfo = context.get_arg_packer(argtypes)
    callargs = arginfo.from_arguments(builder, wrapfn.args)
    # We get (status, return_value), but we ignore the status since we
    # can't propagate it through the C ABI anyway
    _, return_value = context.call_conv.call_function(
        builder, func, restype, argtypes, callargs)
    builder.ret(return_value)

    library.add_ir_module(wrapper_module)
    library.finalize()
    return library
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
@global_compiler_lock
def compile(pyfunc, sig, debug=False, lineinfo=False, device=True,
            fastmath=False, cc=None, opt=True, abi="c", abi_info=None,
            output='ptx'):
    """Compile a Python function to PTX or LTO-IR for a given set of argument
    types.

    :param pyfunc: The Python function to compile.
    :param sig: The signature representing the function's input and output
                types. If this is a tuple of argument types without a return
                type, the inferred return type is returned by this function. If
                a signature including a return type is passed, the compiled code
                will include a cast from the inferred return type to the
                specified return type, and this function will return the
                specified return type.
    :param debug: Whether to include debug info in the compiled code.
    :type debug: bool
    :param lineinfo: Whether to include a line mapping from the compiled code
                     to the source code. Usually this is used with optimized
                     code (since debug mode would automatically include this),
                     so we want debug info in the LLVM IR but only the line
                     mapping in the final output.
    :type lineinfo: bool
    :param device: Whether to compile a device function.
    :type device: bool
    :param fastmath: Whether to enable fast math flags (ftz=1, prec_sqrt=0,
                     prec_div=0, and fma=1)
    :type fastmath: bool
    :param cc: Compute capability to compile for, as a tuple
               ``(MAJOR, MINOR)``. Defaults to ``(5, 0)``.
    :type cc: tuple
    :param opt: Enable optimizations. Defaults to ``True``.
    :type opt: bool
    :param abi: The ABI for a compiled function - either ``"numba"`` or
                ``"c"``. Note that the Numba ABI is not considered stable.
                The C ABI is only supported for device functions at present.
    :type abi: str
    :param abi_info: A dict of ABI-specific options. The ``"c"`` ABI supports
                     one option, ``"abi_name"``, for providing the wrapper
                     function's name. The ``"numba"`` ABI has no options.
    :type abi_info: dict
    :param output: Type of output to generate, either ``"ptx"`` or ``"ltoir"``.
    :type output: str
    :return: (code, resty): The compiled code and inferred return type
    :rtype: tuple
    """
    # Validate option combinations up front, before doing any compilation
    # work.
    if abi not in ("numba", "c"):
        raise NotImplementedError(f'Unsupported ABI: {abi}')

    if abi == 'c' and not device:
        raise NotImplementedError('The C ABI is not supported for kernels')

    if output not in ("ptx", "ltoir"):
        raise NotImplementedError(f'Unsupported output type: {output}')

    if debug and opt:
        msg = ("debug=True with opt=True (the default) "
               "is not supported by CUDA. This may result in a crash"
               " - set debug=False or opt=False.")
        warn(NumbaInvalidConfigWarning(msg))

    lto = (output == 'ltoir')
    abi_info = abi_info or dict()

    nvvm_options = {
        'fastmath': fastmath,
        'opt': 3 if opt else 0
    }

    if lto:
        # Request LTO-IR generation from NVVM instead of PTX.
        nvvm_options['gen-lto'] = None

    args, return_type = sigutils.normalize_signature(sig)

    # Fall back to the configured default compute capability when none given.
    cc = cc or config.CUDA_DEFAULT_PTX_CC
    cres = compile_cuda(pyfunc, return_type, args, debug=debug,
                        lineinfo=lineinfo, fastmath=fastmath,
                        nvvm_options=nvvm_options, cc=cc)
    resty = cres.signature.return_type

    # Kernels cannot return values; only device functions may.
    if resty and not device and resty != types.void:
        raise TypeError("CUDA kernel must have void return type.")

    tgt = cres.target_context

    if device:
        lib = cres.library
        if abi == "c":
            # Wrap the Numba ABI function in a C ABI wrapper, named after the
            # Python function unless overridden via abi_info['abi_name'].
            wrapper_name = abi_info.get('abi_name', pyfunc.__name__)
            lib = cabi_wrap_function(tgt, lib, cres.fndesc, wrapper_name,
                                     nvvm_options)
    else:
        # Kernel compilation: the kernel wrapper needs source location info.
        code = pyfunc.__code__
        filename = code.co_filename
        linenum = code.co_firstlineno

        lib, kernel = tgt.prepare_cuda_kernel(cres.library, cres.fndesc, debug,
                                              lineinfo, nvvm_options, filename,
                                              linenum)

    if lto:
        code = lib.get_ltoir(cc=cc)
    else:
        code = lib.get_asm_str(cc=cc)
    return code, resty
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def compile_for_current_device(pyfunc, sig, debug=False, lineinfo=False,
                               device=True, fastmath=False, opt=True,
                               abi="c", abi_info=None, output='ptx'):
    """Compile a Python function to PTX or LTO-IR for a given signature,
    targeting the compute capability of the currently-active device.

    This is a convenience wrapper: it queries the current device for its
    compute capability and forwards everything to :func:`compile`.
    """
    current_cc = get_current_device().compute_capability
    return compile(pyfunc, sig, debug=debug, lineinfo=lineinfo,
                   device=device, fastmath=fastmath, cc=current_cc,
                   opt=opt, abi=abi, abi_info=abi_info, output=output)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def compile_ptx(pyfunc, sig, debug=False, lineinfo=False, device=False,
                fastmath=False, cc=None, opt=True, abi="numba", abi_info=None):
    """Compile a Python function to PTX for a given signature. See
    :func:`compile`. Unlike :func:`compile` (which defaults to a C ABI
    device function), this function defaults to compiling a kernel with
    the Numba ABI."""
    # Delegate to compile(), pinning the output format to PTX.
    return compile(pyfunc, sig, debug=debug, lineinfo=lineinfo,
                   device=device, fastmath=fastmath, cc=cc, opt=opt,
                   abi=abi, abi_info=abi_info, output='ptx')
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def compile_ptx_for_current_device(pyfunc, sig, debug=False, lineinfo=False,
                                   device=False, fastmath=False, opt=True,
                                   abi="numba", abi_info=None):
    """Compile a Python function to PTX for a given signature, targeting the
    compute capability of the currently-active device. See
    :func:`compile_ptx`."""
    device_cc = get_current_device().compute_capability
    return compile_ptx(pyfunc, sig, debug=debug, lineinfo=lineinfo,
                       device=device, fastmath=fastmath, cc=device_cc,
                       opt=opt, abi=abi, abi_info=abi_info)
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def declare_device_function(name, restype, argtypes):
    """Declare an external device function and return its typing key."""
    template = declare_device_function_template(name, restype, argtypes)
    return template.key
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def declare_device_function_template(name, restype, argtypes):
    """Register an external device function with the CUDA typing and target
    contexts, and return the concrete template that types calls to it.

    :param name: Symbol name of the external function.
    :param restype: Numba return type of the external function.
    :param argtypes: Numba argument types of the external function.
    :return: The ``ConcreteTemplate`` subclass registered for the function.
    """
    from .descriptor import cuda_target
    typing_ctx = cuda_target.typing_context
    target_ctx = cuda_target.target_context

    signature = typing.signature(restype, *argtypes)
    extern_fn = ExternFunction(name, signature)

    class device_function_template(ConcreteTemplate):
        key = extern_fn
        cases = [signature]

    descriptor = funcdesc.ExternalFunctionDescriptor(
        name=name, restype=restype, argtypes=argtypes)
    # Register for typing (how calls are typed) and lowering (how calls are
    # emitted) in the CUDA target.
    typing_ctx.insert_user_function(extern_fn, device_function_template)
    target_ctx.insert_user_function(extern_fn, descriptor)

    return device_function_template
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
class ExternFunction(object):
    """A named external device function paired with its Numba signature.

    Instances serve as typing/lowering keys for externally-declared device
    functions.
    """

    def __init__(self, name, sig):
        # Symbol name of the external function and its call signature.
        self.name, self.sig = name, sig
|
lib/python3.10/site-packages/numba/cuda/cpp_function_wrappers.cu
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Numba ABI wrappers for CUDA half-precision (fp16) intrinsics.
 *
 * Numba passes __half values across the ABI as 16-bit shorts, so each
 * wrapper reinterprets its short argument(s) as __half, calls the intrinsic,
 * and reinterprets the result back to short via the output pointer.
 * The int return value is the Numba ABI status code (0 = no Python
 * exception occurred).
 */
#include "cuda_fp16.h"

/* Name of the wrapper generated for intrinsic `fname`. */
#define FNDEF(fname) __numba_wrapper_ ## fname

/* Generate a Numba ABI wrapper for a unary half-precision intrinsic. */
#define UNARY_FUNCTION(fname) extern "C" __device__ int\
FNDEF(fname)( \
  short* return_value,\
  short x\
)\
{\
  __half retval = fname(__short_as_half (x));\
\
  *return_value = __half_as_short (retval);\
  /* Signal that no Python exception occurred */ \
  return 0;\
}\

/* __hdiv is binary, so it gets a hand-written wrapper rather than the
 * unary macro above. */
extern "C" __device__ int
FNDEF(hdiv)(
  short* return_value,
  short x,
  short y
)
{
  __half retval = __hdiv(__short_as_half (x), __short_as_half (y));

  *return_value = __half_as_short (retval);
  // Signal that no Python exception occurred
  return 0;
}

UNARY_FUNCTION(hsin)
UNARY_FUNCTION(hcos)
UNARY_FUNCTION(hlog)
UNARY_FUNCTION(hlog10)
UNARY_FUNCTION(hlog2)
UNARY_FUNCTION(hexp)
UNARY_FUNCTION(hexp10)
UNARY_FUNCTION(hexp2)
UNARY_FUNCTION(hsqrt)
UNARY_FUNCTION(hrsqrt)
UNARY_FUNCTION(hfloor)
UNARY_FUNCTION(hceil)
UNARY_FUNCTION(hrcp)
UNARY_FUNCTION(hrint)
UNARY_FUNCTION(htrunc)
lib/python3.10/site-packages/numba/cuda/cuda_fp16.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
lib/python3.10/site-packages/numba/cuda/cuda_fp16.hpp
ADDED
|
@@ -0,0 +1,2465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_FP16_HPP__)
|
| 51 |
+
#define __CUDA_FP16_HPP__
|
| 52 |
+
|
| 53 |
+
#if !defined(__CUDA_FP16_H__)
|
| 54 |
+
#error "Do not include this file directly. Instead, include cuda_fp16.h."
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
#if !defined(_MSC_VER) && __cplusplus >= 201103L
|
| 58 |
+
# define __CPP_VERSION_AT_LEAST_11_FP16
|
| 59 |
+
#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
|
| 60 |
+
# define __CPP_VERSION_AT_LEAST_11_FP16
|
| 61 |
+
#endif
|
| 62 |
+
|
| 63 |
+
/* C++11 header for std::move.
|
| 64 |
+
* In RTC mode, std::move is provided implicitly; don't include the header
|
| 65 |
+
*/
|
| 66 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP16) && !defined(__CUDACC_RTC__)
|
| 67 |
+
#include <utility>
|
| 68 |
+
#endif /* __cplusplus >= 201103L && !defined(__CUDACC_RTC__) */
|
| 69 |
+
|
| 70 |
+
/* C++ header for std::memcpy (used for type punning in host-side implementations).
|
| 71 |
+
* When compiling as a CUDA source file memcpy is provided implicitly.
|
| 72 |
+
* !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
|
| 73 |
+
*/
|
| 74 |
+
#if defined(__cplusplus) && !defined(__CUDACC__)
|
| 75 |
+
#include <cstring>
|
| 76 |
+
#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
/* Set up function decorations */
|
| 80 |
+
#if defined(__CUDACC__)
|
| 81 |
+
#define __CUDA_FP16_DECL__ static __device__ __inline__
|
| 82 |
+
#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__
|
| 83 |
+
#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
|
| 84 |
+
#define __CUDA_HOSTDEVICE__ __host__ __device__
|
| 85 |
+
#else /* !defined(__CUDACC__) */
|
| 86 |
+
#if defined(__GNUC__)
|
| 87 |
+
#define __CUDA_HOSTDEVICE_FP16_DECL__ static __attribute__ ((unused))
|
| 88 |
+
#else
|
| 89 |
+
#define __CUDA_HOSTDEVICE_FP16_DECL__ static
|
| 90 |
+
#endif /* defined(__GNUC__) */
|
| 91 |
+
#define __CUDA_HOSTDEVICE__
|
| 92 |
+
#endif /* defined(__CUDACC_) */
|
| 93 |
+
|
| 94 |
+
/* Set up structure-alignment attribute */
|
| 95 |
+
#if defined(__CUDACC__)
|
| 96 |
+
#define __CUDA_ALIGN__(align) __align__(align)
|
| 97 |
+
#else
|
| 98 |
+
/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */
|
| 99 |
+
#if __cplusplus >= 201103L
|
| 100 |
+
#define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */
|
| 101 |
+
#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP16)*/
|
| 102 |
+
#if defined(__GNUC__)
|
| 103 |
+
#define __CUDA_ALIGN__(n) __attribute__ ((aligned(n)))
|
| 104 |
+
#elif defined(_MSC_VER)
|
| 105 |
+
#define __CUDA_ALIGN__(n) __declspec(align(n))
|
| 106 |
+
#else
|
| 107 |
+
#define __CUDA_ALIGN__(n)
|
| 108 |
+
#endif /* defined(__GNUC__) */
|
| 109 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
|
| 110 |
+
#endif /* defined(__CUDACC__) */
|
| 111 |
+
|
| 112 |
+
/* Macros to allow half & half2 to be used by inline assembly */
|
| 113 |
+
#define __HALF_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
|
| 114 |
+
#define __HALF_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
|
| 115 |
+
#define __HALF2_TO_UI(var) *(reinterpret_cast<unsigned int *>(&(var)))
|
| 116 |
+
#define __HALF2_TO_CUI(var) *(reinterpret_cast<const unsigned int *>(&(var)))
|
| 117 |
+
|
| 118 |
+
/* Macros for half & half2 binary arithmetic */
|
| 119 |
+
#define __BINARY_OP_HALF_MACRO(name) /* do */ {\
|
| 120 |
+
__half val; \
|
| 121 |
+
asm( "{"#name".f16 %0,%1,%2;\n}" \
|
| 122 |
+
:"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b))); \
|
| 123 |
+
return val; \
|
| 124 |
+
} /* while(0) */
|
| 125 |
+
#define __BINARY_OP_HALF2_MACRO(name) /* do */ {\
|
| 126 |
+
__half2 val; \
|
| 127 |
+
asm( "{"#name".f16x2 %0,%1,%2;\n}" \
|
| 128 |
+
:"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
|
| 129 |
+
return val; \
|
| 130 |
+
} /* while(0) */
|
| 131 |
+
#define __TERNARY_OP_HALF_MACRO(name) /* do */ {\
|
| 132 |
+
__half val; \
|
| 133 |
+
asm( "{"#name".f16 %0,%1,%2,%3;\n}" \
|
| 134 |
+
:"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b)),"h"(__HALF_TO_CUS(c))); \
|
| 135 |
+
return val; \
|
| 136 |
+
} /* while(0) */
|
| 137 |
+
#define __TERNARY_OP_HALF2_MACRO(name) /* do */ {\
|
| 138 |
+
__half2 val; \
|
| 139 |
+
asm( "{"#name".f16x2 %0,%1,%2,%3;\n}" \
|
| 140 |
+
:"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b)),"r"(__HALF2_TO_CUI(c))); \
|
| 141 |
+
return val; \
|
| 142 |
+
} /* while(0) */
|
| 143 |
+
|
| 144 |
+
/**
|
| 145 |
+
* Types which allow static initialization of "half" and "half2" until
|
| 146 |
+
* these become an actual builtin. Note this initialization is as a
|
| 147 |
+
* bitfield representation of "half", and not a conversion from short->half.
|
| 148 |
+
* Such a representation will be deprecated in a future version of CUDA.
|
| 149 |
+
* (Note these are visible to non-nvcc compilers, including C-only compilation)
|
| 150 |
+
*/
|
| 151 |
+
typedef struct __CUDA_ALIGN__(2) {
|
| 152 |
+
unsigned short x;
|
| 153 |
+
} __half_raw;
|
| 154 |
+
|
| 155 |
+
typedef struct __CUDA_ALIGN__(4) {
|
| 156 |
+
unsigned short x;
|
| 157 |
+
unsigned short y;
|
| 158 |
+
} __half2_raw;
|
| 159 |
+
|
| 160 |
+
/* All other definitions in this file are only visible to C++ compilers */
|
| 161 |
+
#if defined(__cplusplus)
|
| 162 |
+
|
| 163 |
+
/* Hide GCC member initialization list warnings because of host/device in-function init requirement */
|
| 164 |
+
#if defined(__GNUC__)
|
| 165 |
+
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
|
| 166 |
+
#pragma GCC diagnostic push
|
| 167 |
+
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
|
| 168 |
+
#pragma GCC diagnostic ignored "-Weffc++"
|
| 169 |
+
#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
|
| 170 |
+
#endif /* defined(__GNUC__) */
|
| 171 |
+
|
| 172 |
+
/* class' : multiple assignment operators specified
|
| 173 |
+
The class has multiple assignment operators of a single type. This warning is informational */
|
| 174 |
+
#if defined(_MSC_VER) && _MSC_VER >= 1500
|
| 175 |
+
#pragma warning( push )
|
| 176 |
+
#pragma warning( disable:4522 )
|
| 177 |
+
#endif /* defined(__GNUC__) */
|
| 178 |
+
|
| 179 |
+
struct __CUDA_ALIGN__(2) __half {
|
| 180 |
+
protected:
|
| 181 |
+
unsigned short __x;
|
| 182 |
+
|
| 183 |
+
public:
|
| 184 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP16)
|
| 185 |
+
__half() = default;
|
| 186 |
+
#else
|
| 187 |
+
__CUDA_HOSTDEVICE__ __half() { }
|
| 188 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
|
| 189 |
+
|
| 190 |
+
/* Convert to/from __half_raw */
|
| 191 |
+
__CUDA_HOSTDEVICE__ __half(const __half_raw &hr) : __x(hr.x) { }
|
| 192 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const __half_raw &hr) { __x = hr.x; return *this; }
|
| 193 |
+
__CUDA_HOSTDEVICE__ volatile __half &operator=(const __half_raw &hr) volatile { __x = hr.x; return *this; }
|
| 194 |
+
__CUDA_HOSTDEVICE__ volatile __half &operator=(const volatile __half_raw &hr) volatile { __x = hr.x; return *this; }
|
| 195 |
+
__CUDA_HOSTDEVICE__ operator __half_raw() const { __half_raw ret; ret.x = __x; return ret; }
|
| 196 |
+
__CUDA_HOSTDEVICE__ operator __half_raw() const volatile { __half_raw ret; ret.x = __x; return ret; }
|
| 197 |
+
|
| 198 |
+
#if !defined(__CUDA_NO_HALF_CONVERSIONS__)
|
| 199 |
+
|
| 200 |
+
/* Construct from float/double */
|
| 201 |
+
__CUDA_HOSTDEVICE__ __half(const float f) { __x = __float2half(f).__x; }
|
| 202 |
+
__CUDA_HOSTDEVICE__ __half(const double f) { __x = __double2half(f).__x; }
|
| 203 |
+
|
| 204 |
+
__CUDA_HOSTDEVICE__ operator float() const { return __half2float(*this); }
|
| 205 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const float f) { __x = __float2half(f).__x; return *this; }
|
| 206 |
+
|
| 207 |
+
/* We omit "cast to double" operator, so as to not be ambiguous about up-cast */
|
| 208 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const double f) { __x = __double2half(f).__x; return *this; }
|
| 209 |
+
|
| 210 |
+
/* Member functions only available to nvcc compilation so far */
|
| 211 |
+
#if defined(__CUDACC__)
|
| 212 |
+
/* Allow automatic construction from types supported natively in hardware */
|
| 213 |
+
/* Note we do avoid constructor init-list because of special host/device compilation rules */
|
| 214 |
+
__CUDA_HOSTDEVICE__ __half(const short val) { __x = __short2half_rn(val).__x; }
|
| 215 |
+
__CUDA_HOSTDEVICE__ __half(const unsigned short val) { __x = __ushort2half_rn(val).__x; }
|
| 216 |
+
__CUDA_HOSTDEVICE__ __half(const int val) { __x = __int2half_rn(val).__x; }
|
| 217 |
+
__CUDA_HOSTDEVICE__ __half(const unsigned int val) { __x = __uint2half_rn(val).__x; }
|
| 218 |
+
__CUDA_HOSTDEVICE__ __half(const long long val) { __x = __ll2half_rn(val).__x; }
|
| 219 |
+
__CUDA_HOSTDEVICE__ __half(const unsigned long long val) { __x = __ull2half_rn(val).__x; }
|
| 220 |
+
|
| 221 |
+
/* Allow automatic casts to supported builtin types, matching all that are permitted with float */
|
| 222 |
+
__CUDA_HOSTDEVICE__ operator short() const { return __half2short_rz(*this); }
|
| 223 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const short val) { __x = __short2half_rn(val).__x; return *this; }
|
| 224 |
+
|
| 225 |
+
__CUDA_HOSTDEVICE__ operator unsigned short() const { return __half2ushort_rz(*this); }
|
| 226 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const unsigned short val) { __x = __ushort2half_rn(val).__x; return *this; }
|
| 227 |
+
|
| 228 |
+
__CUDA_HOSTDEVICE__ operator int() const { return __half2int_rz(*this); }
|
| 229 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const int val) { __x = __int2half_rn(val).__x; return *this; }
|
| 230 |
+
|
| 231 |
+
__CUDA_HOSTDEVICE__ operator unsigned int() const { return __half2uint_rz(*this); }
|
| 232 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const unsigned int val) { __x = __uint2half_rn(val).__x; return *this; }
|
| 233 |
+
|
| 234 |
+
__CUDA_HOSTDEVICE__ operator long long() const { return __half2ll_rz(*this); }
|
| 235 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const long long val) { __x = __ll2half_rn(val).__x; return *this; }
|
| 236 |
+
|
| 237 |
+
__CUDA_HOSTDEVICE__ operator unsigned long long() const { return __half2ull_rz(*this); }
|
| 238 |
+
__CUDA_HOSTDEVICE__ __half &operator=(const unsigned long long val) { __x = __ull2half_rn(val).__x; return *this; }
|
| 239 |
+
|
| 240 |
+
/* Boolean conversion - note both 0 and -0 must return false */
|
| 241 |
+
__CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFFU) != 0U; }
|
| 242 |
+
#endif /* defined(__CUDACC__) */
|
| 243 |
+
#endif /* !defined(__CUDA_NO_HALF_CONVERSIONS__) */
|
| 244 |
+
};
|
| 245 |
+
|
| 246 |
+
/* Global-space operator functions are only available to nvcc compilation */
|
| 247 |
+
#if defined(__CUDACC__)
|
| 248 |
+
|
| 249 |
+
/* Arithmetic FP16 operations only supported on arch >= 5.3 */
|
| 250 |
+
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
|
| 251 |
+
#if !defined(__CUDA_NO_HALF_OPERATORS__)
|
| 252 |
+
/* Some basic arithmetic operations expected of a builtin */
|
| 253 |
+
__device__ __forceinline__ __half operator+(const __half &lh, const __half &rh) { return __hadd(lh, rh); }
|
| 254 |
+
__device__ __forceinline__ __half operator-(const __half &lh, const __half &rh) { return __hsub(lh, rh); }
|
| 255 |
+
__device__ __forceinline__ __half operator*(const __half &lh, const __half &rh) { return __hmul(lh, rh); }
|
| 256 |
+
__device__ __forceinline__ __half operator/(const __half &lh, const __half &rh) { return __hdiv(lh, rh); }
|
| 257 |
+
|
| 258 |
+
__device__ __forceinline__ __half &operator+=(__half &lh, const __half &rh) { lh = __hadd(lh, rh); return lh; }
|
| 259 |
+
__device__ __forceinline__ __half &operator-=(__half &lh, const __half &rh) { lh = __hsub(lh, rh); return lh; }
|
| 260 |
+
__device__ __forceinline__ __half &operator*=(__half &lh, const __half &rh) { lh = __hmul(lh, rh); return lh; }
|
| 261 |
+
__device__ __forceinline__ __half &operator/=(__half &lh, const __half &rh) { lh = __hdiv(lh, rh); return lh; }
|
| 262 |
+
|
| 263 |
+
/* Note for increment and decrement we use the raw value 0x3C00U equating to half(1.0F), to avoid the extra conversion */
|
| 264 |
+
__device__ __forceinline__ __half &operator++(__half &h) { __half_raw one; one.x = 0x3C00U; h += one; return h; }
|
| 265 |
+
__device__ __forceinline__ __half &operator--(__half &h) { __half_raw one; one.x = 0x3C00U; h -= one; return h; }
|
| 266 |
+
__device__ __forceinline__ __half operator++(__half &h, const int ignored) { const __half ret = h; __half_raw one; one.x = 0x3C00U; h += one; return ret; }
|
| 267 |
+
__device__ __forceinline__ __half operator--(__half &h, const int ignored) { const __half ret = h; __half_raw one; one.x = 0x3C00U; h -= one; return ret; }
|
| 268 |
+
|
| 269 |
+
/* Unary plus and inverse operators */
|
| 270 |
+
__device__ __forceinline__ __half operator+(const __half &h) { return h; }
|
| 271 |
+
__device__ __forceinline__ __half operator-(const __half &h) { return __hneg(h); }
|
| 272 |
+
|
| 273 |
+
/* Some basic comparison operations to make it look like a builtin */
|
| 274 |
+
__device__ __forceinline__ bool operator==(const __half &lh, const __half &rh) { return __heq(lh, rh); }
|
| 275 |
+
__device__ __forceinline__ bool operator!=(const __half &lh, const __half &rh) { return __hneu(lh, rh); }
|
| 276 |
+
__device__ __forceinline__ bool operator> (const __half &lh, const __half &rh) { return __hgt(lh, rh); }
|
| 277 |
+
__device__ __forceinline__ bool operator< (const __half &lh, const __half &rh) { return __hlt(lh, rh); }
|
| 278 |
+
__device__ __forceinline__ bool operator>=(const __half &lh, const __half &rh) { return __hge(lh, rh); }
|
| 279 |
+
__device__ __forceinline__ bool operator<=(const __half &lh, const __half &rh) { return __hle(lh, rh); }
|
| 280 |
+
#endif /* !defined(__CUDA_NO_HALF_OPERATORS__) */
|
| 281 |
+
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
|
| 282 |
+
#endif /* defined(__CUDACC__) */
|
| 283 |
+
|
| 284 |
+
/* __half2 is visible to non-nvcc host compilers */
|
| 285 |
+
struct __CUDA_ALIGN__(4) __half2 {
|
| 286 |
+
__half x;
|
| 287 |
+
__half y;
|
| 288 |
+
|
| 289 |
+
// All construct/copy/assign/move
|
| 290 |
+
public:
|
| 291 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP16)
|
| 292 |
+
__half2() = default;
|
| 293 |
+
__CUDA_HOSTDEVICE__ __half2(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); }
|
| 294 |
+
__CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); return *this; }
|
| 295 |
+
#else
|
| 296 |
+
__CUDA_HOSTDEVICE__ __half2() { }
|
| 297 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
|
| 298 |
+
__CUDA_HOSTDEVICE__ __half2(const __half &a, const __half &b) : x(a), y(b) { }
|
| 299 |
+
__CUDA_HOSTDEVICE__ __half2(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); }
|
| 300 |
+
__CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); return *this; }
|
| 301 |
+
|
| 302 |
+
/* Convert to/from __half2_raw */
|
| 303 |
+
__CUDA_HOSTDEVICE__ __half2(const __half2_raw &h2r ) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); }
|
| 304 |
+
__CUDA_HOSTDEVICE__ __half2 &operator=(const __half2_raw &h2r) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); return *this; }
|
| 305 |
+
__CUDA_HOSTDEVICE__ operator __half2_raw() const { __half2_raw ret; ret.x = 0U; ret.y = 0U; __HALF2_TO_UI(ret) = __HALF2_TO_CUI(*this); return ret; }
|
| 306 |
+
};
|
| 307 |
+
|
| 308 |
+
/* Global-space operator functions are only available to nvcc compilation */
|
| 309 |
+
#if defined(__CUDACC__)
|
| 310 |
+
|
| 311 |
+
/* Arithmetic FP16x2 operations only supported on arch >= 5.3 */
|
| 312 |
+
#if (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)) && !defined(__CUDA_NO_HALF2_OPERATORS__)
|
| 313 |
+
|
| 314 |
+
__device__ __forceinline__ __half2 operator+(const __half2 &lh, const __half2 &rh) { return __hadd2(lh, rh); }
|
| 315 |
+
__device__ __forceinline__ __half2 operator-(const __half2 &lh, const __half2 &rh) { return __hsub2(lh, rh); }
|
| 316 |
+
__device__ __forceinline__ __half2 operator*(const __half2 &lh, const __half2 &rh) { return __hmul2(lh, rh); }
|
| 317 |
+
__device__ __forceinline__ __half2 operator/(const __half2 &lh, const __half2 &rh) { return __h2div(lh, rh); }
|
| 318 |
+
|
| 319 |
+
__device__ __forceinline__ __half2& operator+=(__half2 &lh, const __half2 &rh) { lh = __hadd2(lh, rh); return lh; }
|
| 320 |
+
__device__ __forceinline__ __half2& operator-=(__half2 &lh, const __half2 &rh) { lh = __hsub2(lh, rh); return lh; }
|
| 321 |
+
__device__ __forceinline__ __half2& operator*=(__half2 &lh, const __half2 &rh) { lh = __hmul2(lh, rh); return lh; }
|
| 322 |
+
__device__ __forceinline__ __half2& operator/=(__half2 &lh, const __half2 &rh) { lh = __h2div(lh, rh); return lh; }
|
| 323 |
+
|
| 324 |
+
__device__ __forceinline__ __half2 &operator++(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return h; }
|
| 325 |
+
__device__ __forceinline__ __half2 &operator--(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return h; }
|
| 326 |
+
__device__ __forceinline__ __half2 operator++(__half2 &h, const int ignored) { const __half2 ret = h; __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return ret; }
|
| 327 |
+
__device__ __forceinline__ __half2 operator--(__half2 &h, const int ignored) { const __half2 ret = h; __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return ret; }
|
| 328 |
+
|
| 329 |
+
__device__ __forceinline__ __half2 operator+(const __half2 &h) { return h; }
|
| 330 |
+
__device__ __forceinline__ __half2 operator-(const __half2 &h) { return __hneg2(h); }
|
| 331 |
+
|
| 332 |
+
__device__ __forceinline__ bool operator==(const __half2 &lh, const __half2 &rh) { return __hbeq2(lh, rh); }
|
| 333 |
+
__device__ __forceinline__ bool operator!=(const __half2 &lh, const __half2 &rh) { return __hbneu2(lh, rh); }
|
| 334 |
+
__device__ __forceinline__ bool operator>(const __half2 &lh, const __half2 &rh) { return __hbgt2(lh, rh); }
|
| 335 |
+
__device__ __forceinline__ bool operator<(const __half2 &lh, const __half2 &rh) { return __hblt2(lh, rh); }
|
| 336 |
+
__device__ __forceinline__ bool operator>=(const __half2 &lh, const __half2 &rh) { return __hbge2(lh, rh); }
|
| 337 |
+
__device__ __forceinline__ bool operator<=(const __half2 &lh, const __half2 &rh) { return __hble2(lh, rh); }
|
| 338 |
+
|
| 339 |
+
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */
|
| 340 |
+
#endif /* defined(__CUDACC__) */
|
| 341 |
+
|
| 342 |
+
/* Restore warning for multiple assignment operators */
|
| 343 |
+
#if defined(_MSC_VER) && _MSC_VER >= 1500
|
| 344 |
+
#pragma warning( pop )
|
| 345 |
+
#endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */
|
| 346 |
+
|
| 347 |
+
/* Restore -Weffc++ warnings from here on */
|
| 348 |
+
#if defined(__GNUC__)
|
| 349 |
+
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
|
| 350 |
+
#pragma GCC diagnostic pop
|
| 351 |
+
#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
|
| 352 |
+
#endif /* defined(__GNUC__) */
|
| 353 |
+
|
| 354 |
+
#undef __CUDA_HOSTDEVICE__
|
| 355 |
+
#undef __CUDA_ALIGN__
|
| 356 |
+
|
| 357 |
+
#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */
|
| 358 |
+
static inline unsigned short __internal_float2half(const float f, unsigned int &sign, unsigned int &remainder)
|
| 359 |
+
{
|
| 360 |
+
unsigned int x;
|
| 361 |
+
unsigned int u;
|
| 362 |
+
unsigned int result;
|
| 363 |
+
#if defined(__CUDACC__)
|
| 364 |
+
(void)memcpy(&x, &f, sizeof(f));
|
| 365 |
+
#else
|
| 366 |
+
(void)std::memcpy(&x, &f, sizeof(f));
|
| 367 |
+
#endif
|
| 368 |
+
u = (x & 0x7fffffffU);
|
| 369 |
+
sign = ((x >> 16U) & 0x8000U);
|
| 370 |
+
// NaN/+Inf/-Inf
|
| 371 |
+
if (u >= 0x7f800000U) {
|
| 372 |
+
remainder = 0U;
|
| 373 |
+
result = ((u == 0x7f800000U) ? (sign | 0x7c00U) : 0x7fffU);
|
| 374 |
+
} else if (u > 0x477fefffU) { // Overflows
|
| 375 |
+
remainder = 0x80000000U;
|
| 376 |
+
result = (sign | 0x7bffU);
|
| 377 |
+
} else if (u >= 0x38800000U) { // Normal numbers
|
| 378 |
+
remainder = u << 19U;
|
| 379 |
+
u -= 0x38000000U;
|
| 380 |
+
result = (sign | (u >> 13U));
|
| 381 |
+
} else if (u < 0x33000001U) { // +0/-0
|
| 382 |
+
remainder = u;
|
| 383 |
+
result = sign;
|
| 384 |
+
} else { // Denormal numbers
|
| 385 |
+
const unsigned int exponent = u >> 23U;
|
| 386 |
+
const unsigned int shift = 0x7eU - exponent;
|
| 387 |
+
unsigned int mantissa = (u & 0x7fffffU);
|
| 388 |
+
mantissa |= 0x800000U;
|
| 389 |
+
remainder = mantissa << (32U - shift);
|
| 390 |
+
result = (sign | (mantissa >> shift));
|
| 391 |
+
}
|
| 392 |
+
return static_cast<unsigned short>(result);
|
| 393 |
+
}
|
| 394 |
+
#endif /* #if !defined(__CUDACC_RTC__) */
|
| 395 |
+
|
| 396 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a)
|
| 397 |
+
{
|
| 398 |
+
#if defined(__CUDA_ARCH__)
|
| 399 |
+
__half val;
|
| 400 |
+
asm("{ cvt.rn.f16.f64 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "d"(a));
|
| 401 |
+
return val;
|
| 402 |
+
#else
|
| 403 |
+
__half result;
|
| 404 |
+
// Perform rounding to 11 bits of precision, convert value
|
| 405 |
+
// to float and call existing float to half conversion.
|
| 406 |
+
// By pre-rounding to 11 bits we avoid additional rounding
|
| 407 |
+
// in float to half conversion.
|
| 408 |
+
unsigned long long int absa;
|
| 409 |
+
unsigned long long int ua;
|
| 410 |
+
#if defined(__CUDACC__)
|
| 411 |
+
(void)memcpy(&ua, &a, sizeof(a));
|
| 412 |
+
#else
|
| 413 |
+
(void)std::memcpy(&ua, &a, sizeof(a));
|
| 414 |
+
#endif
|
| 415 |
+
absa = (ua & 0x7fffffffffffffffULL);
|
| 416 |
+
if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL))
|
| 417 |
+
{
|
| 418 |
+
// |a| >= 2^16 or NaN or |a| <= 2^(-25)
|
| 419 |
+
// double-rounding is not a problem
|
| 420 |
+
result = __float2half(static_cast<float>(a));
|
| 421 |
+
}
|
| 422 |
+
else
|
| 423 |
+
{
|
| 424 |
+
// here 2^(-25) < |a| < 2^16
|
| 425 |
+
// prepare shifter value such that a + shifter
|
| 426 |
+
// done in double precision performs round-to-nearest-even
|
| 427 |
+
// and (a + shifter) - shifter results in a rounded to
|
| 428 |
+
// 11 bits of precision. Shifter needs to have exponent of
|
| 429 |
+
// a plus 53 - 11 = 42 and a leading bit in mantissa to guard
|
| 430 |
+
// against negative values.
|
| 431 |
+
// So need to have |a| capped to avoid overflow in exponent.
|
| 432 |
+
// For inputs that are smaller than half precision minnorm
|
| 433 |
+
// we prepare fixed shifter exponent.
|
| 434 |
+
unsigned long long shifterBits;
|
| 435 |
+
if (absa >= 0x3f10000000000000ULL)
|
| 436 |
+
{ // Here if |a| >= 2^(-14)
|
| 437 |
+
// add 42 to exponent bits
|
| 438 |
+
shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL;
|
| 439 |
+
}
|
| 440 |
+
else
|
| 441 |
+
{ // 2^(-25) < |a| < 2^(-14), potentially results in denormal
|
| 442 |
+
// set exponent bits to 42 - 14 + bias
|
| 443 |
+
shifterBits = 0x41B0000000000000ULL;
|
| 444 |
+
}
|
| 445 |
+
// set leading mantissa bit to protect against negative inputs
|
| 446 |
+
shifterBits |= 0x0008000000000000ULL;
|
| 447 |
+
double shifter;
|
| 448 |
+
#if defined(__CUDACC__)
|
| 449 |
+
(void)memcpy(&shifter, &shifterBits, sizeof(shifterBits));
|
| 450 |
+
#else
|
| 451 |
+
(void)std::memcpy(&shifter, &shifterBits, sizeof(shifterBits));
|
| 452 |
+
#endif
|
| 453 |
+
double aShiftRound = a + shifter;
|
| 454 |
+
|
| 455 |
+
// Prevent the compiler from optimizing away a + shifter - shifter
|
| 456 |
+
// by doing intermediate memcopy and harmless bitwize operation
|
| 457 |
+
unsigned long long int aShiftRoundBits;
|
| 458 |
+
#if defined(__CUDACC__)
|
| 459 |
+
(void)memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound));
|
| 460 |
+
#else
|
| 461 |
+
(void)std::memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound));
|
| 462 |
+
#endif
|
| 463 |
+
|
| 464 |
+
// the value is positive, so this operation doesn't change anything
|
| 465 |
+
aShiftRoundBits &= 0x7fffffffffffffffULL;
|
| 466 |
+
|
| 467 |
+
#if defined(__CUDACC__)
|
| 468 |
+
(void)memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound));
|
| 469 |
+
#else
|
| 470 |
+
(void)std::memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound));
|
| 471 |
+
#endif
|
| 472 |
+
|
| 473 |
+
result = __float2half(static_cast<float>(aShiftRound - shifter));
|
| 474 |
+
}
|
| 475 |
+
|
| 476 |
+
return result;
|
| 477 |
+
#endif
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a)
|
| 481 |
+
{
|
| 482 |
+
__half val;
|
| 483 |
+
#if defined(__CUDA_ARCH__)
|
| 484 |
+
asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
|
| 485 |
+
#else
|
| 486 |
+
__half_raw r;
|
| 487 |
+
unsigned int sign = 0U;
|
| 488 |
+
unsigned int remainder = 0U;
|
| 489 |
+
r.x = __internal_float2half(a, sign, remainder);
|
| 490 |
+
if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) {
|
| 491 |
+
r.x++;
|
| 492 |
+
}
|
| 493 |
+
val = r;
|
| 494 |
+
#endif
|
| 495 |
+
return val;
|
| 496 |
+
}
|
| 497 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a)
|
| 498 |
+
{
|
| 499 |
+
__half val;
|
| 500 |
+
#if defined(__CUDA_ARCH__)
|
| 501 |
+
asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
|
| 502 |
+
#else
|
| 503 |
+
__half_raw r;
|
| 504 |
+
unsigned int sign = 0U;
|
| 505 |
+
unsigned int remainder = 0U;
|
| 506 |
+
r.x = __internal_float2half(a, sign, remainder);
|
| 507 |
+
if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) {
|
| 508 |
+
r.x++;
|
| 509 |
+
}
|
| 510 |
+
val = r;
|
| 511 |
+
#endif
|
| 512 |
+
return val;
|
| 513 |
+
}
|
| 514 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a)
|
| 515 |
+
{
|
| 516 |
+
__half val;
|
| 517 |
+
#if defined(__CUDA_ARCH__)
|
| 518 |
+
asm("{ cvt.rz.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
|
| 519 |
+
#else
|
| 520 |
+
__half_raw r;
|
| 521 |
+
unsigned int sign = 0U;
|
| 522 |
+
unsigned int remainder = 0U;
|
| 523 |
+
r.x = __internal_float2half(a, sign, remainder);
|
| 524 |
+
val = r;
|
| 525 |
+
#endif
|
| 526 |
+
return val;
|
| 527 |
+
}
|
| 528 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a)
|
| 529 |
+
{
|
| 530 |
+
__half val;
|
| 531 |
+
#if defined(__CUDA_ARCH__)
|
| 532 |
+
asm("{ cvt.rm.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
|
| 533 |
+
#else
|
| 534 |
+
__half_raw r;
|
| 535 |
+
unsigned int sign = 0U;
|
| 536 |
+
unsigned int remainder = 0U;
|
| 537 |
+
r.x = __internal_float2half(a, sign, remainder);
|
| 538 |
+
if ((remainder != 0U) && (sign != 0U)) {
|
| 539 |
+
r.x++;
|
| 540 |
+
}
|
| 541 |
+
val = r;
|
| 542 |
+
#endif
|
| 543 |
+
return val;
|
| 544 |
+
}
|
| 545 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a)
|
| 546 |
+
{
|
| 547 |
+
__half val;
|
| 548 |
+
#if defined(__CUDA_ARCH__)
|
| 549 |
+
asm("{ cvt.rp.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
|
| 550 |
+
#else
|
| 551 |
+
__half_raw r;
|
| 552 |
+
unsigned int sign = 0U;
|
| 553 |
+
unsigned int remainder = 0U;
|
| 554 |
+
r.x = __internal_float2half(a, sign, remainder);
|
| 555 |
+
if ((remainder != 0U) && (sign == 0U)) {
|
| 556 |
+
r.x++;
|
| 557 |
+
}
|
| 558 |
+
val = r;
|
| 559 |
+
#endif
|
| 560 |
+
return val;
|
| 561 |
+
}
|
| 562 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a)
|
| 563 |
+
{
|
| 564 |
+
__half2 val;
|
| 565 |
+
#if defined(__CUDA_ARCH__)
|
| 566 |
+
asm("{.reg .f16 low;\n"
|
| 567 |
+
" cvt.rn.f16.f32 low, %1;\n"
|
| 568 |
+
" mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a));
|
| 569 |
+
#else
|
| 570 |
+
val = __half2(__float2half_rn(a), __float2half_rn(a));
|
| 571 |
+
#endif
|
| 572 |
+
return val;
|
| 573 |
+
}
|
| 574 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b)
|
| 575 |
+
{
|
| 576 |
+
__half2 val;
|
| 577 |
+
#if defined(__CUDA_ARCH__)
|
| 578 |
+
asm("{.reg .f16 low,high;\n"
|
| 579 |
+
" cvt.rn.f16.f32 low, %1;\n"
|
| 580 |
+
" cvt.rn.f16.f32 high, %2;\n"
|
| 581 |
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b));
|
| 582 |
+
#else
|
| 583 |
+
val = __half2(__float2half_rn(a), __float2half_rn(b));
|
| 584 |
+
#endif
|
| 585 |
+
return val;
|
| 586 |
+
}
|
| 587 |
+
|
| 588 |
+
#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */
|
| 589 |
+
static inline float __internal_half2float(const unsigned short h)
|
| 590 |
+
{
|
| 591 |
+
unsigned int sign = ((static_cast<unsigned int>(h) >> 15U) & 1U);
|
| 592 |
+
unsigned int exponent = ((static_cast<unsigned int>(h) >> 10U) & 0x1fU);
|
| 593 |
+
unsigned int mantissa = ((static_cast<unsigned int>(h) & 0x3ffU) << 13U);
|
| 594 |
+
float f;
|
| 595 |
+
if (exponent == 0x1fU) { /* NaN or Inf */
|
| 596 |
+
/* discard sign of a NaN */
|
| 597 |
+
sign = ((mantissa != 0U) ? (sign >> 1U) : sign);
|
| 598 |
+
mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U);
|
| 599 |
+
exponent = 0xffU;
|
| 600 |
+
} else if (exponent == 0U) { /* Denorm or Zero */
|
| 601 |
+
if (mantissa != 0U) {
|
| 602 |
+
unsigned int msb;
|
| 603 |
+
exponent = 0x71U;
|
| 604 |
+
do {
|
| 605 |
+
msb = (mantissa & 0x400000U);
|
| 606 |
+
mantissa <<= 1U; /* normalize */
|
| 607 |
+
--exponent;
|
| 608 |
+
} while (msb == 0U);
|
| 609 |
+
mantissa &= 0x7fffffU; /* 1.mantissa is implicit */
|
| 610 |
+
}
|
| 611 |
+
} else {
|
| 612 |
+
exponent += 0x70U;
|
| 613 |
+
}
|
| 614 |
+
unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa);
|
| 615 |
+
#if defined(__CUDACC__)
|
| 616 |
+
(void)memcpy(&f, &u, sizeof(u));
|
| 617 |
+
#else
|
| 618 |
+
(void)std::memcpy(&f, &u, sizeof(u));
|
| 619 |
+
#endif
|
| 620 |
+
return f;
|
| 621 |
+
}
|
| 622 |
+
#endif /* !defined(__CUDACC_RTC__) */
|
| 623 |
+
|
| 624 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a)
|
| 625 |
+
{
|
| 626 |
+
float val;
|
| 627 |
+
#if defined(__CUDA_ARCH__)
|
| 628 |
+
asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(a)));
|
| 629 |
+
#else
|
| 630 |
+
val = __internal_half2float(static_cast<__half_raw>(a).x);
|
| 631 |
+
#endif
|
| 632 |
+
return val;
|
| 633 |
+
}
|
| 634 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a)
|
| 635 |
+
{
|
| 636 |
+
float val;
|
| 637 |
+
#if defined(__CUDA_ARCH__)
|
| 638 |
+
asm("{.reg .f16 low,high;\n"
|
| 639 |
+
" mov.b32 {low,high},%1;\n"
|
| 640 |
+
" cvt.f32.f16 %0, low;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a)));
|
| 641 |
+
#else
|
| 642 |
+
val = __internal_half2float(static_cast<__half2_raw>(a).x);
|
| 643 |
+
#endif
|
| 644 |
+
return val;
|
| 645 |
+
}
|
| 646 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a)
|
| 647 |
+
{
|
| 648 |
+
float val;
|
| 649 |
+
#if defined(__CUDA_ARCH__)
|
| 650 |
+
asm("{.reg .f16 low,high;\n"
|
| 651 |
+
" mov.b32 {low,high},%1;\n"
|
| 652 |
+
" cvt.f32.f16 %0, high;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a)));
|
| 653 |
+
#else
|
| 654 |
+
val = __internal_half2float(static_cast<__half2_raw>(a).y);
|
| 655 |
+
#endif
|
| 656 |
+
return val;
|
| 657 |
+
}
|
| 658 |
+
|
| 659 |
+
/* Intrinsic functions only available to nvcc compilers */
|
| 660 |
+
#if defined(__CUDACC__)
|
| 661 |
+
|
| 662 |
+
/* CUDA vector-types compatible vector creation function (note returns __half2, not half2) */
|
| 663 |
+
__VECTOR_FUNCTIONS_DECL__ __half2 make_half2(const __half x, const __half y)
|
| 664 |
+
{
|
| 665 |
+
__half2 t; t.x = x; t.y = y; return t;
|
| 666 |
+
}
|
| 667 |
+
#undef __VECTOR_FUNCTIONS_DECL__
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
/* Definitions of intrinsics */
|
| 671 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a)
|
| 672 |
+
{
|
| 673 |
+
const __half2 val = __floats2half2_rn(a.x, a.y);
|
| 674 |
+
return val;
|
| 675 |
+
}
|
| 676 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a)
|
| 677 |
+
{
|
| 678 |
+
float hi_float;
|
| 679 |
+
float lo_float;
|
| 680 |
+
#if defined(__CUDA_ARCH__)
|
| 681 |
+
asm("{.reg .f16 low,high;\n"
|
| 682 |
+
" mov.b32 {low,high},%1;\n"
|
| 683 |
+
" cvt.f32.f16 %0, low;}\n" : "=f"(lo_float) : "r"(__HALF2_TO_CUI(a)));
|
| 684 |
+
|
| 685 |
+
asm("{.reg .f16 low,high;\n"
|
| 686 |
+
" mov.b32 {low,high},%1;\n"
|
| 687 |
+
" cvt.f32.f16 %0, high;}\n" : "=f"(hi_float) : "r"(__HALF2_TO_CUI(a)));
|
| 688 |
+
#else
|
| 689 |
+
lo_float = __internal_half2float(((__half2_raw)a).x);
|
| 690 |
+
hi_float = __internal_half2float(((__half2_raw)a).y);
|
| 691 |
+
#endif
|
| 692 |
+
return make_float2(lo_float, hi_float);
|
| 693 |
+
}
|
| 694 |
+
__CUDA_FP16_DECL__ int __half2int_rn(const __half h)
|
| 695 |
+
{
|
| 696 |
+
int i;
|
| 697 |
+
asm("cvt.rni.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 698 |
+
return i;
|
| 699 |
+
}
|
| 700 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h)
|
| 701 |
+
{
|
| 702 |
+
int i;
|
| 703 |
+
#if defined __CUDA_ARCH__
|
| 704 |
+
asm("cvt.rzi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 705 |
+
#else
|
| 706 |
+
const float f = __half2float(h);
|
| 707 |
+
i = static_cast<int>(f);
|
| 708 |
+
const int max_val = (int)0x7fffffffU;
|
| 709 |
+
const int min_val = (int)0x80000000U;
|
| 710 |
+
// saturation fixup
|
| 711 |
+
if (f != f) {
|
| 712 |
+
// NaN
|
| 713 |
+
i = 0;
|
| 714 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 715 |
+
// saturate maximum
|
| 716 |
+
i = max_val;
|
| 717 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 718 |
+
// saturate minimum
|
| 719 |
+
i = min_val;
|
| 720 |
+
}
|
| 721 |
+
#endif
|
| 722 |
+
return i;
|
| 723 |
+
}
|
| 724 |
+
__CUDA_FP16_DECL__ int __half2int_rd(const __half h)
|
| 725 |
+
{
|
| 726 |
+
int i;
|
| 727 |
+
asm("cvt.rmi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 728 |
+
return i;
|
| 729 |
+
}
|
| 730 |
+
__CUDA_FP16_DECL__ int __half2int_ru(const __half h)
|
| 731 |
+
{
|
| 732 |
+
int i;
|
| 733 |
+
asm("cvt.rpi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 734 |
+
return i;
|
| 735 |
+
}
|
| 736 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i)
|
| 737 |
+
{
|
| 738 |
+
__half h;
|
| 739 |
+
#if defined(__CUDA_ARCH__)
|
| 740 |
+
asm("cvt.rn.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 741 |
+
#else
|
| 742 |
+
// double-rounding is not a problem here: if integer
|
| 743 |
+
// has more than 24 bits, it is already too large to
|
| 744 |
+
// be represented in half precision, and result will
|
| 745 |
+
// be infinity.
|
| 746 |
+
const float f = static_cast<float>(i);
|
| 747 |
+
h = __float2half_rn(f);
|
| 748 |
+
#endif
|
| 749 |
+
return h;
|
| 750 |
+
}
|
| 751 |
+
__CUDA_FP16_DECL__ __half __int2half_rz(const int i)
|
| 752 |
+
{
|
| 753 |
+
__half h;
|
| 754 |
+
asm("cvt.rz.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 755 |
+
return h;
|
| 756 |
+
}
|
| 757 |
+
__CUDA_FP16_DECL__ __half __int2half_rd(const int i)
|
| 758 |
+
{
|
| 759 |
+
__half h;
|
| 760 |
+
asm("cvt.rm.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 761 |
+
return h;
|
| 762 |
+
}
|
| 763 |
+
__CUDA_FP16_DECL__ __half __int2half_ru(const int i)
|
| 764 |
+
{
|
| 765 |
+
__half h;
|
| 766 |
+
asm("cvt.rp.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 767 |
+
return h;
|
| 768 |
+
}
|
| 769 |
+
|
| 770 |
+
__CUDA_FP16_DECL__ short int __half2short_rn(const __half h)
|
| 771 |
+
{
|
| 772 |
+
short int i;
|
| 773 |
+
asm("cvt.rni.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 774 |
+
return i;
|
| 775 |
+
}
|
| 776 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h)
|
| 777 |
+
{
|
| 778 |
+
short int i;
|
| 779 |
+
#if defined __CUDA_ARCH__
|
| 780 |
+
asm("cvt.rzi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 781 |
+
#else
|
| 782 |
+
const float f = __half2float(h);
|
| 783 |
+
i = static_cast<short int>(f);
|
| 784 |
+
const short int max_val = (short int)0x7fffU;
|
| 785 |
+
const short int min_val = (short int)0x8000U;
|
| 786 |
+
// saturation fixup
|
| 787 |
+
if (f != f) {
|
| 788 |
+
// NaN
|
| 789 |
+
i = 0;
|
| 790 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 791 |
+
// saturate maximum
|
| 792 |
+
i = max_val;
|
| 793 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 794 |
+
// saturate minimum
|
| 795 |
+
i = min_val;
|
| 796 |
+
}
|
| 797 |
+
#endif
|
| 798 |
+
return i;
|
| 799 |
+
}
|
| 800 |
+
__CUDA_FP16_DECL__ short int __half2short_rd(const __half h)
|
| 801 |
+
{
|
| 802 |
+
short int i;
|
| 803 |
+
asm("cvt.rmi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 804 |
+
return i;
|
| 805 |
+
}
|
| 806 |
+
__CUDA_FP16_DECL__ short int __half2short_ru(const __half h)
|
| 807 |
+
{
|
| 808 |
+
short int i;
|
| 809 |
+
asm("cvt.rpi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 810 |
+
return i;
|
| 811 |
+
}
|
| 812 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i)
|
| 813 |
+
{
|
| 814 |
+
__half h;
|
| 815 |
+
#if defined __CUDA_ARCH__
|
| 816 |
+
asm("cvt.rn.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 817 |
+
#else
|
| 818 |
+
const float f = static_cast<float>(i);
|
| 819 |
+
h = __float2half_rn(f);
|
| 820 |
+
#endif
|
| 821 |
+
return h;
|
| 822 |
+
}
|
| 823 |
+
__CUDA_FP16_DECL__ __half __short2half_rz(const short int i)
|
| 824 |
+
{
|
| 825 |
+
__half h;
|
| 826 |
+
asm("cvt.rz.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 827 |
+
return h;
|
| 828 |
+
}
|
| 829 |
+
__CUDA_FP16_DECL__ __half __short2half_rd(const short int i)
|
| 830 |
+
{
|
| 831 |
+
__half h;
|
| 832 |
+
asm("cvt.rm.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 833 |
+
return h;
|
| 834 |
+
}
|
| 835 |
+
__CUDA_FP16_DECL__ __half __short2half_ru(const short int i)
|
| 836 |
+
{
|
| 837 |
+
__half h;
|
| 838 |
+
asm("cvt.rp.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 839 |
+
return h;
|
| 840 |
+
}
|
| 841 |
+
|
| 842 |
+
__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h)
|
| 843 |
+
{
|
| 844 |
+
unsigned int i;
|
| 845 |
+
asm("cvt.rni.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 846 |
+
return i;
|
| 847 |
+
}
|
| 848 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h)
|
| 849 |
+
{
|
| 850 |
+
unsigned int i;
|
| 851 |
+
#if defined __CUDA_ARCH__
|
| 852 |
+
asm("cvt.rzi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 853 |
+
#else
|
| 854 |
+
const float f = __half2float(h);
|
| 855 |
+
i = static_cast<unsigned int>(f);
|
| 856 |
+
const unsigned int max_val = 0xffffffffU;
|
| 857 |
+
const unsigned int min_val = 0U;
|
| 858 |
+
// saturation fixup
|
| 859 |
+
if (f != f) {
|
| 860 |
+
// NaN
|
| 861 |
+
i = 0U;
|
| 862 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 863 |
+
// saturate maximum
|
| 864 |
+
i = max_val;
|
| 865 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 866 |
+
// saturate minimum
|
| 867 |
+
i = min_val;
|
| 868 |
+
}
|
| 869 |
+
#endif
|
| 870 |
+
return i;
|
| 871 |
+
}
|
| 872 |
+
__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h)
|
| 873 |
+
{
|
| 874 |
+
unsigned int i;
|
| 875 |
+
asm("cvt.rmi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 876 |
+
return i;
|
| 877 |
+
}
|
| 878 |
+
__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h)
|
| 879 |
+
{
|
| 880 |
+
unsigned int i;
|
| 881 |
+
asm("cvt.rpi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
|
| 882 |
+
return i;
|
| 883 |
+
}
|
| 884 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i)
|
| 885 |
+
{
|
| 886 |
+
__half h;
|
| 887 |
+
#if defined __CUDA_ARCH__
|
| 888 |
+
asm("cvt.rn.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 889 |
+
#else
|
| 890 |
+
// double-rounding is not a problem here: if integer
|
| 891 |
+
// has more than 24 bits, it is already too large to
|
| 892 |
+
// be represented in half precision, and result will
|
| 893 |
+
// be infinity.
|
| 894 |
+
const float f = static_cast<float>(i);
|
| 895 |
+
h = __float2half_rn(f);
|
| 896 |
+
#endif
|
| 897 |
+
return h;
|
| 898 |
+
}
|
| 899 |
+
__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i)
|
| 900 |
+
{
|
| 901 |
+
__half h;
|
| 902 |
+
asm("cvt.rz.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 903 |
+
return h;
|
| 904 |
+
}
|
| 905 |
+
__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i)
|
| 906 |
+
{
|
| 907 |
+
__half h;
|
| 908 |
+
asm("cvt.rm.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 909 |
+
return h;
|
| 910 |
+
}
|
| 911 |
+
__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i)
|
| 912 |
+
{
|
| 913 |
+
__half h;
|
| 914 |
+
asm("cvt.rp.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
|
| 915 |
+
return h;
|
| 916 |
+
}
|
| 917 |
+
|
| 918 |
+
__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h)
|
| 919 |
+
{
|
| 920 |
+
unsigned short int i;
|
| 921 |
+
asm("cvt.rni.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 922 |
+
return i;
|
| 923 |
+
}
|
| 924 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h)
|
| 925 |
+
{
|
| 926 |
+
unsigned short int i;
|
| 927 |
+
#if defined __CUDA_ARCH__
|
| 928 |
+
asm("cvt.rzi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 929 |
+
#else
|
| 930 |
+
const float f = __half2float(h);
|
| 931 |
+
i = static_cast<unsigned short int>(f);
|
| 932 |
+
const unsigned short int max_val = 0xffffU;
|
| 933 |
+
const unsigned short int min_val = 0U;
|
| 934 |
+
// saturation fixup
|
| 935 |
+
if (f != f) {
|
| 936 |
+
// NaN
|
| 937 |
+
i = 0U;
|
| 938 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 939 |
+
// saturate maximum
|
| 940 |
+
i = max_val;
|
| 941 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 942 |
+
// saturate minimum
|
| 943 |
+
i = min_val;
|
| 944 |
+
}
|
| 945 |
+
#endif
|
| 946 |
+
return i;
|
| 947 |
+
}
|
| 948 |
+
__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h)
|
| 949 |
+
{
|
| 950 |
+
unsigned short int i;
|
| 951 |
+
asm("cvt.rmi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 952 |
+
return i;
|
| 953 |
+
}
|
| 954 |
+
__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h)
|
| 955 |
+
{
|
| 956 |
+
unsigned short int i;
|
| 957 |
+
asm("cvt.rpi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
|
| 958 |
+
return i;
|
| 959 |
+
}
|
| 960 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i)
|
| 961 |
+
{
|
| 962 |
+
__half h;
|
| 963 |
+
#if defined __CUDA_ARCH__
|
| 964 |
+
asm("cvt.rn.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 965 |
+
#else
|
| 966 |
+
const float f = static_cast<float>(i);
|
| 967 |
+
h = __float2half_rn(f);
|
| 968 |
+
#endif
|
| 969 |
+
return h;
|
| 970 |
+
}
|
| 971 |
+
__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i)
|
| 972 |
+
{
|
| 973 |
+
__half h;
|
| 974 |
+
asm("cvt.rz.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 975 |
+
return h;
|
| 976 |
+
}
|
| 977 |
+
__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i)
|
| 978 |
+
{
|
| 979 |
+
__half h;
|
| 980 |
+
asm("cvt.rm.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 981 |
+
return h;
|
| 982 |
+
}
|
| 983 |
+
__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i)
|
| 984 |
+
{
|
| 985 |
+
__half h;
|
| 986 |
+
asm("cvt.rp.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
|
| 987 |
+
return h;
|
| 988 |
+
}
|
| 989 |
+
|
| 990 |
+
__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h)
|
| 991 |
+
{
|
| 992 |
+
unsigned long long int i;
|
| 993 |
+
asm("cvt.rni.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 994 |
+
return i;
|
| 995 |
+
}
|
| 996 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h)
|
| 997 |
+
{
|
| 998 |
+
unsigned long long int i;
|
| 999 |
+
#if defined __CUDA_ARCH__
|
| 1000 |
+
asm("cvt.rzi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1001 |
+
#else
|
| 1002 |
+
const float f = __half2float(h);
|
| 1003 |
+
i = static_cast<unsigned long long int>(f);
|
| 1004 |
+
const unsigned long long int max_val = 0xffffffffffffffffULL;
|
| 1005 |
+
const unsigned long long int min_val = 0ULL;
|
| 1006 |
+
// saturation fixup
|
| 1007 |
+
if (f != f) {
|
| 1008 |
+
// NaN
|
| 1009 |
+
i = 0x8000000000000000ULL;
|
| 1010 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 1011 |
+
// saturate maximum
|
| 1012 |
+
i = max_val;
|
| 1013 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 1014 |
+
// saturate minimum
|
| 1015 |
+
i = min_val;
|
| 1016 |
+
}
|
| 1017 |
+
#endif
|
| 1018 |
+
return i;
|
| 1019 |
+
}
|
| 1020 |
+
__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h)
|
| 1021 |
+
{
|
| 1022 |
+
unsigned long long int i;
|
| 1023 |
+
asm("cvt.rmi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1024 |
+
return i;
|
| 1025 |
+
}
|
| 1026 |
+
__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h)
|
| 1027 |
+
{
|
| 1028 |
+
unsigned long long int i;
|
| 1029 |
+
asm("cvt.rpi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1030 |
+
return i;
|
| 1031 |
+
}
|
| 1032 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i)
|
| 1033 |
+
{
|
| 1034 |
+
__half h;
|
| 1035 |
+
#if defined(__CUDA_ARCH__)
|
| 1036 |
+
asm("cvt.rn.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1037 |
+
#else
|
| 1038 |
+
// double-rounding is not a problem here: if integer
|
| 1039 |
+
// has more than 24 bits, it is already too large to
|
| 1040 |
+
// be represented in half precision, and result will
|
| 1041 |
+
// be infinity.
|
| 1042 |
+
const float f = static_cast<float>(i);
|
| 1043 |
+
h = __float2half_rn(f);
|
| 1044 |
+
#endif
|
| 1045 |
+
return h;
|
| 1046 |
+
}
|
| 1047 |
+
__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i)
|
| 1048 |
+
{
|
| 1049 |
+
__half h;
|
| 1050 |
+
asm("cvt.rz.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1051 |
+
return h;
|
| 1052 |
+
}
|
| 1053 |
+
__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i)
|
| 1054 |
+
{
|
| 1055 |
+
__half h;
|
| 1056 |
+
asm("cvt.rm.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1057 |
+
return h;
|
| 1058 |
+
}
|
| 1059 |
+
__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i)
|
| 1060 |
+
{
|
| 1061 |
+
__half h;
|
| 1062 |
+
asm("cvt.rp.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1063 |
+
return h;
|
| 1064 |
+
}
|
| 1065 |
+
|
| 1066 |
+
__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h)
|
| 1067 |
+
{
|
| 1068 |
+
long long int i;
|
| 1069 |
+
asm("cvt.rni.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1070 |
+
return i;
|
| 1071 |
+
}
|
| 1072 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h)
|
| 1073 |
+
{
|
| 1074 |
+
long long int i;
|
| 1075 |
+
#if defined __CUDA_ARCH__
|
| 1076 |
+
asm("cvt.rzi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1077 |
+
#else
|
| 1078 |
+
const float f = __half2float(h);
|
| 1079 |
+
i = static_cast<long long int>(f);
|
| 1080 |
+
const long long int max_val = (long long int)0x7fffffffffffffffULL;
|
| 1081 |
+
const long long int min_val = (long long int)0x8000000000000000ULL;
|
| 1082 |
+
// saturation fixup
|
| 1083 |
+
if (f != f) {
|
| 1084 |
+
// NaN
|
| 1085 |
+
i = min_val;
|
| 1086 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 1087 |
+
// saturate maximum
|
| 1088 |
+
i = max_val;
|
| 1089 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 1090 |
+
// saturate minimum
|
| 1091 |
+
i = min_val;
|
| 1092 |
+
}
|
| 1093 |
+
#endif
|
| 1094 |
+
return i;
|
| 1095 |
+
}
|
| 1096 |
+
__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h)
|
| 1097 |
+
{
|
| 1098 |
+
long long int i;
|
| 1099 |
+
asm("cvt.rmi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1100 |
+
return i;
|
| 1101 |
+
}
|
| 1102 |
+
__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h)
|
| 1103 |
+
{
|
| 1104 |
+
long long int i;
|
| 1105 |
+
asm("cvt.rpi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
| 1106 |
+
return i;
|
| 1107 |
+
}
|
| 1108 |
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i)
|
| 1109 |
+
{
|
| 1110 |
+
__half h;
|
| 1111 |
+
#if defined(__CUDA_ARCH__)
|
| 1112 |
+
asm("cvt.rn.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1113 |
+
#else
|
| 1114 |
+
// double-rounding is not a problem here: if integer
|
| 1115 |
+
// has more than 24 bits, it is already too large to
|
| 1116 |
+
// be represented in half precision, and result will
|
| 1117 |
+
// be infinity.
|
| 1118 |
+
const float f = static_cast<float>(i);
|
| 1119 |
+
h = __float2half_rn(f);
|
| 1120 |
+
#endif
|
| 1121 |
+
return h;
|
| 1122 |
+
}
|
| 1123 |
+
__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i)
|
| 1124 |
+
{
|
| 1125 |
+
__half h;
|
| 1126 |
+
asm("cvt.rz.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1127 |
+
return h;
|
| 1128 |
+
}
|
| 1129 |
+
__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i)
|
| 1130 |
+
{
|
| 1131 |
+
__half h;
|
| 1132 |
+
asm("cvt.rm.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1133 |
+
return h;
|
| 1134 |
+
}
|
| 1135 |
+
__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i)
|
| 1136 |
+
{
|
| 1137 |
+
__half h;
|
| 1138 |
+
asm("cvt.rp.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
| 1139 |
+
return h;
|
| 1140 |
+
}
|
| 1141 |
+
|
| 1142 |
+
__CUDA_FP16_DECL__ __half htrunc(const __half h)
|
| 1143 |
+
{
|
| 1144 |
+
__half r;
|
| 1145 |
+
asm("cvt.rzi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
| 1146 |
+
return r;
|
| 1147 |
+
}
|
| 1148 |
+
__CUDA_FP16_DECL__ __half hceil(const __half h)
|
| 1149 |
+
{
|
| 1150 |
+
__half r;
|
| 1151 |
+
asm("cvt.rpi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
| 1152 |
+
return r;
|
| 1153 |
+
}
|
| 1154 |
+
__CUDA_FP16_DECL__ __half hfloor(const __half h)
|
| 1155 |
+
{
|
| 1156 |
+
__half r;
|
| 1157 |
+
asm("cvt.rmi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
| 1158 |
+
return r;
|
| 1159 |
+
}
|
| 1160 |
+
__CUDA_FP16_DECL__ __half hrint(const __half h)
|
| 1161 |
+
{
|
| 1162 |
+
__half r;
|
| 1163 |
+
asm("cvt.rni.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
| 1164 |
+
return r;
|
| 1165 |
+
}
|
| 1166 |
+
|
| 1167 |
+
__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h)
|
| 1168 |
+
{
|
| 1169 |
+
__half2 val;
|
| 1170 |
+
asm("{.reg .f16 low,high;\n"
|
| 1171 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1172 |
+
" cvt.rzi.f16.f16 low, low;\n"
|
| 1173 |
+
" cvt.rzi.f16.f16 high, high;\n"
|
| 1174 |
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
| 1175 |
+
return val;
|
| 1176 |
+
}
|
| 1177 |
+
__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h)
|
| 1178 |
+
{
|
| 1179 |
+
__half2 val;
|
| 1180 |
+
asm("{.reg .f16 low,high;\n"
|
| 1181 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1182 |
+
" cvt.rpi.f16.f16 low, low;\n"
|
| 1183 |
+
" cvt.rpi.f16.f16 high, high;\n"
|
| 1184 |
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
| 1185 |
+
return val;
|
| 1186 |
+
}
|
| 1187 |
+
__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h)
|
| 1188 |
+
{
|
| 1189 |
+
__half2 val;
|
| 1190 |
+
asm("{.reg .f16 low,high;\n"
|
| 1191 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1192 |
+
" cvt.rmi.f16.f16 low, low;\n"
|
| 1193 |
+
" cvt.rmi.f16.f16 high, high;\n"
|
| 1194 |
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
| 1195 |
+
return val;
|
| 1196 |
+
}
|
| 1197 |
+
__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h)
|
| 1198 |
+
{
|
| 1199 |
+
__half2 val;
|
| 1200 |
+
asm("{.reg .f16 low,high;\n"
|
| 1201 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1202 |
+
" cvt.rni.f16.f16 low, low;\n"
|
| 1203 |
+
" cvt.rni.f16.f16 high, high;\n"
|
| 1204 |
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
| 1205 |
+
return val;
|
| 1206 |
+
}
|
| 1207 |
+
__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b)
|
| 1208 |
+
{
|
| 1209 |
+
__half2 val;
|
| 1210 |
+
asm("{.reg .f16 alow,ahigh,blow,bhigh;\n"
|
| 1211 |
+
" mov.b32 {alow,ahigh}, %1;\n"
|
| 1212 |
+
" mov.b32 {blow,bhigh}, %2;\n"
|
| 1213 |
+
" mov.b32 %0, {alow,blow};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b)));
|
| 1214 |
+
return val;
|
| 1215 |
+
}
|
| 1216 |
+
__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b)
|
| 1217 |
+
{
|
| 1218 |
+
__half2 val;
|
| 1219 |
+
asm("{.reg .f16 alow,ahigh,blow,bhigh;\n"
|
| 1220 |
+
" mov.b32 {alow,ahigh}, %1;\n"
|
| 1221 |
+
" mov.b32 {blow,bhigh}, %2;\n"
|
| 1222 |
+
" mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b)));
|
| 1223 |
+
return val;
|
| 1224 |
+
}
|
| 1225 |
+
__CUDA_FP16_DECL__ __half __low2half(const __half2 a)
|
| 1226 |
+
{
|
| 1227 |
+
__half ret;
|
| 1228 |
+
asm("{.reg .f16 low,high;\n"
|
| 1229 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1230 |
+
" mov.b16 %0, low;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a)));
|
| 1231 |
+
return ret;
|
| 1232 |
+
}
|
| 1233 |
+
__CUDA_FP16_DECL__ int __hisinf(const __half a)
|
| 1234 |
+
{
|
| 1235 |
+
int retval;
|
| 1236 |
+
if (__HALF_TO_CUS(a) == 0xFC00U) {
|
| 1237 |
+
retval = -1;
|
| 1238 |
+
} else if (__HALF_TO_CUS(a) == 0x7C00U) {
|
| 1239 |
+
retval = 1;
|
| 1240 |
+
} else {
|
| 1241 |
+
retval = 0;
|
| 1242 |
+
}
|
| 1243 |
+
return retval;
|
| 1244 |
+
}
|
| 1245 |
+
__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a)
|
| 1246 |
+
{
|
| 1247 |
+
__half2 val;
|
| 1248 |
+
asm("{.reg .f16 low,high;\n"
|
| 1249 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1250 |
+
" mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 1251 |
+
return val;
|
| 1252 |
+
}
|
| 1253 |
+
__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a)
|
| 1254 |
+
{
|
| 1255 |
+
__half2 val;
|
| 1256 |
+
asm("{.reg .f16 low,high;\n"
|
| 1257 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1258 |
+
" mov.b32 %0, {high,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 1259 |
+
return val;
|
| 1260 |
+
}
|
| 1261 |
+
__CUDA_FP16_DECL__ __half __high2half(const __half2 a)
|
| 1262 |
+
{
|
| 1263 |
+
__half ret;
|
| 1264 |
+
asm("{.reg .f16 low,high;\n"
|
| 1265 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1266 |
+
" mov.b16 %0, high;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a)));
|
| 1267 |
+
return ret;
|
| 1268 |
+
}
|
| 1269 |
+
__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b)
|
| 1270 |
+
{
|
| 1271 |
+
__half2 val;
|
| 1272 |
+
asm("{ mov.b32 %0, {%1,%2};}\n"
|
| 1273 |
+
: "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b)));
|
| 1274 |
+
return val;
|
| 1275 |
+
}
|
| 1276 |
+
__CUDA_FP16_DECL__ __half2 __half2half2(const __half a)
|
| 1277 |
+
{
|
| 1278 |
+
__half2 val;
|
| 1279 |
+
asm("{ mov.b32 %0, {%1,%1};}\n"
|
| 1280 |
+
: "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)));
|
| 1281 |
+
return val;
|
| 1282 |
+
}
|
| 1283 |
+
__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a)
|
| 1284 |
+
{
|
| 1285 |
+
__half2 val;
|
| 1286 |
+
asm("{.reg .f16 low,high;\n"
|
| 1287 |
+
" mov.b32 {low,high}, %1;\n"
|
| 1288 |
+
" mov.b32 %0, {high,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 1289 |
+
return val;
|
| 1290 |
+
}
|
| 1291 |
+
__CUDA_FP16_DECL__ short int __half_as_short(const __half h)
|
| 1292 |
+
{
|
| 1293 |
+
return static_cast<short int>(__HALF_TO_CUS(h));
|
| 1294 |
+
}
|
| 1295 |
+
__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h)
|
| 1296 |
+
{
|
| 1297 |
+
return __HALF_TO_CUS(h);
|
| 1298 |
+
}
|
| 1299 |
+
__CUDA_FP16_DECL__ __half __short_as_half(const short int i)
|
| 1300 |
+
{
|
| 1301 |
+
__half h;
|
| 1302 |
+
__HALF_TO_US(h) = static_cast<unsigned short int>(i);
|
| 1303 |
+
return h;
|
| 1304 |
+
}
|
| 1305 |
+
__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i)
|
| 1306 |
+
{
|
| 1307 |
+
__half h;
|
| 1308 |
+
__HALF_TO_US(h) = i;
|
| 1309 |
+
return h;
|
| 1310 |
+
}
|
| 1311 |
+
|
| 1312 |
+
#if __CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__)
|
| 1313 |
+
/******************************************************************************
|
| 1314 |
+
* __half, __half2 warp shuffle *
|
| 1315 |
+
******************************************************************************/
|
| 1316 |
+
#define __SHUFFLE_HALF2_MACRO(name) /* do */ {\
|
| 1317 |
+
__half2 r; \
|
| 1318 |
+
asm volatile ("{"#name" %0,%1,%2,%3;\n}" \
|
| 1319 |
+
:"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c)); \
|
| 1320 |
+
return r; \
|
| 1321 |
+
} /* while(0) */
|
| 1322 |
+
|
| 1323 |
+
#define __SHUFFLE_SYNC_HALF2_MACRO(name) /* do */ {\
|
| 1324 |
+
__half2 r; \
|
| 1325 |
+
asm volatile ("{"#name" %0,%1,%2,%3,%4;\n}" \
|
| 1326 |
+
:"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \
|
| 1327 |
+
return r; \
|
| 1328 |
+
} /* while(0) */
|
| 1329 |
+
|
| 1330 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 1331 |
+
|
| 1332 |
+
__CUDA_FP16_DECL__ __half2 __shfl(const __half2 var, const int delta, const int width)
|
| 1333 |
+
{
|
| 1334 |
+
unsigned int warp_size;
|
| 1335 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1336 |
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
| 1337 |
+
__SHUFFLE_HALF2_MACRO(shfl.idx.b32)
|
| 1338 |
+
}
|
| 1339 |
+
__CUDA_FP16_DECL__ __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width)
|
| 1340 |
+
{
|
| 1341 |
+
unsigned int warp_size;
|
| 1342 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1343 |
+
const unsigned int c = (warp_size - static_cast<unsigned>(width)) << 8U;
|
| 1344 |
+
__SHUFFLE_HALF2_MACRO(shfl.up.b32)
|
| 1345 |
+
}
|
| 1346 |
+
__CUDA_FP16_DECL__ __half2 __shfl_down(const __half2 var, const unsigned int delta, const int width)
|
| 1347 |
+
{
|
| 1348 |
+
unsigned int warp_size;
|
| 1349 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1350 |
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
| 1351 |
+
__SHUFFLE_HALF2_MACRO(shfl.down.b32)
|
| 1352 |
+
}
|
| 1353 |
+
__CUDA_FP16_DECL__ __half2 __shfl_xor(const __half2 var, const int delta, const int width)
|
| 1354 |
+
{
|
| 1355 |
+
unsigned int warp_size;
|
| 1356 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1357 |
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
| 1358 |
+
__SHUFFLE_HALF2_MACRO(shfl.bfly.b32)
|
| 1359 |
+
}
|
| 1360 |
+
|
| 1361 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
|
| 1362 |
+
|
| 1363 |
+
__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width)
|
| 1364 |
+
{
|
| 1365 |
+
unsigned int warp_size;
|
| 1366 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1367 |
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
| 1368 |
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.idx.b32)
|
| 1369 |
+
}
|
| 1370 |
+
__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width)
|
| 1371 |
+
{
|
| 1372 |
+
unsigned int warp_size;
|
| 1373 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1374 |
+
const unsigned int c = (warp_size - static_cast<unsigned>(width)) << 8U;
|
| 1375 |
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.up.b32)
|
| 1376 |
+
}
|
| 1377 |
+
__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width)
|
| 1378 |
+
{
|
| 1379 |
+
unsigned int warp_size;
|
| 1380 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1381 |
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
| 1382 |
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.down.b32)
|
| 1383 |
+
}
|
| 1384 |
+
__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width)
|
| 1385 |
+
{
|
| 1386 |
+
unsigned int warp_size;
|
| 1387 |
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
| 1388 |
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
| 1389 |
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.bfly.b32)
|
| 1390 |
+
}
|
| 1391 |
+
|
| 1392 |
+
#undef __SHUFFLE_HALF2_MACRO
|
| 1393 |
+
#undef __SHUFFLE_SYNC_HALF2_MACRO
|
| 1394 |
+
|
| 1395 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 1396 |
+
|
| 1397 |
+
__CUDA_FP16_DECL__ __half __shfl(const __half var, const int delta, const int width)
|
| 1398 |
+
{
|
| 1399 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1400 |
+
const __half2 temp2 = __shfl(temp1, delta, width);
|
| 1401 |
+
return __low2half(temp2);
|
| 1402 |
+
}
|
| 1403 |
+
__CUDA_FP16_DECL__ __half __shfl_up(const __half var, const unsigned int delta, const int width)
|
| 1404 |
+
{
|
| 1405 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1406 |
+
const __half2 temp2 = __shfl_up(temp1, delta, width);
|
| 1407 |
+
return __low2half(temp2);
|
| 1408 |
+
}
|
| 1409 |
+
__CUDA_FP16_DECL__ __half __shfl_down(const __half var, const unsigned int delta, const int width)
|
| 1410 |
+
{
|
| 1411 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1412 |
+
const __half2 temp2 = __shfl_down(temp1, delta, width);
|
| 1413 |
+
return __low2half(temp2);
|
| 1414 |
+
}
|
| 1415 |
+
__CUDA_FP16_DECL__ __half __shfl_xor(const __half var, const int delta, const int width)
|
| 1416 |
+
{
|
| 1417 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1418 |
+
const __half2 temp2 = __shfl_xor(temp1, delta, width);
|
| 1419 |
+
return __low2half(temp2);
|
| 1420 |
+
}
|
| 1421 |
+
|
| 1422 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
|
| 1423 |
+
|
| 1424 |
+
__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width)
|
| 1425 |
+
{
|
| 1426 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1427 |
+
const __half2 temp2 = __shfl_sync(mask, temp1, delta, width);
|
| 1428 |
+
return __low2half(temp2);
|
| 1429 |
+
}
|
| 1430 |
+
__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width)
|
| 1431 |
+
{
|
| 1432 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1433 |
+
const __half2 temp2 = __shfl_up_sync(mask, temp1, delta, width);
|
| 1434 |
+
return __low2half(temp2);
|
| 1435 |
+
}
|
| 1436 |
+
__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width)
|
| 1437 |
+
{
|
| 1438 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1439 |
+
const __half2 temp2 = __shfl_down_sync(mask, temp1, delta, width);
|
| 1440 |
+
return __low2half(temp2);
|
| 1441 |
+
}
|
| 1442 |
+
__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width)
|
| 1443 |
+
{
|
| 1444 |
+
const __half2 temp1 = __halves2half2(var, var);
|
| 1445 |
+
const __half2 temp2 = __shfl_xor_sync(mask, temp1, delta, width);
|
| 1446 |
+
return __low2half(temp2);
|
| 1447 |
+
}
|
| 1448 |
+
|
| 1449 |
+
#endif /*__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__)*/
|
| 1450 |
+
/******************************************************************************
|
| 1451 |
+
* __half and __half2 __ldg,__ldcg,__ldca,__ldcs *
|
| 1452 |
+
******************************************************************************/
|
| 1453 |
+
|
| 1454 |
+
#if defined(__cplusplus) && (__CUDA_ARCH__ >= 320 || !defined(__CUDA_ARCH__))
|
| 1455 |
+
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
|
| 1456 |
+
#define __LDG_PTR "l"
|
| 1457 |
+
#else
|
| 1458 |
+
#define __LDG_PTR "r"
|
| 1459 |
+
#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
|
| 1460 |
+
__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr)
|
| 1461 |
+
{
|
| 1462 |
+
__half2 ret;
|
| 1463 |
+
asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
| 1464 |
+
return ret;
|
| 1465 |
+
}
|
| 1466 |
+
__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr)
|
| 1467 |
+
{
|
| 1468 |
+
__half ret;
|
| 1469 |
+
asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
| 1470 |
+
return ret;
|
| 1471 |
+
}
|
| 1472 |
+
__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr)
|
| 1473 |
+
{
|
| 1474 |
+
__half2 ret;
|
| 1475 |
+
asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
| 1476 |
+
return ret;
|
| 1477 |
+
}
|
| 1478 |
+
__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr)
|
| 1479 |
+
{
|
| 1480 |
+
__half ret;
|
| 1481 |
+
asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
| 1482 |
+
return ret;
|
| 1483 |
+
}
|
| 1484 |
+
__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr)
|
| 1485 |
+
{
|
| 1486 |
+
__half2 ret;
|
| 1487 |
+
asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
| 1488 |
+
return ret;
|
| 1489 |
+
}
|
| 1490 |
+
__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr)
|
| 1491 |
+
{
|
| 1492 |
+
__half ret;
|
| 1493 |
+
asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
| 1494 |
+
return ret;
|
| 1495 |
+
}
|
| 1496 |
+
__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr)
|
| 1497 |
+
{
|
| 1498 |
+
__half2 ret;
|
| 1499 |
+
asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
| 1500 |
+
return ret;
|
| 1501 |
+
}
|
| 1502 |
+
__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr)
|
| 1503 |
+
{
|
| 1504 |
+
__half ret;
|
| 1505 |
+
asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
| 1506 |
+
return ret;
|
| 1507 |
+
}
|
| 1508 |
+
__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr)
|
| 1509 |
+
{
|
| 1510 |
+
__half2 ret;
|
| 1511 |
+
asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory");
|
| 1512 |
+
return ret;
|
| 1513 |
+
}
|
| 1514 |
+
__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr)
|
| 1515 |
+
{
|
| 1516 |
+
__half ret;
|
| 1517 |
+
asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory");
|
| 1518 |
+
return ret;
|
| 1519 |
+
}
|
| 1520 |
+
__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr)
|
| 1521 |
+
{
|
| 1522 |
+
__half2 ret;
|
| 1523 |
+
asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory");
|
| 1524 |
+
return ret;
|
| 1525 |
+
}
|
| 1526 |
+
__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr)
|
| 1527 |
+
{
|
| 1528 |
+
__half ret;
|
| 1529 |
+
asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory");
|
| 1530 |
+
return ret;
|
| 1531 |
+
}
|
| 1532 |
+
__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value)
|
| 1533 |
+
{
|
| 1534 |
+
asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
| 1535 |
+
}
|
| 1536 |
+
__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value)
|
| 1537 |
+
{
|
| 1538 |
+
asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
| 1539 |
+
}
|
| 1540 |
+
__CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value)
|
| 1541 |
+
{
|
| 1542 |
+
asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
| 1543 |
+
}
|
| 1544 |
+
__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value)
|
| 1545 |
+
{
|
| 1546 |
+
asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
| 1547 |
+
}
|
| 1548 |
+
__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value)
|
| 1549 |
+
{
|
| 1550 |
+
asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
| 1551 |
+
}
|
| 1552 |
+
__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value)
|
| 1553 |
+
{
|
| 1554 |
+
asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
| 1555 |
+
}
|
| 1556 |
+
__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value)
|
| 1557 |
+
{
|
| 1558 |
+
asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
| 1559 |
+
}
|
| 1560 |
+
__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value)
|
| 1561 |
+
{
|
| 1562 |
+
asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
| 1563 |
+
}
|
| 1564 |
+
#undef __LDG_PTR
|
| 1565 |
+
#endif /*defined(__cplusplus) && (__CUDA_ARCH__ >= 320 || !defined(__CUDA_ARCH__))*/
|
| 1566 |
+
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
|
| 1567 |
+
/******************************************************************************
|
| 1568 |
+
* __half2 comparison *
|
| 1569 |
+
******************************************************************************/
|
| 1570 |
+
#define __COMPARISON_OP_HALF2_MACRO(name) /* do */ {\
|
| 1571 |
+
__half2 val; \
|
| 1572 |
+
asm( "{ "#name".f16x2.f16x2 %0,%1,%2;\n}" \
|
| 1573 |
+
:"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
|
| 1574 |
+
return val; \
|
| 1575 |
+
} /* while(0) */
|
| 1576 |
+
__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b)
|
| 1577 |
+
{
|
| 1578 |
+
__COMPARISON_OP_HALF2_MACRO(set.eq)
|
| 1579 |
+
}
|
| 1580 |
+
__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b)
|
| 1581 |
+
{
|
| 1582 |
+
__COMPARISON_OP_HALF2_MACRO(set.ne)
|
| 1583 |
+
}
|
| 1584 |
+
__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b)
|
| 1585 |
+
{
|
| 1586 |
+
__COMPARISON_OP_HALF2_MACRO(set.le)
|
| 1587 |
+
}
|
| 1588 |
+
__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b)
|
| 1589 |
+
{
|
| 1590 |
+
__COMPARISON_OP_HALF2_MACRO(set.ge)
|
| 1591 |
+
}
|
| 1592 |
+
__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b)
|
| 1593 |
+
{
|
| 1594 |
+
__COMPARISON_OP_HALF2_MACRO(set.lt)
|
| 1595 |
+
}
|
| 1596 |
+
__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b)
|
| 1597 |
+
{
|
| 1598 |
+
__COMPARISON_OP_HALF2_MACRO(set.gt)
|
| 1599 |
+
}
|
| 1600 |
+
__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b)
|
| 1601 |
+
{
|
| 1602 |
+
__COMPARISON_OP_HALF2_MACRO(set.equ)
|
| 1603 |
+
}
|
| 1604 |
+
__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b)
|
| 1605 |
+
{
|
| 1606 |
+
__COMPARISON_OP_HALF2_MACRO(set.neu)
|
| 1607 |
+
}
|
| 1608 |
+
__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b)
|
| 1609 |
+
{
|
| 1610 |
+
__COMPARISON_OP_HALF2_MACRO(set.leu)
|
| 1611 |
+
}
|
| 1612 |
+
__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b)
|
| 1613 |
+
{
|
| 1614 |
+
__COMPARISON_OP_HALF2_MACRO(set.geu)
|
| 1615 |
+
}
|
| 1616 |
+
__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b)
|
| 1617 |
+
{
|
| 1618 |
+
__COMPARISON_OP_HALF2_MACRO(set.ltu)
|
| 1619 |
+
}
|
| 1620 |
+
__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b)
|
| 1621 |
+
{
|
| 1622 |
+
__COMPARISON_OP_HALF2_MACRO(set.gtu)
|
| 1623 |
+
}
|
| 1624 |
+
#undef __COMPARISON_OP_HALF2_MACRO
|
| 1625 |
+
#define __BOOL_COMPARISON_OP_HALF2_MACRO(name) /* do */ {\
|
| 1626 |
+
__half2 val; \
|
| 1627 |
+
bool retval; \
|
| 1628 |
+
asm( "{ "#name".f16x2.f16x2 %0,%1,%2;\n}" \
|
| 1629 |
+
:"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
|
| 1630 |
+
if (__HALF2_TO_CUI(val) == 0x3C003C00U) {\
|
| 1631 |
+
retval = true; \
|
| 1632 |
+
} else { \
|
| 1633 |
+
retval = false; \
|
| 1634 |
+
}\
|
| 1635 |
+
return retval;\
|
| 1636 |
+
} /* while(0) */
|
| 1637 |
+
__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b)
|
| 1638 |
+
{
|
| 1639 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.eq)
|
| 1640 |
+
}
|
| 1641 |
+
__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b)
|
| 1642 |
+
{
|
| 1643 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.ne)
|
| 1644 |
+
}
|
| 1645 |
+
__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b)
|
| 1646 |
+
{
|
| 1647 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.le)
|
| 1648 |
+
}
|
| 1649 |
+
__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b)
|
| 1650 |
+
{
|
| 1651 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.ge)
|
| 1652 |
+
}
|
| 1653 |
+
__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b)
|
| 1654 |
+
{
|
| 1655 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.lt)
|
| 1656 |
+
}
|
| 1657 |
+
__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b)
|
| 1658 |
+
{
|
| 1659 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.gt)
|
| 1660 |
+
}
|
| 1661 |
+
__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b)
|
| 1662 |
+
{
|
| 1663 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.equ)
|
| 1664 |
+
}
|
| 1665 |
+
__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b)
|
| 1666 |
+
{
|
| 1667 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.neu)
|
| 1668 |
+
}
|
| 1669 |
+
__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b)
|
| 1670 |
+
{
|
| 1671 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.leu)
|
| 1672 |
+
}
|
| 1673 |
+
__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b)
|
| 1674 |
+
{
|
| 1675 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.geu)
|
| 1676 |
+
}
|
| 1677 |
+
__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b)
|
| 1678 |
+
{
|
| 1679 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.ltu)
|
| 1680 |
+
}
|
| 1681 |
+
__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b)
|
| 1682 |
+
{
|
| 1683 |
+
__BOOL_COMPARISON_OP_HALF2_MACRO(set.gtu)
|
| 1684 |
+
}
|
| 1685 |
+
#undef __BOOL_COMPARISON_OP_HALF2_MACRO
|
| 1686 |
+
/******************************************************************************
|
| 1687 |
+
* __half comparison *
|
| 1688 |
+
******************************************************************************/
|
| 1689 |
+
#define __COMPARISON_OP_HALF_MACRO(name) /* do */ {\
|
| 1690 |
+
unsigned short val; \
|
| 1691 |
+
asm( "{ .reg .pred __$temp3;\n" \
|
| 1692 |
+
" setp."#name".f16 __$temp3, %1, %2;\n" \
|
| 1693 |
+
" selp.u16 %0, 1, 0, __$temp3;}" \
|
| 1694 |
+
: "=h"(val) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); \
|
| 1695 |
+
return (val != 0U) ? true : false; \
|
| 1696 |
+
} /* while(0) */
|
| 1697 |
+
__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b)
|
| 1698 |
+
{
|
| 1699 |
+
__COMPARISON_OP_HALF_MACRO(eq)
|
| 1700 |
+
}
|
| 1701 |
+
__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b)
|
| 1702 |
+
{
|
| 1703 |
+
__COMPARISON_OP_HALF_MACRO(ne)
|
| 1704 |
+
}
|
| 1705 |
+
__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b)
|
| 1706 |
+
{
|
| 1707 |
+
__COMPARISON_OP_HALF_MACRO(le)
|
| 1708 |
+
}
|
| 1709 |
+
__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b)
|
| 1710 |
+
{
|
| 1711 |
+
__COMPARISON_OP_HALF_MACRO(ge)
|
| 1712 |
+
}
|
| 1713 |
+
__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b)
|
| 1714 |
+
{
|
| 1715 |
+
__COMPARISON_OP_HALF_MACRO(lt)
|
| 1716 |
+
}
|
| 1717 |
+
__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b)
|
| 1718 |
+
{
|
| 1719 |
+
__COMPARISON_OP_HALF_MACRO(gt)
|
| 1720 |
+
}
|
| 1721 |
+
__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b)
|
| 1722 |
+
{
|
| 1723 |
+
__COMPARISON_OP_HALF_MACRO(equ)
|
| 1724 |
+
}
|
| 1725 |
+
__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b)
|
| 1726 |
+
{
|
| 1727 |
+
__COMPARISON_OP_HALF_MACRO(neu)
|
| 1728 |
+
}
|
| 1729 |
+
__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b)
|
| 1730 |
+
{
|
| 1731 |
+
__COMPARISON_OP_HALF_MACRO(leu)
|
| 1732 |
+
}
|
| 1733 |
+
__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b)
|
| 1734 |
+
{
|
| 1735 |
+
__COMPARISON_OP_HALF_MACRO(geu)
|
| 1736 |
+
}
|
| 1737 |
+
__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b)
|
| 1738 |
+
{
|
| 1739 |
+
__COMPARISON_OP_HALF_MACRO(ltu)
|
| 1740 |
+
}
|
| 1741 |
+
__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b)
|
| 1742 |
+
{
|
| 1743 |
+
__COMPARISON_OP_HALF_MACRO(gtu)
|
| 1744 |
+
}
|
| 1745 |
+
#undef __COMPARISON_OP_HALF_MACRO
|
| 1746 |
+
/******************************************************************************
|
| 1747 |
+
* __half2 arithmetic *
|
| 1748 |
+
******************************************************************************/
|
| 1749 |
+
__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b)
|
| 1750 |
+
{
|
| 1751 |
+
__BINARY_OP_HALF2_MACRO(add)
|
| 1752 |
+
}
|
| 1753 |
+
__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b)
|
| 1754 |
+
{
|
| 1755 |
+
__BINARY_OP_HALF2_MACRO(sub)
|
| 1756 |
+
}
|
| 1757 |
+
__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b)
|
| 1758 |
+
{
|
| 1759 |
+
__BINARY_OP_HALF2_MACRO(mul)
|
| 1760 |
+
}
|
| 1761 |
+
__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b)
|
| 1762 |
+
{
|
| 1763 |
+
__BINARY_OP_HALF2_MACRO(add.sat)
|
| 1764 |
+
}
|
| 1765 |
+
__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b)
|
| 1766 |
+
{
|
| 1767 |
+
__BINARY_OP_HALF2_MACRO(sub.sat)
|
| 1768 |
+
}
|
| 1769 |
+
__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b)
|
| 1770 |
+
{
|
| 1771 |
+
__BINARY_OP_HALF2_MACRO(mul.sat)
|
| 1772 |
+
}
|
| 1773 |
+
__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c)
|
| 1774 |
+
{
|
| 1775 |
+
__TERNARY_OP_HALF2_MACRO(fma.rn)
|
| 1776 |
+
}
|
| 1777 |
+
__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c)
|
| 1778 |
+
{
|
| 1779 |
+
__TERNARY_OP_HALF2_MACRO(fma.rn.sat)
|
| 1780 |
+
}
|
| 1781 |
+
__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b) {
|
| 1782 |
+
__half ha = __low2half(a);
|
| 1783 |
+
__half hb = __low2half(b);
|
| 1784 |
+
|
| 1785 |
+
const __half v1 = __hdiv(ha, hb);
|
| 1786 |
+
|
| 1787 |
+
ha = __high2half(a);
|
| 1788 |
+
hb = __high2half(b);
|
| 1789 |
+
|
| 1790 |
+
const __half v2 = __hdiv(ha, hb);
|
| 1791 |
+
|
| 1792 |
+
return __halves2half2(v1, v2);
|
| 1793 |
+
}
|
| 1794 |
+
/******************************************************************************
|
| 1795 |
+
* __half arithmetic *
|
| 1796 |
+
******************************************************************************/
|
| 1797 |
+
__CUDA_FP16_DECL__ __half __hadd(const __half a, const __half b)
|
| 1798 |
+
{
|
| 1799 |
+
__BINARY_OP_HALF_MACRO(add)
|
| 1800 |
+
}
|
| 1801 |
+
__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b)
|
| 1802 |
+
{
|
| 1803 |
+
__BINARY_OP_HALF_MACRO(sub)
|
| 1804 |
+
}
|
| 1805 |
+
__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b)
|
| 1806 |
+
{
|
| 1807 |
+
__BINARY_OP_HALF_MACRO(mul)
|
| 1808 |
+
}
|
| 1809 |
+
__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b)
|
| 1810 |
+
{
|
| 1811 |
+
__BINARY_OP_HALF_MACRO(add.sat)
|
| 1812 |
+
}
|
| 1813 |
+
__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b)
|
| 1814 |
+
{
|
| 1815 |
+
__BINARY_OP_HALF_MACRO(sub.sat)
|
| 1816 |
+
}
|
| 1817 |
+
__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b)
|
| 1818 |
+
{
|
| 1819 |
+
__BINARY_OP_HALF_MACRO(mul.sat)
|
| 1820 |
+
}
|
| 1821 |
+
|
| 1822 |
+
__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c)
|
| 1823 |
+
{
|
| 1824 |
+
__TERNARY_OP_HALF_MACRO(fma.rn)
|
| 1825 |
+
}
|
| 1826 |
+
__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c)
|
| 1827 |
+
{
|
| 1828 |
+
__TERNARY_OP_HALF_MACRO(fma.rn.sat)
|
| 1829 |
+
}
|
| 1830 |
+
__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b) {
|
| 1831 |
+
__half v;
|
| 1832 |
+
__half abs;
|
| 1833 |
+
__half den;
|
| 1834 |
+
__HALF_TO_US(den) = 0x008FU;
|
| 1835 |
+
|
| 1836 |
+
float rcp;
|
| 1837 |
+
const float fa = __half2float(a);
|
| 1838 |
+
const float fb = __half2float(b);
|
| 1839 |
+
|
| 1840 |
+
asm("{rcp.approx.ftz.f32 %0, %1;\n}" :"=f"(rcp) : "f"(fb));
|
| 1841 |
+
|
| 1842 |
+
float fv = rcp * fa;
|
| 1843 |
+
|
| 1844 |
+
v = __float2half(fv);
|
| 1845 |
+
__HALF_TO_US(abs) = static_cast<unsigned short>(static_cast<unsigned int>(__HALF_TO_CUS(v)) & 0x00007FFFU);
|
| 1846 |
+
if (__hlt(abs, den) && (!(__HALF_TO_CUS(abs) == 0x0000U))) {
|
| 1847 |
+
const float err = __fmaf_rn(-fb, fv, fa);
|
| 1848 |
+
fv = __fmaf_rn(rcp, err, fv);
|
| 1849 |
+
v = __float2half(fv);
|
| 1850 |
+
}
|
| 1851 |
+
return v;
|
| 1852 |
+
}
|
| 1853 |
+
|
| 1854 |
+
/******************************************************************************
|
| 1855 |
+
* __half2 functions *
|
| 1856 |
+
******************************************************************************/
|
| 1857 |
+
#define __SPEC_CASE2(i,r, spc, ulp) \
|
| 1858 |
+
"{.reg.b32 spc, ulp, p;\n"\
|
| 1859 |
+
" mov.b32 spc,"#spc";\n"\
|
| 1860 |
+
" mov.b32 ulp,"#ulp";\n"\
|
| 1861 |
+
" set.eq.f16x2.f16x2 p,"#i", spc;\n"\
|
| 1862 |
+
" fma.rn.f16x2 "#r",p,ulp,"#r";\n}\n"
|
| 1863 |
+
#define __SPEC_CASE(i,r, spc, ulp) \
|
| 1864 |
+
"{.reg.b16 spc, ulp, p;\n"\
|
| 1865 |
+
" mov.b16 spc,"#spc";\n"\
|
| 1866 |
+
" mov.b16 ulp,"#ulp";\n"\
|
| 1867 |
+
" set.eq.f16.f16 p,"#i", spc;\n"\
|
| 1868 |
+
" fma.rn.f16 "#r",p,ulp,"#r";\n}\n"
|
| 1869 |
+
#define __APPROX_FCAST(fun) /* do */ {\
|
| 1870 |
+
__half val;\
|
| 1871 |
+
asm("{.reg.b32 f; \n"\
|
| 1872 |
+
" .reg.b16 r; \n"\
|
| 1873 |
+
" mov.b16 r,%1; \n"\
|
| 1874 |
+
" cvt.f32.f16 f,r; \n"\
|
| 1875 |
+
" "#fun".approx.f32 f,f; \n"\
|
| 1876 |
+
" cvt.rn.f16.f32 r,f; \n"\
|
| 1877 |
+
" mov.b16 %0,r; \n"\
|
| 1878 |
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));\
|
| 1879 |
+
return val;\
|
| 1880 |
+
} /* while(0) */
|
| 1881 |
+
#define __APPROX_FCAST2(fun) /* do */ {\
|
| 1882 |
+
__half2 val;\
|
| 1883 |
+
asm("{.reg.b16 hl, hu; \n"\
|
| 1884 |
+
" .reg.b32 fl, fu; \n"\
|
| 1885 |
+
" mov.b32 {hl, hu}, %1; \n"\
|
| 1886 |
+
" cvt.f32.f16 fl, hl; \n"\
|
| 1887 |
+
" cvt.f32.f16 fu, hu; \n"\
|
| 1888 |
+
" "#fun".approx.f32 fl, fl; \n"\
|
| 1889 |
+
" "#fun".approx.f32 fu, fu; \n"\
|
| 1890 |
+
" cvt.rn.f16.f32 hl, fl; \n"\
|
| 1891 |
+
" cvt.rn.f16.f32 hu, fu; \n"\
|
| 1892 |
+
" mov.b32 %0, {hl, hu}; \n"\
|
| 1893 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); \
|
| 1894 |
+
return val;\
|
| 1895 |
+
} /* while(0) */
|
| 1896 |
+
static __device__ __forceinline__ float __float_simpl_sinf(float a);
|
| 1897 |
+
static __device__ __forceinline__ float __float_simpl_cosf(float a);
|
| 1898 |
+
__CUDA_FP16_DECL__ __half __hsin_internal(const __half a) {
|
| 1899 |
+
float f = __half2float(a);
|
| 1900 |
+
f = __float_simpl_sinf(f);
|
| 1901 |
+
return __float2half_rn(f);
|
| 1902 |
+
}
|
| 1903 |
+
__CUDA_FP16_DECL__ __half hsin(const __half a) {
|
| 1904 |
+
__half r = __hsin_internal(a);
|
| 1905 |
+
asm("{\n\t"
|
| 1906 |
+
" .reg.b16 i,r,t; \n\t"
|
| 1907 |
+
" mov.b16 r, %0; \n\t"
|
| 1908 |
+
" mov.b16 i, %1; \n\t"
|
| 1909 |
+
" mov.b16 t, 0x8000U; \n\t"
|
| 1910 |
+
" and.b16 t,r,t; \n\t"
|
| 1911 |
+
__SPEC_CASE(i, r, 0X32B3U, 0x0800U)
|
| 1912 |
+
__SPEC_CASE(i, r, 0X5CB0U, 0x1000U)
|
| 1913 |
+
__SPEC_CASE(i, r, 0XB2B3U, 0x8800U)
|
| 1914 |
+
__SPEC_CASE(i, r, 0XDCB0U, 0x9000U)
|
| 1915 |
+
" or.b16 r,r,t; \n\t"
|
| 1916 |
+
" mov.b16 %0, r; \n"
|
| 1917 |
+
"}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
|
| 1918 |
+
return r;
|
| 1919 |
+
}
|
| 1920 |
+
__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a) {
|
| 1921 |
+
const __half l = __low2half(a);
|
| 1922 |
+
const __half h = __high2half(a);
|
| 1923 |
+
const __half sl = __hsin_internal(l);
|
| 1924 |
+
const __half sh = __hsin_internal(h);
|
| 1925 |
+
__half2 r = __halves2half2(sl, sh);
|
| 1926 |
+
asm("{\n\t"
|
| 1927 |
+
" .reg.b32 i,r,t; \n\t"
|
| 1928 |
+
" mov.b32 r, %0; \n\t"
|
| 1929 |
+
" mov.b32 i, %1; \n\t"
|
| 1930 |
+
" and.b32 t, r, 0x80008000U; \n\t"
|
| 1931 |
+
__SPEC_CASE2(i, r, 0X32B332B3U, 0x08000800U)
|
| 1932 |
+
__SPEC_CASE2(i, r, 0X5CB05CB0U, 0x10001000U)
|
| 1933 |
+
__SPEC_CASE2(i, r, 0XB2B3B2B3U, 0x88008800U)
|
| 1934 |
+
__SPEC_CASE2(i, r, 0XDCB0DCB0U, 0x90009000U)
|
| 1935 |
+
" or.b32 r, r, t; \n\t"
|
| 1936 |
+
" mov.b32 %0, r; \n"
|
| 1937 |
+
"}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
|
| 1938 |
+
return r;
|
| 1939 |
+
}
|
| 1940 |
+
__CUDA_FP16_DECL__ __half __hcos_internal(const __half a) {
|
| 1941 |
+
float f = __half2float(a);
|
| 1942 |
+
f = __float_simpl_cosf(f);
|
| 1943 |
+
return __float2half_rn(f);
|
| 1944 |
+
}
|
| 1945 |
+
__CUDA_FP16_DECL__ __half hcos(const __half a) {
|
| 1946 |
+
__half r = __hcos_internal(a);
|
| 1947 |
+
asm("{\n\t"
|
| 1948 |
+
" .reg.b16 i,r; \n\t"
|
| 1949 |
+
" mov.b16 r, %0; \n\t"
|
| 1950 |
+
" mov.b16 i, %1; \n\t"
|
| 1951 |
+
__SPEC_CASE(i, r, 0X2B7CU, 0x1000U)
|
| 1952 |
+
__SPEC_CASE(i, r, 0XAB7CU, 0x1000U)
|
| 1953 |
+
" mov.b16 %0, r; \n"
|
| 1954 |
+
"}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
|
| 1955 |
+
return r;
|
| 1956 |
+
}
|
| 1957 |
+
__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a) {
|
| 1958 |
+
const __half l = __low2half(a);
|
| 1959 |
+
const __half h = __high2half(a);
|
| 1960 |
+
const __half cl = __hcos_internal(l);
|
| 1961 |
+
const __half ch = __hcos_internal(h);
|
| 1962 |
+
__half2 r = __halves2half2(cl, ch);
|
| 1963 |
+
asm("{\n\t"
|
| 1964 |
+
" .reg.b32 i,r; \n\t"
|
| 1965 |
+
" mov.b32 r, %0; \n\t"
|
| 1966 |
+
" mov.b32 i, %1; \n\t"
|
| 1967 |
+
__SPEC_CASE2(i, r, 0X2B7C2B7CU, 0x10001000U)
|
| 1968 |
+
__SPEC_CASE2(i, r, 0XAB7CAB7CU, 0x10001000U)
|
| 1969 |
+
" mov.b32 %0, r; \n"
|
| 1970 |
+
"}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
|
| 1971 |
+
return r;
|
| 1972 |
+
}
|
| 1973 |
+
static __device__ __forceinline__ float __internal_trig_reduction_kernel(const float a, int *quadrant)
|
| 1974 |
+
{
|
| 1975 |
+
const int q = __float2int_rn(a * 0.636619772F);
|
| 1976 |
+
const float j = static_cast<float>(q);
|
| 1977 |
+
float t = __fmaf_rn(-j, 1.5707962512969971e+000F, a);
|
| 1978 |
+
t = __fmaf_rn(-j, 7.5497894158615964e-008F, t);
|
| 1979 |
+
*quadrant = q;
|
| 1980 |
+
return t;
|
| 1981 |
+
}
|
| 1982 |
+
static __device__ __forceinline__ float __internal_sin_cos_kernel(float x, const int i)
|
| 1983 |
+
{
|
| 1984 |
+
float z;
|
| 1985 |
+
const float x2 = x*x;
|
| 1986 |
+
|
| 1987 |
+
if ((static_cast<unsigned>(i) & 1U) != 0U) {
|
| 1988 |
+
z = 2.44331571e-5F;
|
| 1989 |
+
z = __fmaf_rn(z, x2, -1.38873163e-3F);
|
| 1990 |
+
}
|
| 1991 |
+
else {
|
| 1992 |
+
z = -1.95152959e-4F;
|
| 1993 |
+
z = __fmaf_rn(z, x2, 8.33216087e-3F);
|
| 1994 |
+
}
|
| 1995 |
+
if ((static_cast<unsigned>(i) & 1U) != 0U) {
|
| 1996 |
+
z = __fmaf_rn(z, x2, 4.16666457e-2F);
|
| 1997 |
+
z = __fmaf_rn(z, x2, -5.00000000e-1F);
|
| 1998 |
+
}
|
| 1999 |
+
else {
|
| 2000 |
+
z = __fmaf_rn(z, x2, -1.66666546e-1F);
|
| 2001 |
+
z = __fmaf_rn(z, x2, 0.0F);
|
| 2002 |
+
}
|
| 2003 |
+
if ((static_cast<unsigned>(i) & 1U) != 0U) {
|
| 2004 |
+
x = __fmaf_rn(z, x2, 1.0F);
|
| 2005 |
+
}
|
| 2006 |
+
else {
|
| 2007 |
+
x = __fmaf_rn(z, x, x);
|
| 2008 |
+
}
|
| 2009 |
+
if ((static_cast<unsigned>(i) & 2U) != 0U) {
|
| 2010 |
+
x = __fmaf_rn(x, -1.0F, 0.0F);
|
| 2011 |
+
}
|
| 2012 |
+
return x;
|
| 2013 |
+
}
|
| 2014 |
+
static __device__ __forceinline__ float __float_simpl_sinf(float a)
|
| 2015 |
+
{
|
| 2016 |
+
float z;
|
| 2017 |
+
int i;
|
| 2018 |
+
if (::isinf(a)) {
|
| 2019 |
+
a = a * 0.0F;
|
| 2020 |
+
}
|
| 2021 |
+
a = __internal_trig_reduction_kernel(a, &i);
|
| 2022 |
+
z = __internal_sin_cos_kernel(a, i);
|
| 2023 |
+
return z;
|
| 2024 |
+
}
|
| 2025 |
+
static __device__ __forceinline__ float __float_simpl_cosf(float a)
|
| 2026 |
+
{
|
| 2027 |
+
float z;
|
| 2028 |
+
int i;
|
| 2029 |
+
if (::isinf(a)) {
|
| 2030 |
+
a = a * 0.0F;
|
| 2031 |
+
}
|
| 2032 |
+
a = __internal_trig_reduction_kernel(a, &i);
|
| 2033 |
+
i++;
|
| 2034 |
+
z = __internal_sin_cos_kernel(a, i);
|
| 2035 |
+
return z;
|
| 2036 |
+
}
|
| 2037 |
+
|
| 2038 |
+
__CUDA_FP16_DECL__ __half hexp(const __half a) {
|
| 2039 |
+
__half val;
|
| 2040 |
+
asm("{.reg.b32 f, C; \n"
|
| 2041 |
+
" .reg.b16 h,r; \n"
|
| 2042 |
+
" mov.b16 h,%1; \n"
|
| 2043 |
+
" cvt.f32.f16 f,h; \n"
|
| 2044 |
+
" mov.b32 C, 0x3fb8aa3bU; \n"
|
| 2045 |
+
" mul.f32 f,f,C; \n"
|
| 2046 |
+
" ex2.approx.f32 f,f; \n"
|
| 2047 |
+
" cvt.rn.f16.f32 r,f; \n"
|
| 2048 |
+
__SPEC_CASE(h, r, 0X1F79U, 0x9400U)
|
| 2049 |
+
__SPEC_CASE(h, r, 0X25CFU, 0x9400U)
|
| 2050 |
+
__SPEC_CASE(h, r, 0XC13BU, 0x0400U)
|
| 2051 |
+
__SPEC_CASE(h, r, 0XC1EFU, 0x0200U)
|
| 2052 |
+
" mov.b16 %0,r; \n"
|
| 2053 |
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
| 2054 |
+
return val;
|
| 2055 |
+
}
|
| 2056 |
+
__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a) {
|
| 2057 |
+
__half2 val;
|
| 2058 |
+
asm("{.reg.b16 hl, hu; \n"
|
| 2059 |
+
" .reg.b32 h,r,fl,fu, C; \n"
|
| 2060 |
+
" mov.b32 {hl, hu}, %1; \n"
|
| 2061 |
+
" mov.b32 h, %1; \n"
|
| 2062 |
+
" cvt.f32.f16 fl, hl; \n"
|
| 2063 |
+
" cvt.f32.f16 fu, hu; \n"
|
| 2064 |
+
" mov.b32 C, 0x3fb8aa3bU; \n"
|
| 2065 |
+
" mul.f32 fl,fl,C; \n"
|
| 2066 |
+
" mul.f32 fu,fu,C; \n"
|
| 2067 |
+
" ex2.approx.f32 fl, fl; \n"
|
| 2068 |
+
" ex2.approx.f32 fu, fu; \n"
|
| 2069 |
+
" cvt.rn.f16.f32 hl, fl; \n"
|
| 2070 |
+
" cvt.rn.f16.f32 hu, fu; \n"
|
| 2071 |
+
" mov.b32 r, {hl, hu}; \n"
|
| 2072 |
+
__SPEC_CASE2(h, r, 0X1F791F79U, 0x94009400U)
|
| 2073 |
+
__SPEC_CASE2(h, r, 0X25CF25CFU, 0x94009400U)
|
| 2074 |
+
__SPEC_CASE2(h, r, 0XC13BC13BU, 0x04000400U)
|
| 2075 |
+
__SPEC_CASE2(h, r, 0XC1EFC1EFU, 0x02000200U)
|
| 2076 |
+
" mov.b32 %0, r; \n"
|
| 2077 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 2078 |
+
return val;
|
| 2079 |
+
}
|
| 2080 |
+
__CUDA_FP16_DECL__ __half hexp2(const __half a) {
|
| 2081 |
+
__half val;
|
| 2082 |
+
asm("{.reg.b32 f, ULP; \n"
|
| 2083 |
+
" .reg.b16 r; \n"
|
| 2084 |
+
" mov.b16 r,%1; \n"
|
| 2085 |
+
" cvt.f32.f16 f,r; \n"
|
| 2086 |
+
" ex2.approx.f32 f,f; \n"
|
| 2087 |
+
" mov.b32 ULP, 0x33800000U;\n"
|
| 2088 |
+
" fma.rn.f32 f,f,ULP,f; \n"
|
| 2089 |
+
" cvt.rn.f16.f32 r,f; \n"
|
| 2090 |
+
" mov.b16 %0,r; \n"
|
| 2091 |
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
| 2092 |
+
return val;
|
| 2093 |
+
}
|
| 2094 |
+
__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a) {
|
| 2095 |
+
__half2 val;
|
| 2096 |
+
asm("{.reg.b16 hl, hu; \n"
|
| 2097 |
+
" .reg.b32 fl, fu, ULP; \n"
|
| 2098 |
+
" mov.b32 {hl, hu}, %1; \n"
|
| 2099 |
+
" cvt.f32.f16 fl, hl; \n"
|
| 2100 |
+
" cvt.f32.f16 fu, hu; \n"
|
| 2101 |
+
" ex2.approx.f32 fl, fl; \n"
|
| 2102 |
+
" ex2.approx.f32 fu, fu; \n"
|
| 2103 |
+
" mov.b32 ULP, 0x33800000U;\n"
|
| 2104 |
+
" fma.rn.f32 fl,fl,ULP,fl; \n"
|
| 2105 |
+
" fma.rn.f32 fu,fu,ULP,fu; \n"
|
| 2106 |
+
" cvt.rn.f16.f32 hl, fl; \n"
|
| 2107 |
+
" cvt.rn.f16.f32 hu, fu; \n"
|
| 2108 |
+
" mov.b32 %0, {hl, hu}; \n"
|
| 2109 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 2110 |
+
return val;
|
| 2111 |
+
}
|
| 2112 |
+
__CUDA_FP16_DECL__ __half hexp10(const __half a) {
|
| 2113 |
+
__half val;
|
| 2114 |
+
asm("{.reg.b16 h,r; \n"
|
| 2115 |
+
" .reg.b32 f, C; \n"
|
| 2116 |
+
" mov.b16 h, %1; \n"
|
| 2117 |
+
" cvt.f32.f16 f, h; \n"
|
| 2118 |
+
" mov.b32 C, 0x40549A78U; \n"
|
| 2119 |
+
" mul.f32 f,f,C; \n"
|
| 2120 |
+
" ex2.approx.f32 f, f; \n"
|
| 2121 |
+
" cvt.rn.f16.f32 r, f; \n"
|
| 2122 |
+
__SPEC_CASE(h, r, 0x34DEU, 0x9800U)
|
| 2123 |
+
__SPEC_CASE(h, r, 0x9766U, 0x9000U)
|
| 2124 |
+
__SPEC_CASE(h, r, 0x9972U, 0x1000U)
|
| 2125 |
+
__SPEC_CASE(h, r, 0xA5C4U, 0x1000U)
|
| 2126 |
+
__SPEC_CASE(h, r, 0xBF0AU, 0x8100U)
|
| 2127 |
+
" mov.b16 %0, r; \n"
|
| 2128 |
+
"}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
| 2129 |
+
return val;
|
| 2130 |
+
}
|
| 2131 |
+
__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a) {
|
| 2132 |
+
__half2 val;
|
| 2133 |
+
asm("{.reg.b16 hl, hu; \n"
|
| 2134 |
+
" .reg.b32 h,r,fl,fu, C; \n"
|
| 2135 |
+
" mov.b32 {hl, hu}, %1; \n"
|
| 2136 |
+
" mov.b32 h, %1; \n"
|
| 2137 |
+
" cvt.f32.f16 fl, hl; \n"
|
| 2138 |
+
" cvt.f32.f16 fu, hu; \n"
|
| 2139 |
+
" mov.b32 C, 0x40549A78U; \n"
|
| 2140 |
+
" mul.f32 fl,fl,C; \n"
|
| 2141 |
+
" mul.f32 fu,fu,C; \n"
|
| 2142 |
+
" ex2.approx.f32 fl, fl; \n"
|
| 2143 |
+
" ex2.approx.f32 fu, fu; \n"
|
| 2144 |
+
" cvt.rn.f16.f32 hl, fl; \n"
|
| 2145 |
+
" cvt.rn.f16.f32 hu, fu; \n"
|
| 2146 |
+
" mov.b32 r, {hl, hu}; \n"
|
| 2147 |
+
__SPEC_CASE2(h, r, 0x34DE34DEU, 0x98009800U)
|
| 2148 |
+
__SPEC_CASE2(h, r, 0x97669766U, 0x90009000U)
|
| 2149 |
+
__SPEC_CASE2(h, r, 0x99729972U, 0x10001000U)
|
| 2150 |
+
__SPEC_CASE2(h, r, 0xA5C4A5C4U, 0x10001000U)
|
| 2151 |
+
__SPEC_CASE2(h, r, 0xBF0ABF0AU, 0x81008100U)
|
| 2152 |
+
" mov.b32 %0, r; \n"
|
| 2153 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 2154 |
+
return val;
|
| 2155 |
+
}
|
| 2156 |
+
__CUDA_FP16_DECL__ __half hlog2(const __half a) {
|
| 2157 |
+
__half val;
|
| 2158 |
+
asm("{.reg.b16 h, r; \n"
|
| 2159 |
+
" .reg.b32 f; \n"
|
| 2160 |
+
" mov.b16 h, %1; \n"
|
| 2161 |
+
" cvt.f32.f16 f, h; \n"
|
| 2162 |
+
" lg2.approx.f32 f, f; \n"
|
| 2163 |
+
" cvt.rn.f16.f32 r, f; \n"
|
| 2164 |
+
__SPEC_CASE(r, r, 0xA2E2U, 0x8080U)
|
| 2165 |
+
__SPEC_CASE(r, r, 0xBF46U, 0x9400U)
|
| 2166 |
+
" mov.b16 %0, r; \n"
|
| 2167 |
+
"}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
| 2168 |
+
return val;
|
| 2169 |
+
}
|
| 2170 |
+
__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a) {
|
| 2171 |
+
__half2 val;
|
| 2172 |
+
asm("{.reg.b16 hl, hu; \n"
|
| 2173 |
+
" .reg.b32 fl, fu, r, p; \n"
|
| 2174 |
+
" mov.b32 {hl, hu}, %1; \n"
|
| 2175 |
+
" cvt.f32.f16 fl, hl; \n"
|
| 2176 |
+
" cvt.f32.f16 fu, hu; \n"
|
| 2177 |
+
" lg2.approx.f32 fl, fl; \n"
|
| 2178 |
+
" lg2.approx.f32 fu, fu; \n"
|
| 2179 |
+
" cvt.rn.f16.f32 hl, fl; \n"
|
| 2180 |
+
" cvt.rn.f16.f32 hu, fu; \n"
|
| 2181 |
+
" mov.b32 r, {hl, hu}; \n"
|
| 2182 |
+
__SPEC_CASE2(r, r, 0xA2E2A2E2U, 0x80808080U)
|
| 2183 |
+
__SPEC_CASE2(r, r, 0xBF46BF46U, 0x94009400U)
|
| 2184 |
+
" mov.b32 %0, r; \n"
|
| 2185 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 2186 |
+
return val;
|
| 2187 |
+
}
|
| 2188 |
+
__CUDA_FP16_DECL__ __half hlog(const __half a) {
|
| 2189 |
+
__half val;
|
| 2190 |
+
asm("{.reg.b32 f, C; \n"
|
| 2191 |
+
" .reg.b16 r,h; \n"
|
| 2192 |
+
" mov.b16 h,%1; \n"
|
| 2193 |
+
" cvt.f32.f16 f,h; \n"
|
| 2194 |
+
" lg2.approx.f32 f,f; \n"
|
| 2195 |
+
" mov.b32 C, 0x3f317218U; \n"
|
| 2196 |
+
" mul.f32 f,f,C; \n"
|
| 2197 |
+
" cvt.rn.f16.f32 r,f; \n"
|
| 2198 |
+
__SPEC_CASE(h, r, 0X160DU, 0x9C00U)
|
| 2199 |
+
__SPEC_CASE(h, r, 0X3BFEU, 0x8010U)
|
| 2200 |
+
__SPEC_CASE(h, r, 0X3C0BU, 0x8080U)
|
| 2201 |
+
__SPEC_CASE(h, r, 0X6051U, 0x1C00U)
|
| 2202 |
+
" mov.b16 %0,r; \n"
|
| 2203 |
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
| 2204 |
+
return val;
|
| 2205 |
+
}
|
| 2206 |
+
__CUDA_FP16_DECL__ __half2 h2log(const __half2 a) {
|
| 2207 |
+
__half2 val;
|
| 2208 |
+
asm("{.reg.b16 hl, hu; \n"
|
| 2209 |
+
" .reg.b32 r, fl, fu, C, h; \n"
|
| 2210 |
+
" mov.b32 {hl, hu}, %1; \n"
|
| 2211 |
+
" mov.b32 h, %1; \n"
|
| 2212 |
+
" cvt.f32.f16 fl, hl; \n"
|
| 2213 |
+
" cvt.f32.f16 fu, hu; \n"
|
| 2214 |
+
" lg2.approx.f32 fl, fl; \n"
|
| 2215 |
+
" lg2.approx.f32 fu, fu; \n"
|
| 2216 |
+
" mov.b32 C, 0x3f317218U; \n"
|
| 2217 |
+
" mul.f32 fl,fl,C; \n"
|
| 2218 |
+
" mul.f32 fu,fu,C; \n"
|
| 2219 |
+
" cvt.rn.f16.f32 hl, fl; \n"
|
| 2220 |
+
" cvt.rn.f16.f32 hu, fu; \n"
|
| 2221 |
+
" mov.b32 r, {hl, hu}; \n"
|
| 2222 |
+
__SPEC_CASE2(h, r, 0X160D160DU, 0x9C009C00U)
|
| 2223 |
+
__SPEC_CASE2(h, r, 0X3BFE3BFEU, 0x80108010U)
|
| 2224 |
+
__SPEC_CASE2(h, r, 0X3C0B3C0BU, 0x80808080U)
|
| 2225 |
+
__SPEC_CASE2(h, r, 0X60516051U, 0x1C001C00U)
|
| 2226 |
+
" mov.b32 %0, r; \n"
|
| 2227 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 2228 |
+
return val;
|
| 2229 |
+
}
|
| 2230 |
+
__CUDA_FP16_DECL__ __half hlog10(const __half a) {
|
| 2231 |
+
__half val;
|
| 2232 |
+
asm("{.reg.b16 h, r; \n"
|
| 2233 |
+
" .reg.b32 f, C; \n"
|
| 2234 |
+
" mov.b16 h, %1; \n"
|
| 2235 |
+
" cvt.f32.f16 f, h; \n"
|
| 2236 |
+
" lg2.approx.f32 f, f; \n"
|
| 2237 |
+
" mov.b32 C, 0x3E9A209BU; \n"
|
| 2238 |
+
" mul.f32 f,f,C; \n"
|
| 2239 |
+
" cvt.rn.f16.f32 r, f; \n"
|
| 2240 |
+
__SPEC_CASE(h, r, 0x338FU, 0x1000U)
|
| 2241 |
+
__SPEC_CASE(h, r, 0x33F8U, 0x9000U)
|
| 2242 |
+
__SPEC_CASE(h, r, 0x57E1U, 0x9800U)
|
| 2243 |
+
__SPEC_CASE(h, r, 0x719DU, 0x9C00U)
|
| 2244 |
+
" mov.b16 %0, r; \n"
|
| 2245 |
+
"}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
| 2246 |
+
return val;
|
| 2247 |
+
}
|
| 2248 |
+
__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a) {
|
| 2249 |
+
__half2 val;
|
| 2250 |
+
asm("{.reg.b16 hl, hu; \n"
|
| 2251 |
+
" .reg.b32 r, fl, fu, C, h; \n"
|
| 2252 |
+
" mov.b32 {hl, hu}, %1; \n"
|
| 2253 |
+
" mov.b32 h, %1; \n"
|
| 2254 |
+
" cvt.f32.f16 fl, hl; \n"
|
| 2255 |
+
" cvt.f32.f16 fu, hu; \n"
|
| 2256 |
+
" lg2.approx.f32 fl, fl; \n"
|
| 2257 |
+
" lg2.approx.f32 fu, fu; \n"
|
| 2258 |
+
" mov.b32 C, 0x3E9A209BU; \n"
|
| 2259 |
+
" mul.f32 fl,fl,C; \n"
|
| 2260 |
+
" mul.f32 fu,fu,C; \n"
|
| 2261 |
+
" cvt.rn.f16.f32 hl, fl; \n"
|
| 2262 |
+
" cvt.rn.f16.f32 hu, fu; \n"
|
| 2263 |
+
" mov.b32 r, {hl, hu}; \n"
|
| 2264 |
+
__SPEC_CASE2(h, r, 0x338F338FU, 0x10001000U)
|
| 2265 |
+
__SPEC_CASE2(h, r, 0x33F833F8U, 0x90009000U)
|
| 2266 |
+
__SPEC_CASE2(h, r, 0x57E157E1U, 0x98009800U)
|
| 2267 |
+
__SPEC_CASE2(h, r, 0x719D719DU, 0x9C009C00U)
|
| 2268 |
+
" mov.b32 %0, r; \n"
|
| 2269 |
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
| 2270 |
+
return val;
|
| 2271 |
+
}
|
| 2272 |
+
#undef __SPEC_CASE2
|
| 2273 |
+
#undef __SPEC_CASE
|
| 2274 |
+
__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a) {
    // Elementwise approximate reciprocal of both halves; body generated
    // by the __APPROX_FCAST2 macro (defined earlier, not visible here).
    __APPROX_FCAST2(rcp)
}
|
| 2277 |
+
__CUDA_FP16_DECL__ __half hrcp(const __half a) {
    // Approximate half-precision reciprocal via the __APPROX_FCAST macro.
    __APPROX_FCAST(rcp)
}
|
| 2280 |
+
__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a) {
    // Elementwise approximate reciprocal square root of both halves.
    __APPROX_FCAST2(rsqrt)
}
|
| 2283 |
+
__CUDA_FP16_DECL__ __half hrsqrt(const __half a) {
    // Approximate half-precision reciprocal square root.
    __APPROX_FCAST(rsqrt)
}
|
| 2286 |
+
__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a) {
    // Elementwise approximate square root of both halves.
    __APPROX_FCAST2(sqrt)
}
|
| 2289 |
+
__CUDA_FP16_DECL__ __half hsqrt(const __half a) {
    // Approximate half-precision square root.
    __APPROX_FCAST(sqrt)
}
|
| 2292 |
+
#undef __APPROX_FCAST
|
| 2293 |
+
#undef __APPROX_FCAST2
|
| 2294 |
+
__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a)
{
    // Per-lane NaN test: set.nan compares each half against itself, so a
    // lane of the result is nonzero exactly when that input lane is NaN.
    __half2 r;
    asm("{set.nan.f16x2.f16x2 %0,%1,%2;\n}"
        :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(a)));
    return r;
}
|
| 2301 |
+
__CUDA_FP16_DECL__ bool __hisnan(const __half a)
{
    // Scalar NaN test: set.nan against itself yields a nonzero half bit
    // pattern iff the input is NaN; reduce that to a bool.
    __half r;
    asm("{set.nan.f16.f16 %0,%1,%2;\n}"
        :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(a)));
    return __HALF_TO_CUS(r) != 0U;
}
|
| 2308 |
+
__CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a)
{
    // Negate both halves with a single packed neg.f16x2 instruction.
    __half2 r;
    asm("{neg.f16x2 %0,%1;\n}"
        :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
    return r;
}
|
| 2315 |
+
__CUDA_FP16_DECL__ __half __hneg(const __half a)
{
    // Scalar half-precision negation (neg.f16).
    __half r;
    asm("{neg.f16 %0,%1;\n}"
        :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
    return r;
}
|
| 2322 |
+
__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a)
{
    // Absolute value of both halves with a single packed abs.f16x2.
    __half2 r;
    asm("{abs.f16x2 %0,%1;\n}"
        :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
    return r;
}
|
| 2329 |
+
__CUDA_FP16_DECL__ __half __habs(const __half a)
{
    // Scalar half-precision absolute value (abs.f16).
    __half r;
    asm("{abs.f16 %0,%1;\n}"
        :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
    return r;
}
|
| 2336 |
+
|
| 2337 |
+
__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c)
{
    // Fast complex multiply-accumulate, treating each __half2 as (re, im):
    //   (a.re, a.im) * (b.re, b.im) + (c.re, c.im)
    //   acc.re = (c.re + a.re*b.re) - a.im*b.im
    //   acc.im = (c.im + a.re*b.im) + a.im*b.re
    // Implemented with two vector FMAs; the second uses b rotated by i,
    // i.e. (-b.im, b.re), so one fma produces both corrections at once.
    const __half2 a_re = __half2half2(a.x);     // broadcast real part of a
    __half2 acc = __hfma2(a_re, b, c);          // (c.re + a.re*b.re, c.im + a.re*b.im)
    const __half2 a_im = __half2half2(a.y);     // broadcast imaginary part of a
    const __half2 ib = __halves2half2(__hneg(b.y), b.x);  // i * b
    acc = __hfma2(a_im, ib, acc);
    return acc;
}
|
| 2350 |
+
#endif /*__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
|
| 2351 |
+
|
| 2352 |
+
#if __CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)
|
| 2353 |
+
/******************************************************************************
|
| 2354 |
+
* __half arithmetic *
|
| 2355 |
+
******************************************************************************/
|
| 2356 |
+
__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b)
{
    // Half-precision max via PTX 'max'; body from __BINARY_OP_HALF_MACRO
    // (defined earlier in this file). sm_80+ section (guard above).
    __BINARY_OP_HALF_MACRO(max)
}
|
| 2360 |
+
__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b)
{
    // Half-precision min via PTX 'min' (see __BINARY_OP_HALF_MACRO).
    __BINARY_OP_HALF_MACRO(min)
}
|
| 2364 |
+
__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b)
{
    // NaN-propagating max: the PTX '.NaN' variant returns NaN when either
    // operand is NaN (per the PTX ISA), unlike plain max.
    __BINARY_OP_HALF_MACRO(max.NaN)
}
|
| 2368 |
+
__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b)
{
    // NaN-propagating min (see __hmax_nan for '.NaN' semantics).
    __BINARY_OP_HALF_MACRO(min.NaN)
}
|
| 2372 |
+
__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c)
{
    // Fused multiply-add with a fused ReLU on the result
    // (PTX fma.rn.relu; see __TERNARY_OP_HALF_MACRO earlier in the file).
    __TERNARY_OP_HALF_MACRO(fma.rn.relu)
}
|
| 2376 |
+
/******************************************************************************
|
| 2377 |
+
* __half2 arithmetic *
|
| 2378 |
+
******************************************************************************/
|
| 2379 |
+
__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b)
{
    // Elementwise max of both half lanes (see __BINARY_OP_HALF2_MACRO).
    __BINARY_OP_HALF2_MACRO(max)
}
|
| 2383 |
+
__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b)
{
    // Elementwise min of both half lanes.
    __BINARY_OP_HALF2_MACRO(min)
}
|
| 2387 |
+
__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b)
{
    // Elementwise NaN-propagating max ('.NaN' variant, per the PTX ISA).
    __BINARY_OP_HALF2_MACRO(max.NaN)
}
|
| 2391 |
+
__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b)
{
    // Elementwise NaN-propagating min.
    __BINARY_OP_HALF2_MACRO(min.NaN)
}
|
| 2395 |
+
__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c)
{
    // Packed fused multiply-add with fused ReLU on both lanes.
    __TERNARY_OP_HALF2_MACRO(fma.rn.relu)
}
|
| 2399 |
+
#endif /*__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)*/
|
| 2400 |
+
|
| 2401 |
+
/* Define __PTR for atomicAdd prototypes below, undef after done */
|
| 2402 |
+
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
|
| 2403 |
+
#define __PTR "l"
|
| 2404 |
+
#else
|
| 2405 |
+
#define __PTR "r"
|
| 2406 |
+
#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
|
| 2407 |
+
|
| 2408 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
|
| 2409 |
+
|
| 2410 |
+
__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val) {
    // Packed atomic add of a __half2 (atom.add.noftz.f16x2).
    // Returns the value previously stored at *address (standard PTX atom
    // semantics). sm_60+ only (see the guard above); __PTR selects the
    // 32/64-bit pointer constraint.
    __half2 r;
    asm volatile ("{ atom.add.noftz.f16x2 %0,[%1],%2; }\n"
                  : "=r"(__HALF2_TO_UI(r)) : __PTR(address), "r"(__HALF2_TO_CUI(val))
                  : "memory");
    return r;
}
|
| 2417 |
+
|
| 2418 |
+
#endif /*!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600*/
|
| 2419 |
+
|
| 2420 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
|
| 2421 |
+
|
| 2422 |
+
__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val) {
    // Scalar half atomic add (atom.add.noftz.f16); returns the previous
    // value at *address. sm_70+ only (see the guard above).
    __half r;
    asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n"
                  : "=h"(__HALF_TO_US(r))
                  : __PTR(address), "h"(__HALF_TO_CUS(val))
                  : "memory");
    return r;
}
|
| 2430 |
+
|
| 2431 |
+
#endif /*!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700*/
|
| 2432 |
+
|
| 2433 |
+
#undef __PTR
|
| 2434 |
+
|
| 2435 |
+
#undef __CUDA_FP16_DECL__
|
| 2436 |
+
#endif /* defined(__CUDACC__) */
|
| 2437 |
+
#endif /* defined(__cplusplus) */
|
| 2438 |
+
|
| 2439 |
+
#undef __TERNARY_OP_HALF2_MACRO
|
| 2440 |
+
#undef __TERNARY_OP_HALF_MACRO
|
| 2441 |
+
#undef __BINARY_OP_HALF2_MACRO
|
| 2442 |
+
#undef __BINARY_OP_HALF_MACRO
|
| 2443 |
+
|
| 2444 |
+
#undef __CUDA_HOSTDEVICE_FP16_DECL__
|
| 2445 |
+
#undef __CUDA_FP16_DECL__
|
| 2446 |
+
|
| 2447 |
+
/* Define first-class types "half" and "half2", unless user specifies otherwise via "#define CUDA_NO_HALF" */
|
| 2448 |
+
/* C cannot ever have these types defined here, because __half and __half2 are C++ classes */
|
| 2449 |
+
#if defined(__cplusplus) && !defined(CUDA_NO_HALF)
|
| 2450 |
+
typedef __half half;
|
| 2451 |
+
typedef __half2 half2;
|
| 2452 |
+
// for consistency with __nv_bfloat16
|
| 2453 |
+
typedef __half __nv_half;
|
| 2454 |
+
typedef __half2 __nv_half2;
|
| 2455 |
+
typedef __half_raw __nv_half_raw;
|
| 2456 |
+
typedef __half2_raw __nv_half2_raw;
|
| 2457 |
+
typedef __half nv_half;
|
| 2458 |
+
typedef __half2 nv_half2;
|
| 2459 |
+
#endif /* defined(__cplusplus) && !defined(CUDA_NO_HALF) */
|
| 2460 |
+
|
| 2461 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP16)
|
| 2462 |
+
#undef __CPP_VERSION_AT_LEAST_11_FP16
|
| 2463 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
|
| 2464 |
+
|
| 2465 |
+
#endif /* end of include guard: __CUDA_FP16_HPP__ */
|
lib/python3.10/site-packages/numba/cuda/cuda_paths.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import re
|
| 3 |
+
import os
|
| 4 |
+
from collections import namedtuple
|
| 5 |
+
|
| 6 |
+
from numba.core.config import IS_WIN32
|
| 7 |
+
from numba.misc.findlib import find_lib, find_file
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
_env_path_tuple = namedtuple('_env_path_tuple', ['by', 'info'])
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _find_valid_path(options):
|
| 14 |
+
"""Find valid path from *options*, which is a list of 2-tuple of
|
| 15 |
+
(name, path). Return first pair where *path* is not None.
|
| 16 |
+
If no valid path is found, return ('<unknown>', None)
|
| 17 |
+
"""
|
| 18 |
+
for by, data in options:
|
| 19 |
+
if data is not None:
|
| 20 |
+
return by, data
|
| 21 |
+
else:
|
| 22 |
+
return '<unknown>', None
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _get_libdevice_path_decision():
    """Pick the libdevice directory, trying sources in priority order.

    Returns a ``(source_name, directory_or_None)`` tuple.
    """
    candidates = [
        ('Conda environment', get_conda_ctk()),
        ('Conda environment (NVIDIA package)', get_nvidia_libdevice_ctk()),
        ('CUDA_HOME', get_cuda_home('nvvm', 'libdevice')),
        ('System', get_system_ctk('nvvm', 'libdevice')),
        ('Debian package', get_debian_pkg_libdevice()),
    ]
    return _find_valid_path(candidates)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _nvvm_lib_dir():
    """Return the path components of the NVVM library dir under a toolkit
    root: shared libraries live under ``nvvm/bin`` on Windows, ``nvvm/lib64``
    elsewhere.
    """
    return ('nvvm', 'bin') if IS_WIN32 else ('nvvm', 'lib64')
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _get_nvvm_path_decision():
    """Pick the NVVM library directory, trying sources in priority order.

    Returns a ``(source_name, directory_or_None)`` tuple.
    """
    nvvm_subdir = _nvvm_lib_dir()
    candidates = [
        ('Conda environment', get_conda_ctk()),
        ('Conda environment (NVIDIA package)', get_nvidia_nvvm_ctk()),
        ('CUDA_HOME', get_cuda_home(*nvvm_subdir)),
        ('System', get_system_ctk(*nvvm_subdir)),
    ]
    return _find_valid_path(candidates)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _get_libdevice_paths():
    """Locate the libdevice bitcode file.

    Returns an ``_env_path_tuple`` whose ``info`` is the newest matching
    ``libdevice*.bc`` path, or None when nothing is found.
    """
    by, libdir = _get_libdevice_path_decision()
    # Matches names such as libdevice.bc or libdevice.10.bc.
    pattern = re.compile(r'libdevice(\.\d+)*\.bc$')
    matches = find_file(pattern, libdir)
    # The lexicographic max corresponds to the most recent version.
    best = max(matches, default=None)
    return _env_path_tuple(by, best)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _cudalib_path():
    """Return the toolkit subdirectory holding shared CUDA libraries
    ('bin' on Windows, 'lib64' elsewhere).
    """
    return 'bin' if IS_WIN32 else 'lib64'
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _cuda_home_static_cudalib_path():
    """Return subdirectory components for static libraries under CUDA_HOME."""
    return ('lib', 'x64') if IS_WIN32 else ('lib64',)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _get_cudalib_dir_path_decision():
    """Pick the dynamic CUDA library directory from prioritized sources.

    Returns a ``(source_name, directory_or_None)`` tuple.
    """
    libsub = _cudalib_path()
    candidates = [
        ('Conda environment', get_conda_ctk()),
        ('Conda environment (NVIDIA package)', get_nvidia_cudalib_ctk()),
        ('CUDA_HOME', get_cuda_home(libsub)),
        ('System', get_system_ctk(libsub)),
    ]
    return _find_valid_path(candidates)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _get_static_cudalib_dir_path_decision():
    """Pick the static CUDA library directory from prioritized sources.

    Returns a ``(source_name, directory_or_None)`` tuple.
    """
    candidates = [
        ('Conda environment', get_conda_ctk()),
        ('Conda environment (NVIDIA package)', get_nvidia_static_cudalib_ctk()),
        ('CUDA_HOME', get_cuda_home(*_cuda_home_static_cudalib_path())),
        ('System', get_system_ctk(_cudalib_path())),
    ]
    return _find_valid_path(candidates)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _get_cudalib_dir():
    """Return an ``_env_path_tuple`` describing the CUDA shared-library dir."""
    return _env_path_tuple(*_get_cudalib_dir_path_decision())
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def _get_static_cudalib_dir():
    """Return an ``_env_path_tuple`` describing the static CUDA library dir."""
    return _env_path_tuple(*_get_static_cudalib_dir_path_decision())
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def get_system_ctk(*subdirs):
    """Return a path inside the system-wide cudatoolkit, or None if absent.

    Only the conventional ``/usr/local/cuda`` prefix on Linux is probed;
    versioned installations are deliberately ignored.
    """
    if not sys.platform.startswith('linux'):
        return None
    root = '/usr/local/cuda'
    if not os.path.exists(root):
        return None
    return os.path.join(root, *subdirs)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def get_conda_ctk():
    """Return the directory holding the shared libraries of a conda-installed
    cudatoolkit, or None when not in a conda env or no toolkit is present.
    """
    if not os.path.exists(os.path.join(sys.prefix, 'conda-meta')):
        # Not running inside a conda environment.
        return None
    # Presence of the NVVM library is taken as evidence that cudatoolkit
    # is installed.
    found = find_lib('nvvm')
    if not found:
        return None
    # Report the directory of the lexicographically greatest hit.
    return os.path.dirname(max(found))
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def get_nvidia_nvvm_ctk():
    """Return the directory containing the NVVM shared library installed by
    the NVIDIA conda packages, or None when unavailable.
    """
    if not os.path.exists(os.path.join(sys.prefix, 'conda-meta')):
        # Not a conda environment.
        return None

    # Assume the existence of NVVM in the conda env implies that a CUDA
    # toolkit conda package is installed. Linux and the Windows 11.x
    # packages place nvvm directly under the env prefix; the Windows 12.x
    # packages nest it under 'Library'.
    for parts in (('nvvm',), ('Library', 'nvvm')):
        libdir = os.path.join(sys.prefix, *parts, _cudalib_path())
        if os.path.isdir(libdir):
            found = find_lib('nvvm', libdir=libdir)
            if not found:
                return None
            # Report the directory of the lexicographically greatest hit.
            return os.path.dirname(max(found))
    # Neither layout exists: assume the NVIDIA conda package is absent.
    return None
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def get_nvidia_libdevice_ctk():
    """Return the libdevice directory from the NVIDIA conda packages,
    or None when the NVVM package is not present.
    """
    nvvm_libdir = get_nvidia_nvvm_ctk()
    if not nvvm_libdir:
        return None
    # libdevice sits next to the NVVM library directory.
    return os.path.join(os.path.dirname(nvvm_libdir), 'libdevice')
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def get_nvidia_cudalib_ctk():
    """Return the shared-library directory of the NVIDIA conda cudatoolkit,
    or None when the NVVM package is not present.
    """
    nvvm_ctk = get_nvidia_nvvm_ctk()
    if not nvvm_ctk:
        return None
    env_root = os.path.dirname(os.path.dirname(nvvm_ctk))
    # DLLs live in 'bin' on Windows; shared objects in 'lib' elsewhere.
    return os.path.join(env_root, 'bin' if IS_WIN32 else 'lib')
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def get_nvidia_static_cudalib_ctk():
    """Return the static-library directory of the NVIDIA conda cudatoolkit,
    or None when the NVVM package is not present.
    """
    nvvm_ctk = get_nvidia_nvvm_ctk()
    if not nvvm_ctk:
        return None

    if IS_WIN32 and ("Library" not in nvvm_ctk):
        # Location specific to CUDA 11.x packages on Windows.
        parts = ('Lib', 'x64')
    else:
        # Linux, or Windows with CUDA 12.x packages.
        parts = ('lib',)

    env_root = os.path.dirname(os.path.dirname(nvvm_ctk))
    return os.path.join(env_root, *parts)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def get_cuda_home(*subdirs):
    """Return the CUDA install root joined with *subdirs*.

    Honors ``CUDA_HOME`` first, then ``CUDA_PATH`` (the conventional
    variable for Windows installs without Anaconda). Returns None when
    neither is set.
    """
    for envvar in ('CUDA_HOME', 'CUDA_PATH'):
        root = os.environ.get(envvar)
        if root is not None:
            return os.path.join(root, *subdirs)
    return None
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def _get_nvvm_path():
    """Locate the NVVM shared library.

    Returns an ``_env_path_tuple`` whose ``info`` is the newest matching
    library path, or None when no candidate is found.
    """
    by, libdir = _get_nvvm_path_decision()
    hits = find_lib('nvvm', libdir)
    return _env_path_tuple(by, max(hits) if hits else None)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def get_cuda_paths():
    """Return a dictionary mapping component names to ``_env_path_tuple``
    ``(by, info)`` entries.

    Keys and their ``info`` values:
    - "nvvm": path of the NVVM shared library file
    - "libdevice": path of the libdevice bitcode file
    - "cudalib_dir": directory of the dynamic CUDA libraries
    - "static_cudalib_dir": directory of the static CUDA libraries

    The result is computed once and memoized on the function object.
    """
    cached = getattr(get_cuda_paths, '_cached_result', None)
    if cached is None:
        cached = {
            'nvvm': _get_nvvm_path(),
            'libdevice': _get_libdevice_paths(),
            'cudalib_dir': _get_cudalib_dir(),
            'static_cudalib_dir': _get_static_cudalib_dir(),
        }
        # Memoize so later callers skip the filesystem probing.
        get_cuda_paths._cached_result = cached
    return cached
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def get_debian_pkg_libdevice():
    """Return the Debian NVIDIA Maintainers-packaged libdevice location,
    or None when it does not exist on this system.
    """
    location = '/usr/lib/nvidia-cuda-toolkit/libdevice'
    return location if os.path.exists(location) else None
|
lib/python3.10/site-packages/numba/cuda/cudadecl.py
ADDED
|
@@ -0,0 +1,806 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from numba.core import types
|
| 3 |
+
from numba.core.typing.npydecl import (parse_dtype, parse_shape,
|
| 4 |
+
register_number_classes,
|
| 5 |
+
register_numpy_ufunc,
|
| 6 |
+
trigonometric_functions,
|
| 7 |
+
comparison_functions,
|
| 8 |
+
math_operations,
|
| 9 |
+
bit_twiddling_functions)
|
| 10 |
+
from numba.core.typing.templates import (AttributeTemplate, ConcreteTemplate,
|
| 11 |
+
AbstractTemplate, CallableTemplate,
|
| 12 |
+
signature, Registry)
|
| 13 |
+
from numba.cuda.types import dim3
|
| 14 |
+
from numba.core.typeconv import Conversion
|
| 15 |
+
from numba import cuda
|
| 16 |
+
from numba.cuda.compiler import declare_device_function_template
|
| 17 |
+
|
| 18 |
+
registry = Registry()
|
| 19 |
+
register = registry.register
|
| 20 |
+
register_attr = registry.register_attr
|
| 21 |
+
register_global = registry.register_global
|
| 22 |
+
|
| 23 |
+
register_number_classes(register_global)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Cuda_array_decl(CallableTemplate):
    """Base typing template for ``cuda.shared.array`` / ``cuda.local.array``.

    Resolves ``(shape, dtype)`` calls to a C-contiguous array type when the
    shape is an integer literal (or tuple of integer literals) and the dtype
    is resolvable; otherwise typing fails by returning None.
    """
    def generic(self):
        def typer(shape, dtype):
            # Only integer literals and tuples of integer literals are
            # valid shapes — the array extent must be a compile-time
            # constant.
            if isinstance(shape, types.Integer):
                if not isinstance(shape, types.IntegerLiteral):
                    return None
            elif isinstance(shape, (types.Tuple, types.UniTuple)):
                # Generator expression: short-circuits and avoids
                # materializing an intermediate list (was any([...])).
                if any(not isinstance(s, types.IntegerLiteral)
                       for s in shape):
                    return None
            else:
                return None

            ndim = parse_shape(shape)
            nb_dtype = parse_dtype(dtype)
            if nb_dtype is not None and ndim is not None:
                return types.Array(dtype=nb_dtype, ndim=ndim, layout='C')

        return typer
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@register
class Cuda_shared_array(Cuda_array_decl):
    """Typing for ``cuda.shared.array(shape, dtype)`` (see base class)."""
    key = cuda.shared.array
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@register
class Cuda_local_array(Cuda_array_decl):
    """Typing for ``cuda.local.array(shape, dtype)`` (see base class)."""
    key = cuda.local.array
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@register
class Cuda_const_array_like(CallableTemplate):
    """Typing for ``cuda.const.array_like(arr)``.

    The constant-memory copy has exactly the type of the source ndarray,
    so the typer is the identity.
    """
    key = cuda.const.array_like

    def generic(self):
        return lambda ndarray: ndarray
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@register
class Cuda_threadfence_device(ConcreteTemplate):
    """Typing for ``cuda.threadfence()``: no arguments, returns nothing."""
    key = cuda.threadfence
    cases = [signature(types.none)]
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@register
class Cuda_threadfence_block(ConcreteTemplate):
    """Typing for ``cuda.threadfence_block()``: no arguments, returns nothing."""
    key = cuda.threadfence_block
    cases = [signature(types.none)]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@register
class Cuda_threadfence_system(ConcreteTemplate):
    """Typing for ``cuda.threadfence_system()``: no arguments, returns nothing."""
    key = cuda.threadfence_system
    cases = [signature(types.none)]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@register
class Cuda_syncwarp(ConcreteTemplate):
    """Typing for ``cuda.syncwarp([mask])``: optional int32 lane mask."""
    key = cuda.syncwarp
    cases = [signature(types.none), signature(types.none, types.i4)]
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@register
class Cuda_shfl_sync_intrinsic(ConcreteTemplate):
    """Typing for the raw shfl_sync intrinsic.

    Args are (mask, mode, value, src_lane, clamp); the value may be
    i4/i8/f4/f8 and the result is a (value, predicate) tuple.
    """
    key = cuda.shfl_sync_intrinsic
    cases = [
        signature(types.Tuple((types.i4, types.b1)),
                  types.i4, types.i4, types.i4, types.i4, types.i4),
        signature(types.Tuple((types.i8, types.b1)),
                  types.i4, types.i4, types.i8, types.i4, types.i4),
        signature(types.Tuple((types.f4, types.b1)),
                  types.i4, types.i4, types.f4, types.i4, types.i4),
        signature(types.Tuple((types.f8, types.b1)),
                  types.i4, types.i4, types.f8, types.i4, types.i4),
    ]
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@register
class Cuda_vote_sync_intrinsic(ConcreteTemplate):
    """Typing for the raw vote_sync intrinsic: (mask, mode, predicate) ->
    (ballot, predicate) tuple.
    """
    key = cuda.vote_sync_intrinsic
    cases = [signature(types.Tuple((types.i4, types.b1)),
                       types.i4, types.i4, types.b1)]
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@register
class Cuda_match_any_sync(ConcreteTemplate):
    """Typing for ``cuda.match_any_sync(mask, value)``: value may be
    i4/i8/f4/f8; the result is an int32 lane mask.
    """
    key = cuda.match_any_sync
    cases = [
        signature(types.i4, types.i4, types.i4),
        signature(types.i4, types.i4, types.i8),
        signature(types.i4, types.i4, types.f4),
        signature(types.i4, types.i4, types.f8),
    ]
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
@register
class Cuda_match_all_sync(ConcreteTemplate):
    """Typing for ``cuda.match_all_sync(mask, value)``: result is a
    (lane-mask, predicate) tuple.
    """
    key = cuda.match_all_sync
    cases = [
        signature(types.Tuple((types.i4, types.b1)), types.i4, types.i4),
        signature(types.Tuple((types.i4, types.b1)), types.i4, types.i8),
        signature(types.Tuple((types.i4, types.b1)), types.i4, types.f4),
        signature(types.Tuple((types.i4, types.b1)), types.i4, types.f8),
    ]
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@register
class Cuda_activemask(ConcreteTemplate):
    """Typing for ``cuda.activemask()``: returns a uint32 warp mask."""
    key = cuda.activemask
    cases = [signature(types.uint32)]
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@register
class Cuda_lanemask_lt(ConcreteTemplate):
    """Typing for ``cuda.lanemask_lt()``: returns a uint32 lane mask."""
    key = cuda.lanemask_lt
    cases = [signature(types.uint32)]
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
@register
class Cuda_popc(ConcreteTemplate):
    """Typing for ``cuda.popc`` (population count).

    Supported types follow ``llvm.popc``, see
    http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics
    """
    key = cuda.popc
    cases = [
        signature(types.int8, types.int8),
        signature(types.int16, types.int16),
        signature(types.int32, types.int32),
        signature(types.int64, types.int64),
        signature(types.uint8, types.uint8),
        signature(types.uint16, types.uint16),
        signature(types.uint32, types.uint32),
        signature(types.uint64, types.uint64),
    ]
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@register
class Cuda_fma(ConcreteTemplate):
    """Typing for ``cuda.fma`` (fused multiply-add).

    Supported types follow ``llvm.fma``, see
    https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#standard-c-library-intrinics
    """
    key = cuda.fma
    cases = [
        signature(types.float32, types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64, types.float64),
    ]
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@register
class Cuda_hfma(ConcreteTemplate):
    """Typing for ``cuda.fp16.hfma``: half-precision fused multiply-add."""
    key = cuda.fp16.hfma
    cases = [
        signature(types.float16, types.float16, types.float16, types.float16)
    ]
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@register
class Cuda_cbrt(ConcreteTemplate):
    """Typing for ``cuda.cbrt`` (cube root) on float32/float64."""

    key = cuda.cbrt
    cases = [
        signature(types.float32, types.float32),
        signature(types.float64, types.float64),
    ]
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@register
class Cuda_brev(ConcreteTemplate):
    """Typing for ``cuda.brev`` (bit reverse) on uint32/uint64."""
    key = cuda.brev
    cases = [
        signature(types.uint32, types.uint32),
        signature(types.uint64, types.uint64),
    ]
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
@register
class Cuda_clz(ConcreteTemplate):
    """Typing for ``cuda.clz`` (count leading zeros).

    Supported types follow ``llvm.ctlz``, see
    http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics
    """
    key = cuda.clz
    cases = [
        signature(types.int8, types.int8),
        signature(types.int16, types.int16),
        signature(types.int32, types.int32),
        signature(types.int64, types.int64),
        signature(types.uint8, types.uint8),
        signature(types.uint16, types.uint16),
        signature(types.uint32, types.uint32),
        signature(types.uint64, types.uint64),
    ]
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
@register
class Cuda_ffs(ConcreteTemplate):
    """
    Typing for ``cuda.ffs`` (find first set bit).

    Supported types from `llvm.cttz`
    [here](http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics)
    """
    key = cuda.ffs
    # The result is always uint32, regardless of the operand width.
    cases = [
        signature(types.uint32, types.int8),
        signature(types.uint32, types.int16),
        signature(types.uint32, types.int32),
        signature(types.uint32, types.int64),
        signature(types.uint32, types.uint8),
        signature(types.uint32, types.uint16),
        signature(types.uint32, types.uint32),
        signature(types.uint32, types.uint64),
    ]
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
@register
class Cuda_selp(AbstractTemplate):
    """Typing for ``cuda.selp(test, a, b)``.

    Both value operands must share one of the types accepted by the PTX
    ``selp`` instruction; otherwise no typing is produced.
    """
    key = cuda.selp

    def generic(self, args, kws):
        assert not kws
        predicate, first, second = args

        # Operand types accepted by selp, per
        # http://docs.nvidia.com/cuda/parallel-thread-execution/index.html#comparison-and-selection-instructions-selp
        allowed = (types.float64, types.float32,
                   types.int16, types.uint16,
                   types.int32, types.uint32,
                   types.int64, types.uint64)

        if first == second and first in allowed:
            return signature(first, predicate, first, first)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def _genfp16_unary(l_key):
    # Build and register a ConcreteTemplate typing a unary fp16 -> fp16
    # intrinsic keyed on *l_key* (e.g. cuda.fp16.hneg).
    @register
    class Cuda_fp16_unary(ConcreteTemplate):
        key = l_key
        cases = [signature(types.float16, types.float16)]

    return Cuda_fp16_unary
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def _genfp16_unary_operator(l_key):
    # Register a global AbstractTemplate typing the unary operator *l_key*
    # (e.g. operator.neg) for float16 operands only; any other argument
    # type gets no typing from this template (generic() returns None).
    @register_global(l_key)
    class Cuda_fp16_unary(AbstractTemplate):
        key = l_key

        def generic(self, args, kws):
            assert not kws
            if len(args) == 1 and args[0] == types.float16:
                return signature(types.float16, types.float16)

    return Cuda_fp16_unary
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _genfp16_binary(l_key):
    # Build and register a ConcreteTemplate typing a binary
    # (fp16, fp16) -> fp16 intrinsic keyed on *l_key* (e.g. cuda.fp16.hadd).
    @register
    class Cuda_fp16_binary(ConcreteTemplate):
        key = l_key
        cases = [signature(types.float16, types.float16, types.float16)]

    return Cuda_fp16_binary
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
@register_global(float)
class Float(AbstractTemplate):
    """Typing for ``float(x)`` when *x* is a float16.

    Only the float16 -> float16 case is handled here; other argument
    types fall through (implicit None) to the default typing.
    """

    def generic(self, args, kws):
        assert not kws

        (operand,) = args

        if operand != types.float16:
            return
        return signature(operand, operand)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def _genfp16_binary_comparison(l_key):
    # Build and register a ConcreteTemplate typing a binary fp16 comparison
    # (float16, float16) -> bool keyed on *l_key* (e.g. cuda.fp16.heq).
    @register
    class Cuda_fp16_cmp(ConcreteTemplate):
        key = l_key

        cases = [
            signature(types.b1, types.float16, types.float16)
        ]
    return Cuda_fp16_cmp
|
| 320 |
+
|
| 321 |
+
# If multiple ConcreteTemplates provide typing for a single function, then
|
| 322 |
+
# function resolution will pick the first compatible typing it finds even if it
|
| 323 |
+
# involves inserting a cast that would be considered undesirable (in this
|
| 324 |
+
# specific case, float16s could be cast to float32s for comparisons).
|
| 325 |
+
#
|
| 326 |
+
# To work around this, we instead use an AbstractTemplate that implements
|
| 327 |
+
# exactly the casting logic that we desire. The AbstractTemplate gets
|
| 328 |
+
# considered in preference to ConcreteTemplates during typing.
|
| 329 |
+
#
|
| 330 |
+
# This is tracked as Issue #7863 (https://github.com/numba/numba/issues/7863) -
|
| 331 |
+
# once this is resolved it should be possible to replace this AbstractTemplate
|
| 332 |
+
# with a ConcreteTemplate to simplify the logic.
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def _fp16_binary_operator(l_key, retty):
    """Register an AbstractTemplate typing the binary operator *l_key*
    with result type *retty*, for operand pairs involving float16.

    The non-fp16 operand (if any) must be convertible to float16 by an
    exact, promoting, or safe conversion; both operands are then typed
    as float16.
    """
    @register_global(l_key)
    class Cuda_fp16_operator(AbstractTemplate):
        key = l_key

        def generic(self, args, kws):
            assert not kws
            if len(args) != 2:
                return
            lhs, rhs = args
            if lhs != types.float16 and rhs != types.float16:
                return

            # Convert the non-fp16 operand towards the fp16 one.
            if lhs == types.float16:
                conv = self.context.can_convert(rhs, lhs)
            else:
                conv = self.context.can_convert(lhs, rhs)

            # We allow three cases here:
            #
            # 1. fp16 to fp16 - Conversion.exact
            # 2. fp16 to other types fp16 can be promoted to
            #    - Conversion.promote
            # 3. fp16 to int8 (safe conversion) - Conversion.safe
            if conv in (Conversion.exact, Conversion.promote,
                        Conversion.safe):
                return signature(retty, types.float16, types.float16)

    return Cuda_fp16_operator
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def _genfp16_comparison_operator(op):
    # fp16 comparison operators (==, !=, <, ...) produce a boolean.
    return _fp16_binary_operator(op, types.b1)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def _genfp16_binary_operator(op):
    # fp16 arithmetic operators (+, -, *, /) produce a float16.
    return _fp16_binary_operator(op, types.float16)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
# Register typing for the cuda.fp16 intrinsics and the corresponding Python
# operators. The intrinsics (hadd, hsub, ...) get ConcreteTemplates; the
# operators get AbstractTemplates so mixed-type operands can be converted.
Cuda_hadd = _genfp16_binary(cuda.fp16.hadd)
Cuda_add = _genfp16_binary_operator(operator.add)
Cuda_iadd = _genfp16_binary_operator(operator.iadd)
Cuda_hsub = _genfp16_binary(cuda.fp16.hsub)
Cuda_sub = _genfp16_binary_operator(operator.sub)
Cuda_isub = _genfp16_binary_operator(operator.isub)
Cuda_hmul = _genfp16_binary(cuda.fp16.hmul)
Cuda_mul = _genfp16_binary_operator(operator.mul)
Cuda_imul = _genfp16_binary_operator(operator.imul)
Cuda_hmax = _genfp16_binary(cuda.fp16.hmax)
Cuda_hmin = _genfp16_binary(cuda.fp16.hmin)
Cuda_hneg = _genfp16_unary(cuda.fp16.hneg)
Cuda_neg = _genfp16_unary_operator(operator.neg)
Cuda_habs = _genfp16_unary(cuda.fp16.habs)
Cuda_abs = _genfp16_unary_operator(abs)
Cuda_heq = _genfp16_binary_comparison(cuda.fp16.heq)
_genfp16_comparison_operator(operator.eq)
Cuda_hne = _genfp16_binary_comparison(cuda.fp16.hne)
_genfp16_comparison_operator(operator.ne)
Cuda_hge = _genfp16_binary_comparison(cuda.fp16.hge)
_genfp16_comparison_operator(operator.ge)
Cuda_hgt = _genfp16_binary_comparison(cuda.fp16.hgt)
_genfp16_comparison_operator(operator.gt)
Cuda_hle = _genfp16_binary_comparison(cuda.fp16.hle)
_genfp16_comparison_operator(operator.le)
Cuda_hlt = _genfp16_binary_comparison(cuda.fp16.hlt)
_genfp16_comparison_operator(operator.lt)
_genfp16_binary_operator(operator.truediv)
_genfp16_binary_operator(operator.itruediv)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def _resolve_wrapped_unary(fname):
    # Declare an external device function ``__numba_wrapper_<fname>`` with
    # signature float16 -> float16 and wrap it as a callable Function type.
    decl = declare_device_function_template(f'__numba_wrapper_{fname}',
                                            types.float16,
                                            (types.float16,))
    return types.Function(decl)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def _resolve_wrapped_binary(fname):
    # Declare an external device function ``__numba_wrapper_<fname>`` with
    # signature (float16, float16) -> float16 and wrap it as a callable
    # Function type.
    decl = declare_device_function_template(f'__numba_wrapper_{fname}',
                                            types.float16,
                                            (types.float16, types.float16,))
    return types.Function(decl)
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
# Callable Function types for the fp16 math routines, each bound to an
# external ``__numba_wrapper_*`` device function.
hsin_device = _resolve_wrapped_unary('hsin')
hcos_device = _resolve_wrapped_unary('hcos')
hlog_device = _resolve_wrapped_unary('hlog')
hlog10_device = _resolve_wrapped_unary('hlog10')
hlog2_device = _resolve_wrapped_unary('hlog2')
hexp_device = _resolve_wrapped_unary('hexp')
hexp10_device = _resolve_wrapped_unary('hexp10')
hexp2_device = _resolve_wrapped_unary('hexp2')
hsqrt_device = _resolve_wrapped_unary('hsqrt')
hrsqrt_device = _resolve_wrapped_unary('hrsqrt')
hfloor_device = _resolve_wrapped_unary('hfloor')
hceil_device = _resolve_wrapped_unary('hceil')
hrcp_device = _resolve_wrapped_unary('hrcp')
hrint_device = _resolve_wrapped_unary('hrint')
htrunc_device = _resolve_wrapped_unary('htrunc')
hdiv_device = _resolve_wrapped_binary('hdiv')
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
# generate atomic operations
|
| 438 |
+
def _gen(l_key, supported_types):
    """Build and register an AbstractTemplate typing the atomic operation
    keyed on *l_key*, restricted to arrays whose dtype is one of
    *supported_types*.
    """
    @register
    class Cuda_atomic(AbstractTemplate):
        key = l_key

        def generic(self, args, kws):
            assert not kws
            array, index, value = args
            dtype = array.dtype

            if dtype in supported_types:
                # 1-D arrays take a plain integer index; higher-rank arrays
                # keep whatever index tuple type the caller supplied.
                if array.ndim == 1:
                    return signature(dtype, array, types.intp, dtype)
                if array.ndim > 1:
                    return signature(dtype, array, index, dtype)

    return Cuda_atomic
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
# Type sets restricting which array dtypes each atomic operation accepts.
all_numba_types = (types.float64, types.float32,
                   types.int32, types.uint32,
                   types.int64, types.uint64)

integer_numba_types = (types.int32, types.uint32,
                       types.int64, types.uint64)

unsigned_int_numba_types = (types.uint32, types.uint64)

# Register typing for the cuda.atomic.* operations.
Cuda_atomic_add = _gen(cuda.atomic.add, all_numba_types)
Cuda_atomic_sub = _gen(cuda.atomic.sub, all_numba_types)
Cuda_atomic_max = _gen(cuda.atomic.max, all_numba_types)
Cuda_atomic_min = _gen(cuda.atomic.min, all_numba_types)
Cuda_atomic_nanmax = _gen(cuda.atomic.nanmax, all_numba_types)
Cuda_atomic_nanmin = _gen(cuda.atomic.nanmin, all_numba_types)
Cuda_atomic_and = _gen(cuda.atomic.and_, integer_numba_types)
Cuda_atomic_or = _gen(cuda.atomic.or_, integer_numba_types)
Cuda_atomic_xor = _gen(cuda.atomic.xor, integer_numba_types)
Cuda_atomic_inc = _gen(cuda.atomic.inc, unsigned_int_numba_types)
Cuda_atomic_dec = _gen(cuda.atomic.dec, unsigned_int_numba_types)
Cuda_atomic_exch = _gen(cuda.atomic.exch, integer_numba_types)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
@register
class Cuda_atomic_compare_and_swap(AbstractTemplate):
    """Typing for ``cuda.atomic.compare_and_swap(ary, old, val)``.

    Only 1-D arrays of integer dtype are supported; any other argument
    combination gets no typing (generic() returns None).
    """
    key = cuda.atomic.compare_and_swap

    def generic(self, args, kws):
        assert not kws
        ary, old, val = args
        dty = ary.dtype

        if dty in integer_numba_types and ary.ndim == 1:
            return signature(dty, ary, dty, dty)
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
@register
class Cuda_atomic_cas(AbstractTemplate):
    """Typing for ``cuda.atomic.cas(ary, idx, old, val)``.

    Integer dtypes only. A 1-D array takes a plain integer index; a
    higher-rank array keeps the caller-supplied index tuple type.
    """
    key = cuda.atomic.cas

    def generic(self, args, kws):
        assert not kws
        ary, idx, old, val = args
        dty = ary.dtype

        if dty not in integer_numba_types:
            return

        if ary.ndim == 1:
            return signature(dty, ary, types.intp, dty, dty)
        elif ary.ndim > 1:
            return signature(dty, ary, idx, dty, dty)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
@register
class Cuda_nanosleep(ConcreteTemplate):
    """Typing for ``cuda.nanosleep``: takes a uint32 count, returns nothing."""
    key = cuda.nanosleep

    cases = [signature(types.void, types.uint32)]
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
@register_attr
class Dim3_attrs(AttributeTemplate):
    """Typing for the ``.x``/``.y``/``.z`` attributes of a ``dim3`` value
    (e.g. ``cuda.threadIdx``); each component is an int32."""
    key = dim3

    def resolve_x(self, mod):
        return types.int32

    def resolve_y(self, mod):
        return types.int32

    def resolve_z(self, mod):
        return types.int32
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
@register_attr
|
| 533 |
+
class CudaSharedModuleTemplate(AttributeTemplate):
|
| 534 |
+
key = types.Module(cuda.shared)
|
| 535 |
+
|
| 536 |
+
def resolve_array(self, mod):
|
| 537 |
+
return types.Function(Cuda_shared_array)
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
@register_attr
class CudaConstModuleTemplate(AttributeTemplate):
    """Attribute typing for the ``cuda.const`` module (``const.array_like``)."""
    key = types.Module(cuda.const)

    def resolve_array_like(self, mod):
        return types.Function(Cuda_const_array_like)
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
@register_attr
class CudaLocalModuleTemplate(AttributeTemplate):
    """Attribute typing for the ``cuda.local`` module (``local.array``)."""
    key = types.Module(cuda.local)

    def resolve_array(self, mod):
        return types.Function(Cuda_local_array)
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
@register_attr
class CudaAtomicTemplate(AttributeTemplate):
    """Attribute typing for the ``cuda.atomic`` module: maps each attribute
    to the Function type of its registered typing template."""
    key = types.Module(cuda.atomic)

    def resolve_add(self, mod):
        return types.Function(Cuda_atomic_add)

    def resolve_sub(self, mod):
        return types.Function(Cuda_atomic_sub)

    def resolve_and_(self, mod):
        return types.Function(Cuda_atomic_and)

    def resolve_or_(self, mod):
        return types.Function(Cuda_atomic_or)

    def resolve_xor(self, mod):
        return types.Function(Cuda_atomic_xor)

    def resolve_inc(self, mod):
        return types.Function(Cuda_atomic_inc)

    def resolve_dec(self, mod):
        return types.Function(Cuda_atomic_dec)

    def resolve_exch(self, mod):
        return types.Function(Cuda_atomic_exch)

    def resolve_max(self, mod):
        return types.Function(Cuda_atomic_max)

    def resolve_min(self, mod):
        return types.Function(Cuda_atomic_min)

    def resolve_nanmin(self, mod):
        return types.Function(Cuda_atomic_nanmin)

    def resolve_nanmax(self, mod):
        return types.Function(Cuda_atomic_nanmax)

    def resolve_compare_and_swap(self, mod):
        return types.Function(Cuda_atomic_compare_and_swap)

    def resolve_cas(self, mod):
        return types.Function(Cuda_atomic_cas)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
@register_attr
class CudaFp16Template(AttributeTemplate):
    """Attribute typing for the ``cuda.fp16`` module.

    Arithmetic/comparison intrinsics resolve to the Function types of their
    registered templates; the math routines (hsin, hcos, ...) resolve to the
    externally-declared ``__numba_wrapper_*`` device functions.
    """
    key = types.Module(cuda.fp16)

    def resolve_hadd(self, mod):
        return types.Function(Cuda_hadd)

    def resolve_hsub(self, mod):
        return types.Function(Cuda_hsub)

    def resolve_hmul(self, mod):
        return types.Function(Cuda_hmul)

    def resolve_hdiv(self, mod):
        return hdiv_device

    def resolve_hneg(self, mod):
        return types.Function(Cuda_hneg)

    def resolve_habs(self, mod):
        return types.Function(Cuda_habs)

    def resolve_hfma(self, mod):
        return types.Function(Cuda_hfma)

    def resolve_hsin(self, mod):
        return hsin_device

    def resolve_hcos(self, mod):
        return hcos_device

    def resolve_hlog(self, mod):
        return hlog_device

    def resolve_hlog10(self, mod):
        return hlog10_device

    def resolve_hlog2(self, mod):
        return hlog2_device

    def resolve_hexp(self, mod):
        return hexp_device

    def resolve_hexp10(self, mod):
        return hexp10_device

    def resolve_hexp2(self, mod):
        return hexp2_device

    def resolve_hfloor(self, mod):
        return hfloor_device

    def resolve_hceil(self, mod):
        return hceil_device

    def resolve_hsqrt(self, mod):
        return hsqrt_device

    def resolve_hrsqrt(self, mod):
        return hrsqrt_device

    def resolve_hrcp(self, mod):
        return hrcp_device

    def resolve_hrint(self, mod):
        return hrint_device

    def resolve_htrunc(self, mod):
        return htrunc_device

    def resolve_heq(self, mod):
        return types.Function(Cuda_heq)

    def resolve_hne(self, mod):
        return types.Function(Cuda_hne)

    def resolve_hge(self, mod):
        return types.Function(Cuda_hge)

    def resolve_hgt(self, mod):
        return types.Function(Cuda_hgt)

    def resolve_hle(self, mod):
        return types.Function(Cuda_hle)

    def resolve_hlt(self, mod):
        return types.Function(Cuda_hlt)

    def resolve_hmax(self, mod):
        return types.Function(Cuda_hmax)

    def resolve_hmin(self, mod):
        return types.Function(Cuda_hmin)
|
| 696 |
+
|
| 697 |
+
|
| 698 |
+
@register_attr
class CudaModuleTemplate(AttributeTemplate):
    """Attribute typing for the top-level ``cuda`` module: thread-indexing
    values, intrinsics, and the nested atomic/fp16/shared/const/local
    sub-modules."""
    key = types.Module(cuda)

    def resolve_cg(self, mod):
        return types.Module(cuda.cg)

    def resolve_threadIdx(self, mod):
        return dim3

    def resolve_blockIdx(self, mod):
        return dim3

    def resolve_blockDim(self, mod):
        return dim3

    def resolve_gridDim(self, mod):
        return dim3

    def resolve_laneid(self, mod):
        return types.int32

    def resolve_shared(self, mod):
        return types.Module(cuda.shared)

    def resolve_popc(self, mod):
        return types.Function(Cuda_popc)

    def resolve_brev(self, mod):
        return types.Function(Cuda_brev)

    def resolve_clz(self, mod):
        return types.Function(Cuda_clz)

    def resolve_ffs(self, mod):
        return types.Function(Cuda_ffs)

    def resolve_fma(self, mod):
        return types.Function(Cuda_fma)

    def resolve_cbrt(self, mod):
        return types.Function(Cuda_cbrt)

    def resolve_threadfence(self, mod):
        return types.Function(Cuda_threadfence_device)

    def resolve_threadfence_block(self, mod):
        return types.Function(Cuda_threadfence_block)

    def resolve_threadfence_system(self, mod):
        return types.Function(Cuda_threadfence_system)

    def resolve_syncwarp(self, mod):
        return types.Function(Cuda_syncwarp)

    def resolve_shfl_sync_intrinsic(self, mod):
        return types.Function(Cuda_shfl_sync_intrinsic)

    def resolve_vote_sync_intrinsic(self, mod):
        return types.Function(Cuda_vote_sync_intrinsic)

    def resolve_match_any_sync(self, mod):
        return types.Function(Cuda_match_any_sync)

    def resolve_match_all_sync(self, mod):
        return types.Function(Cuda_match_all_sync)

    def resolve_activemask(self, mod):
        return types.Function(Cuda_activemask)

    def resolve_lanemask_lt(self, mod):
        return types.Function(Cuda_lanemask_lt)

    def resolve_selp(self, mod):
        return types.Function(Cuda_selp)

    def resolve_nanosleep(self, mod):
        return types.Function(Cuda_nanosleep)

    def resolve_atomic(self, mod):
        return types.Module(cuda.atomic)

    def resolve_fp16(self, mod):
        return types.Module(cuda.fp16)

    def resolve_const(self, mod):
        return types.Module(cuda.const)

    def resolve_local(self, mod):
        return types.Module(cuda.local)
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
# Make the `cuda` module itself typeable as a global.
register_global(cuda, types.Module(cuda))


# NumPy

# Register the NumPy ufuncs supported on the CUDA target.
for func in trigonometric_functions:
    register_numpy_ufunc(func, register_global)

for func in comparison_functions:
    register_numpy_ufunc(func, register_global)

for func in bit_twiddling_functions:
    register_numpy_ufunc(func, register_global)

for func in math_operations:
    if func in ('log', 'log2', 'log10'):
        register_numpy_ufunc(func, register_global)
|
lib/python3.10/site-packages/numba/cuda/cudaimpl.py
ADDED
|
@@ -0,0 +1,1055 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import reduce
|
| 2 |
+
import operator
|
| 3 |
+
import math
|
| 4 |
+
|
| 5 |
+
from llvmlite import ir
|
| 6 |
+
import llvmlite.binding as ll
|
| 7 |
+
|
| 8 |
+
from numba.core.imputils import Registry, lower_cast
|
| 9 |
+
from numba.core.typing.npydecl import parse_dtype
|
| 10 |
+
from numba.core.datamodel import models
|
| 11 |
+
from numba.core import types, cgutils
|
| 12 |
+
from numba.np import ufunc_db
|
| 13 |
+
from numba.np.npyimpl import register_ufuncs
|
| 14 |
+
from .cudadrv import nvvm
|
| 15 |
+
from numba import cuda
|
| 16 |
+
from numba.cuda import nvvmutils, stubs, errors
|
| 17 |
+
from numba.cuda.types import dim3, CUDADispatcher
|
| 18 |
+
|
| 19 |
+
# Registry of lowering implementations for the CUDA target; the names below
# are shorthands for registering function, attribute and constant lowerings.
registry = Registry()
lower = registry.lower
lower_attr = registry.lower_getattr
lower_constant = registry.lower_constant
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def initialize_dim3(builder, prefix):
    """Read the ``<prefix>.x/.y/.z`` special registers and pack them into
    a three-element struct value."""
    sregs = tuple(nvvmutils.call_sreg(builder, fmt % prefix)
                  for fmt in ("%s.x", "%s.y", "%s.z"))
    return cgutils.pack_struct(builder, sregs)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@lower_attr(types.Module(cuda), 'threadIdx')
def cuda_threadIdx(context, builder, sig, args):
    # Thread index within the block, from the tid.{x,y,z} special registers.
    return initialize_dim3(builder, 'tid')
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@lower_attr(types.Module(cuda), 'blockDim')
def cuda_blockDim(context, builder, sig, args):
    # Block shape, from the ntid.{x,y,z} special registers.
    return initialize_dim3(builder, 'ntid')
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@lower_attr(types.Module(cuda), 'blockIdx')
def cuda_blockIdx(context, builder, sig, args):
    # Block index within the grid, from the ctaid.{x,y,z} special registers.
    return initialize_dim3(builder, 'ctaid')
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@lower_attr(types.Module(cuda), 'gridDim')
def cuda_gridDim(context, builder, sig, args):
    # Grid shape, from the nctaid.{x,y,z} special registers.
    return initialize_dim3(builder, 'nctaid')
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@lower_attr(types.Module(cuda), 'laneid')
def cuda_laneid(context, builder, sig, args):
    # Lane index, from the laneid special register.
    return nvvmutils.call_sreg(builder, 'laneid')
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@lower_attr(dim3, 'x')
def dim3_x(context, builder, sig, args):
    # dim3 is a 3-element struct; x is the first field.
    return builder.extract_value(args, 0)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@lower_attr(dim3, 'y')
def dim3_y(context, builder, sig, args):
    # dim3 is a 3-element struct; y is the second field.
    return builder.extract_value(args, 1)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@lower_attr(dim3, 'z')
def dim3_z(context, builder, sig, args):
    # dim3 is a 3-element struct; z is the third field.
    return builder.extract_value(args, 2)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# -----------------------------------------------------------------------------
|
| 73 |
+
|
| 74 |
+
@lower(cuda.const.array_like, types.Array)
def cuda_const_array_like(context, builder, sig, args):
    # This is a no-op because CUDATargetContext.make_constant_array already
    # created the constant array; simply forward the value.
    return args[0]
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Monotonic counter used by _get_unique_smem_id to generate unique
# shared-memory symbol names.
_unique_smem_id = 0
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _get_unique_smem_id(name):
    """Due to bug with NVVM invalid internalizing of shared memory in the
    PTX output. We can't mark shared memory to be internal. We have to
    ensure unique name is generated for shared memory symbol.
    """
    # Bump the module-level counter and suffix it to the given base name.
    global _unique_smem_id
    _unique_smem_id += 1
    return "{0}_{1}".format(name, _unique_smem_id)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@lower(cuda.shared.array, types.IntegerLiteral, types.Any)
def cuda_shared_array_integer(context, builder, sig, args):
    # cuda.shared.array(length, dtype) with a literal integer length:
    # allocate a 1-D array in the shared address space.
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@lower(cuda.shared.array, types.Tuple, types.Any)
|
| 105 |
+
@lower(cuda.shared.array, types.UniTuple, types.Any)
|
| 106 |
+
def cuda_shared_array_tuple(context, builder, sig, args):
|
| 107 |
+
shape = [ s.literal_value for s in sig.args[0] ]
|
| 108 |
+
dtype = parse_dtype(sig.args[1])
|
| 109 |
+
return _generic_array(context, builder, shape=shape, dtype=dtype,
|
| 110 |
+
symbol_name=_get_unique_smem_id('_cudapy_smem'),
|
| 111 |
+
addrspace=nvvm.ADDRSPACE_SHARED,
|
| 112 |
+
can_dynsized=True)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@lower(cuda.local.array, types.IntegerLiteral, types.Any)
|
| 116 |
+
def cuda_local_array_integer(context, builder, sig, args):
|
| 117 |
+
length = sig.args[0].literal_value
|
| 118 |
+
dtype = parse_dtype(sig.args[1])
|
| 119 |
+
return _generic_array(context, builder, shape=(length,), dtype=dtype,
|
| 120 |
+
symbol_name='_cudapy_lmem',
|
| 121 |
+
addrspace=nvvm.ADDRSPACE_LOCAL,
|
| 122 |
+
can_dynsized=False)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@lower(cuda.local.array, types.Tuple, types.Any)
|
| 126 |
+
@lower(cuda.local.array, types.UniTuple, types.Any)
|
| 127 |
+
def ptx_lmem_alloc_array(context, builder, sig, args):
|
| 128 |
+
shape = [ s.literal_value for s in sig.args[0] ]
|
| 129 |
+
dtype = parse_dtype(sig.args[1])
|
| 130 |
+
return _generic_array(context, builder, shape=shape, dtype=dtype,
|
| 131 |
+
symbol_name='_cudapy_lmem',
|
| 132 |
+
addrspace=nvvm.ADDRSPACE_LOCAL,
|
| 133 |
+
can_dynsized=False)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _emit_membar(context, builder, intrinsic):
    # Common helper: declare (or reuse) the named no-argument void intrinsic,
    # call it, and return the dummy (void) value.
    fnty = ir.FunctionType(ir.VoidType(), ())
    fence = cgutils.get_or_insert_function(builder.module, fnty, intrinsic)
    builder.call(fence, ())
    return context.get_dummy_value()


@lower(stubs.threadfence_block)
def ptx_threadfence_block(context, builder, sig, args):
    """Lower threadfence_block() to llvm.nvvm.membar.cta."""
    assert not args
    return _emit_membar(context, builder, 'llvm.nvvm.membar.cta')


@lower(stubs.threadfence_system)
def ptx_threadfence_system(context, builder, sig, args):
    """Lower threadfence_system() to llvm.nvvm.membar.sys."""
    assert not args
    return _emit_membar(context, builder, 'llvm.nvvm.membar.sys')


@lower(stubs.threadfence)
def ptx_threadfence_device(context, builder, sig, args):
    """Lower threadfence() to llvm.nvvm.membar.gl."""
    assert not args
    return _emit_membar(context, builder, 'llvm.nvvm.membar.gl')
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@lower(stubs.syncwarp)
def ptx_syncwarp(context, builder, sig, args):
    """Lower syncwarp() with no mask: all lanes participate (mask 0xFFFFFFFF)."""
    full_mask = context.get_constant(types.int32, 0xFFFFFFFF)
    masked_sig = types.none(types.int32)
    return ptx_syncwarp_mask(context, builder, masked_sig, [full_mask])


@lower(stubs.syncwarp, types.i4)
def ptx_syncwarp_mask(context, builder, sig, args):
    """Lower syncwarp(mask) to the llvm.nvvm.bar.warp.sync intrinsic."""
    fnty = ir.FunctionType(ir.VoidType(), (ir.IntType(32),))
    sync = cgutils.get_or_insert_function(builder.module, fnty,
                                          'llvm.nvvm.bar.warp.sync')
    builder.call(sync, args)
    return context.get_dummy_value()
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i8, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f8, types.i4,
       types.i4)
def ptx_shfl_sync_i32(context, builder, sig, args):
    """
    The NVVM intrinsic for shfl only supports i32, but the cuda intrinsic
    function supports both 32 and 64 bit ints and floats, so for feature
    parity, i64, f32, and f64 are implemented. Floats by way of bitcasting
    the float to an int, then shuffling, then bitcasting back. And 64-bit
    values by packing them into 2 32bit values, shuffling those, and then
    packing back together.
    """
    mask, mode, value, index, clamp = args
    value_type = sig.args[2]
    if value_type in types.real_domain:
        # Reinterpret the float as an integer of the same width so it can be
        # shuffled through the i32 intrinsic.
        value = builder.bitcast(value, ir.IntType(value_type.bitwidth))
    fname = 'llvm.nvvm.shfl.sync.i32'
    lmod = builder.module
    # The intrinsic returns a {result: i32, predicate: i1} pair.
    fnty = ir.FunctionType(
        ir.LiteralStructType((ir.IntType(32), ir.IntType(1))),
        (ir.IntType(32), ir.IntType(32), ir.IntType(32),
         ir.IntType(32), ir.IntType(32))
    )
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    if value_type.bitwidth == 32:
        # Single shuffle suffices for 32-bit values.
        ret = builder.call(func, (mask, mode, value, index, clamp))
        if value_type == types.float32:
            # Bitcast the shuffled i32 back to f32 and rebuild the struct.
            rv = builder.extract_value(ret, 0)
            pred = builder.extract_value(ret, 1)
            fv = builder.bitcast(rv, ir.FloatType())
            ret = cgutils.make_anonymous_struct(builder, (fv, pred))
    else:
        # 64-bit: split into low (value1) and high (value2) i32 halves and
        # shuffle each half separately.
        value1 = builder.trunc(value, ir.IntType(32))
        value_lshr = builder.lshr(value, context.get_constant(types.i8, 32))
        value2 = builder.trunc(value_lshr, ir.IntType(32))
        ret1 = builder.call(func, (mask, mode, value1, index, clamp))
        ret2 = builder.call(func, (mask, mode, value2, index, clamp))
        rv1 = builder.extract_value(ret1, 0)
        rv2 = builder.extract_value(ret2, 0)
        pred = builder.extract_value(ret1, 1)
        # Recombine the shuffled halves into a single i64.
        rv1_64 = builder.zext(rv1, ir.IntType(64))
        rv2_64 = builder.zext(rv2, ir.IntType(64))
        rv_shl = builder.shl(rv2_64, context.get_constant(types.i8, 32))
        rv = builder.or_(rv_shl, rv1_64)
        if value_type == types.float64:
            rv = builder.bitcast(rv, ir.DoubleType())
        ret = cgutils.make_anonymous_struct(builder, (rv, pred))
    return ret
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@lower(stubs.vote_sync_intrinsic, types.i4, types.i4, types.boolean)
def ptx_vote_sync(context, builder, sig, args):
    """Lower vote_sync_intrinsic to the llvm.nvvm.vote.sync intrinsic.

    The intrinsic returns an {i32, i1} pair.
    """
    i32 = ir.IntType(32)
    i1 = ir.IntType(1)
    fnty = ir.FunctionType(ir.LiteralStructType((i32, i1)), (i32, i32, i1))
    func = cgutils.get_or_insert_function(builder.module, fnty,
                                          'llvm.nvvm.vote.sync')
    return builder.call(func, args)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
@lower(stubs.match_any_sync, types.i4, types.i4)
@lower(stubs.match_any_sync, types.i4, types.i8)
@lower(stubs.match_any_sync, types.i4, types.f4)
@lower(stubs.match_any_sync, types.i4, types.f8)
def ptx_match_any_sync(context, builder, sig, args):
    """Lower match_any_sync to llvm.nvvm.match.any.sync.i{32,64}."""
    mask, operand = args
    valty = sig.args[1]
    nbits = valty.bitwidth
    if valty in types.real_domain:
        # Floats are matched via their integer bit pattern.
        operand = builder.bitcast(operand, ir.IntType(nbits))
    fname = 'llvm.nvvm.match.any.sync.i{}'.format(nbits)
    fnty = ir.FunctionType(ir.IntType(32),
                           (ir.IntType(32), ir.IntType(nbits)))
    func = cgutils.get_or_insert_function(builder.module, fnty, fname)
    return builder.call(func, (mask, operand))


@lower(stubs.match_all_sync, types.i4, types.i4)
@lower(stubs.match_all_sync, types.i4, types.i8)
@lower(stubs.match_all_sync, types.i4, types.f4)
@lower(stubs.match_all_sync, types.i4, types.f8)
def ptx_match_all_sync(context, builder, sig, args):
    """Lower match_all_sync to llvm.nvvm.match.all.sync.i{32,64}.

    The intrinsic returns an {i32, i1} pair.
    """
    mask, operand = args
    valty = sig.args[1]
    nbits = valty.bitwidth
    if valty in types.real_domain:
        # Floats are matched via their integer bit pattern.
        operand = builder.bitcast(operand, ir.IntType(nbits))
    fname = 'llvm.nvvm.match.all.sync.i{}'.format(nbits)
    retty = ir.LiteralStructType((ir.IntType(32), ir.IntType(1)))
    fnty = ir.FunctionType(retty, (ir.IntType(32), ir.IntType(nbits)))
    func = cgutils.get_or_insert_function(builder.module, fnty, fname)
    return builder.call(func, (mask, operand))
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
@lower(stubs.activemask)
def ptx_activemask(context, builder, sig, args):
    """Lower activemask() to the activemask.b32 PTX instruction."""
    fnty = ir.FunctionType(ir.IntType(32), [])
    asm = ir.InlineAsm(fnty, "activemask.b32 $0;", '=r', side_effect=True)
    return builder.call(asm, [])


@lower(stubs.lanemask_lt)
def ptx_lanemask_lt(context, builder, sig, args):
    """Lower lanemask_lt() to a read of the %lanemask_lt special register."""
    fnty = ir.FunctionType(ir.IntType(32), [])
    lanemask = ir.InlineAsm(fnty, "mov.u32 $0, %lanemask_lt;", '=r',
                            side_effect=True)
    return builder.call(lanemask, [])
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@lower(stubs.popc, types.Any)
def ptx_popc(context, builder, sig, args):
    # Population count via the LLVM ctpop intrinsic.
    return builder.ctpop(args[0])


@lower(stubs.fma, types.Any, types.Any, types.Any)
def ptx_fma(context, builder, sig, args):
    # Fused multiply-add via the LLVM fma intrinsic.
    return builder.fma(*args)
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def float16_float_ty_constraint(bitwidth):
    """Return the (PTX type name, inline-asm register constraint) pair used
    when converting between float16 and a float of *bitwidth* bits.

    Raises CudaLoweringError for unsupported widths.
    """
    mapping = {32: ('f32', 'f'), 64: ('f64', 'd')}

    try:
        return mapping[bitwidth]
    except KeyError:
        msg = f"Conversion between float16 and float{bitwidth} unsupported"
        raise errors.CudaLoweringError(msg)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
@lower_cast(types.float16, types.Float)
def float16_to_float_cast(context, builder, fromty, toty, val):
    """Cast an fp16 value (held in an i16) to a wider float via the PTX
    cvt instruction.  Equal bit widths require no conversion."""
    if fromty.bitwidth == toty.bitwidth:
        return val

    ptxty, reg = float16_float_ty_constraint(toty.bitwidth)

    fnty = ir.FunctionType(context.get_value_type(toty), [ir.IntType(16)])
    cvt = ir.InlineAsm(fnty, f"cvt.{ptxty}.f16 $0, $1;", f"={reg},h")
    return builder.call(cvt, [val])


@lower_cast(types.Float, types.float16)
def float_to_float16_cast(context, builder, fromty, toty, val):
    """Cast a float32/float64 value down to fp16 via the PTX cvt instruction
    with 'rn' rounding.  Equal bit widths require no conversion."""
    if fromty.bitwidth == toty.bitwidth:
        return val

    ptxty, reg = float16_float_ty_constraint(fromty.bitwidth)

    fnty = ir.FunctionType(ir.IntType(16), [context.get_value_type(fromty)])
    cvt = ir.InlineAsm(fnty, f"cvt.rn.f16.{ptxty} $0, $1;", f"=h,{reg}")
    return builder.call(cvt, [val])
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def float16_int_constraint(bitwidth):
    """Return the inline-asm register constraint letter for an integer of
    *bitwidth* bits (8 -> 'c', 16 -> 'h', 32 -> 'r', 64 -> 'l').

    Raises CudaLoweringError for unsupported widths.
    """
    constraints = {8: 'c', 16: 'h', 32: 'r', 64: 'l'}

    try:
        return constraints[bitwidth]
    except KeyError:
        msg = f"Conversion between float16 and int{bitwidth} unsupported"
        raise errors.CudaLoweringError(msg)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
@lower_cast(types.float16, types.Integer)
def float16_to_integer_cast(context, builder, fromty, toty, val):
    """Cast an fp16 value to an integer type using cvt with 'rni' rounding,
    signed or unsigned according to the target type."""
    width = toty.bitwidth
    reg = float16_int_constraint(width)
    sign = 's' if toty.signed else 'u'

    fnty = ir.FunctionType(context.get_value_type(toty), [ir.IntType(16)])
    cvt = ir.InlineAsm(fnty,
                       f"cvt.rni.{sign}{width}.f16 $0, $1;",
                       f"={reg},h")
    return builder.call(cvt, [val])


@lower_cast(types.Integer, types.float16)
@lower_cast(types.IntegerLiteral, types.float16)
def integer_to_float16_cast(context, builder, fromty, toty, val):
    """Cast an integer value to fp16 using cvt with 'rn' rounding, signed
    or unsigned according to the source type."""
    width = fromty.bitwidth
    reg = float16_int_constraint(width)
    sign = 's' if fromty.signed else 'u'

    fnty = ir.FunctionType(ir.IntType(16),
                           [context.get_value_type(fromty)])
    cvt = ir.InlineAsm(fnty,
                       f"cvt.rn.f16.{sign}{width} $0, $1;",
                       f"=h,{reg}")
    return builder.call(cvt, [val])
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def lower_fp16_binary(fn, op):
    """Register a lowering of *fn* on (float16, float16) that emits the
    PTX instruction '{op}.f16' via inline assembly.  fp16 values live in
    i16 registers ('h' constraint)."""
    @lower(fn, types.float16, types.float16)
    def ptx_fp16_binary(context, builder, sig, args):
        fnty = ir.FunctionType(ir.IntType(16),
                               [ir.IntType(16), ir.IntType(16)])
        asm = ir.InlineAsm(fnty, f'{op}.f16 $0,$1,$2;', '=h,h,h')
        return builder.call(asm, args)


# Register add/sub/mul for the cuda.fp16 stubs and the Python operators
# (including the in-place variants).
lower_fp16_binary(stubs.fp16.hadd, 'add')
lower_fp16_binary(operator.add, 'add')
lower_fp16_binary(operator.iadd, 'add')
lower_fp16_binary(stubs.fp16.hsub, 'sub')
lower_fp16_binary(operator.sub, 'sub')
lower_fp16_binary(operator.isub, 'sub')
lower_fp16_binary(stubs.fp16.hmul, 'mul')
lower_fp16_binary(operator.mul, 'mul')
lower_fp16_binary(operator.imul, 'mul')
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
@lower(stubs.fp16.hneg, types.float16)
def ptx_fp16_hneg(context, builder, sig, args):
    """Lower fp16.hneg to the neg.f16 PTX instruction."""
    fnty = ir.FunctionType(ir.IntType(16), [ir.IntType(16)])
    asm = ir.InlineAsm(fnty, 'neg.f16 $0, $1;', '=h,h')
    return builder.call(asm, args)


@lower(operator.neg, types.float16)
def operator_hneg(context, builder, sig, args):
    """Lower unary minus on float16 by delegating to ptx_fp16_hneg."""
    return ptx_fp16_hneg(context, builder, sig, args)


@lower(stubs.fp16.habs, types.float16)
def ptx_fp16_habs(context, builder, sig, args):
    """Lower fp16.habs to the abs.f16 PTX instruction."""
    fnty = ir.FunctionType(ir.IntType(16), [ir.IntType(16)])
    asm = ir.InlineAsm(fnty, 'abs.f16 $0, $1;', '=h,h')
    return builder.call(asm, args)


@lower(abs, types.float16)
def operator_habs(context, builder, sig, args):
    """Lower abs() on float16 by delegating to ptx_fp16_habs."""
    return ptx_fp16_habs(context, builder, sig, args)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
@lower(stubs.fp16.hfma, types.float16, types.float16, types.float16)
def ptx_hfma(context, builder, sig, args):
    """Lower fp16.hfma to the fma.rn.f16 PTX instruction."""
    i16 = ir.IntType(16)
    fnty = ir.FunctionType(i16, [i16, i16, i16])
    asm = ir.InlineAsm(fnty, "fma.rn.f16 $0,$1,$2,$3;", "=h,h,h,h")
    return builder.call(asm, args)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@lower(operator.truediv, types.float16, types.float16)
@lower(operator.itruediv, types.float16, types.float16)
def fp16_div_impl(context, builder, sig, args):
    """Lower fp16 true division by compiling a call to cuda.fp16.hdiv."""
    def fp16_div(x, y):
        return cuda.fp16.hdiv(x, y)

    return context.compile_internal(builder, fp16_div, sig, args)
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
_fp16_cmp = """{{
|
| 444 |
+
.reg .pred __$$f16_cmp_tmp;
|
| 445 |
+
setp.{op}.f16 __$$f16_cmp_tmp, $1, $2;
|
| 446 |
+
selp.u16 $0, 1, 0, __$$f16_cmp_tmp;
|
| 447 |
+
}}"""
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
def _gen_fp16_cmp(op):
    """Build a lowering function for an fp16 comparison.

    *op* is the PTX setp comparison operator (e.g. 'eq', 'lt').  The returned
    lowering emits inline PTX that compares two fp16 values (held in i16
    registers) and yields 0/1 in an i16, which is then compared against zero
    to produce the i1 boolean result.
    """
    def ptx_fp16_comparison(context, builder, sig, args):
        fnty = ir.FunctionType(ir.IntType(16), [ir.IntType(16), ir.IntType(16)])
        asm = ir.InlineAsm(fnty, _fp16_cmp.format(op=op), '=h,h,h')
        result = builder.call(asm, args)

        # The inline asm already returns an i16; the previous no-op bitcast
        # from i16 to i16 has been removed.
        zero = context.get_constant(types.int16, 0)
        return builder.icmp_unsigned("!=", result, zero)
    return ptx_fp16_comparison
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
# Register fp16 comparisons for both the cuda.fp16 stubs and the Python
# comparison operators, each backed by the inline-PTX comparison built by
# _gen_fp16_cmp with the corresponding setp operator.
lower(stubs.fp16.heq, types.float16, types.float16)(_gen_fp16_cmp('eq'))
lower(operator.eq, types.float16, types.float16)(_gen_fp16_cmp('eq'))
lower(stubs.fp16.hne, types.float16, types.float16)(_gen_fp16_cmp('ne'))
lower(operator.ne, types.float16, types.float16)(_gen_fp16_cmp('ne'))
lower(stubs.fp16.hge, types.float16, types.float16)(_gen_fp16_cmp('ge'))
lower(operator.ge, types.float16, types.float16)(_gen_fp16_cmp('ge'))
lower(stubs.fp16.hgt, types.float16, types.float16)(_gen_fp16_cmp('gt'))
lower(operator.gt, types.float16, types.float16)(_gen_fp16_cmp('gt'))
lower(stubs.fp16.hle, types.float16, types.float16)(_gen_fp16_cmp('le'))
lower(operator.le, types.float16, types.float16)(_gen_fp16_cmp('le'))
lower(stubs.fp16.hlt, types.float16, types.float16)(_gen_fp16_cmp('lt'))
lower(operator.lt, types.float16, types.float16)(_gen_fp16_cmp('lt'))
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def lower_fp16_minmax(fn, fname, op):
    """Register a lowering of *fn* on (float16, float16) implementing
    min/max as compare-and-select.  *op* is the setp comparison used
    ('gt' for max, 'lt' for min).

    NOTE(review): the *fname* parameter is currently unused.
    """
    @lower(fn, types.float16, types.float16)
    def ptx_fp16_minmax(context, builder, sig, args):
        # Select the first argument when the comparison holds, else the second.
        choice = _gen_fp16_cmp(op)(context, builder, sig, args)
        return builder.select(choice, args[0], args[1])


lower_fp16_minmax(stubs.fp16.hmax, 'max', 'gt')
lower_fp16_minmax(stubs.fp16.hmin, 'min', 'lt')
|
| 485 |
+
|
| 486 |
+
# See:
# https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrt.html#__nv_cbrt
# https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrtf.html#__nv_cbrtf


# Map Numba float types to the corresponding libdevice cube-root function.
cbrt_funcs = {
    types.float32: '__nv_cbrtf',
    types.float64: '__nv_cbrt',
}
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
@lower(stubs.cbrt, types.float32)
@lower(stubs.cbrt, types.float64)
def ptx_cbrt(context, builder, sig, args):
    """Lower cbrt() to the libdevice __nv_cbrtf / __nv_cbrt function."""
    retty = sig.return_type
    llty = context.get_value_type(retty)
    fnty = ir.FunctionType(llty, [llty])
    fn = cgutils.get_or_insert_function(builder.module, fnty,
                                        cbrt_funcs[retty])
    return builder.call(fn, args)
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
@lower(stubs.brev, types.u4)
def ptx_brev_u4(context, builder, sig, args):
    """Lower brev() on uint32 to the libdevice __nv_brev function.

    FIXME: the llvm.bitreverse.i32 intrinsic isn't supported by nvcc, so
    builder.bitreverse(args[0]) cannot be used here.
    """
    i32 = ir.IntType(32)
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(i32, (i32,)),
                                        '__nv_brev')
    return builder.call(fn, args)


@lower(stubs.brev, types.u8)
def ptx_brev_u8(context, builder, sig, args):
    """Lower brev() on uint64 to the libdevice __nv_brevll function.

    FIXME: the llvm.bitreverse.i64 intrinsic isn't supported by nvcc, so
    builder.bitreverse(args[0]) cannot be used here.
    """
    i64 = ir.IntType(64)
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(i64, (i64,)),
                                        '__nv_brevll')
    return builder.call(fn, args)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
@lower(stubs.clz, types.Any)
def ptx_clz(context, builder, sig, args):
    """Lower clz() to the LLVM ctlz intrinsic.

    The second argument (is_zero_undef) is false, so a zero input yields a
    defined result.
    """
    is_zero_undef = context.get_constant(types.boolean, 0)
    return builder.ctlz(args[0], is_zero_undef)


@lower(stubs.ffs, types.i4)
@lower(stubs.ffs, types.u4)
def ptx_ffs_32(context, builder, sig, args):
    """Lower ffs() on 32-bit integers to the libdevice __nv_ffs function."""
    i32 = ir.IntType(32)
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(i32, (i32,)),
                                        '__nv_ffs')
    return builder.call(fn, args)


@lower(stubs.ffs, types.i8)
@lower(stubs.ffs, types.u8)
def ptx_ffs_64(context, builder, sig, args):
    """Lower ffs() on 64-bit integers to the libdevice __nv_ffsll function
    (the result is an i32)."""
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(32), (ir.IntType(64),)),
        '__nv_ffsll')
    return builder.call(fn, args)
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
@lower(stubs.selp, types.Any, types.Any, types.Any)
def ptx_selp(context, builder, sig, args):
    # selp(test, a, b): select a when test is true, else b.
    test, a, b = args
    return builder.select(test, a, b)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
@lower(max, types.f4, types.f4)
def ptx_max_f4(context, builder, sig, args):
    """Lower max(float32, float32) to the libdevice __nv_fmaxf function."""
    f32 = ir.FloatType()
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(f32, (f32, f32)),
                                        '__nv_fmaxf')
    return builder.call(fn, args)


@lower(max, types.f8, types.f4)
@lower(max, types.f4, types.f8)
@lower(max, types.f8, types.f8)
def ptx_max_f8(context, builder, sig, args):
    """Lower max() on double / mixed float-double arguments to __nv_fmax.

    Both operands are first cast to double.
    """
    f64 = ir.DoubleType()
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(f64, (f64, f64)),
                                        '__nv_fmax')
    operands = [context.cast(builder, arg, argty, types.double)
                for arg, argty in zip(args, sig.args)]
    return builder.call(fn, operands)


@lower(min, types.f4, types.f4)
def ptx_min_f4(context, builder, sig, args):
    """Lower min(float32, float32) to the libdevice __nv_fminf function."""
    f32 = ir.FloatType()
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(f32, (f32, f32)),
                                        '__nv_fminf')
    return builder.call(fn, args)


@lower(min, types.f8, types.f4)
@lower(min, types.f4, types.f8)
@lower(min, types.f8, types.f8)
def ptx_min_f8(context, builder, sig, args):
    """Lower min() on double / mixed float-double arguments to __nv_fmin.

    Both operands are first cast to double.
    """
    f64 = ir.DoubleType()
    fn = cgutils.get_or_insert_function(builder.module,
                                        ir.FunctionType(f64, (f64, f64)),
                                        '__nv_fmin')
    operands = [context.cast(builder, arg, argty, types.double)
                for arg, argty in zip(args, sig.args)]
    return builder.call(fn, operands)
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
@lower(round, types.f4)
@lower(round, types.f8)
def ptx_round(context, builder, sig, args):
    """Lower one-argument round() by casting to double and calling the
    libdevice __nv_llrint function (returns an i64)."""
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.IntType(64),
            (ir.DoubleType(),)),
        '__nv_llrint')
    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
    ])


# This rounding implementation follows the algorithm used in the "fallback
# version" of double_round in CPython.
# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/floatobject.c#L964-L1007

@lower(round, types.f4, types.Integer)
@lower(round, types.f8, types.Integer)
def round_to_impl(context, builder, sig, args):
    """Lower two-argument round(x, ndigits) by compiling the pure-Python
    round_ndigits implementation below for the device."""
    def round_ndigits(x, ndigits):
        # Infinities and NaNs round to themselves.
        if math.isinf(x) or math.isnan(x):
            return x

        if ndigits >= 0:
            if ndigits > 22:
                # pow1 and pow2 are each safe from overflow, but
                # pow1*pow2 ~= pow(10.0, ndigits) might overflow.
                pow1 = 10.0 ** (ndigits - 22)
                pow2 = 1e22
            else:
                pow1 = 10.0 ** ndigits
                pow2 = 1.0
            y = (x * pow1) * pow2
            if math.isinf(y):
                # Scaling overflowed: rounding x has no effect.
                return x

        else:
            pow1 = 10.0 ** (-ndigits)
            y = x / pow1

        z = round(y)
        if (math.fabs(y - z) == 0.5):
            # halfway between two integers; use round-half-even
            z = 2.0 * round(y / 2.0)

        # Undo the scaling.
        if ndigits >= 0:
            z = (z / pow2) / pow1
        else:
            z *= pow1

        return z

    return context.compile_internal(builder, round_ndigits, sig, args, )
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
def gen_deg_rad(const):
    """Return a lowering that multiplies its single float argument by the
    compile-time constant *const* (used for degree/radian conversion)."""
    def impl(context, builder, sig, args):
        argty, = sig.args
        factor = context.get_constant(argty, const)
        return builder.fmul(factor, args[0])
    return impl


# Conversion factors for math.radians / math.degrees.
_deg2rad = math.pi / 180.
_rad2deg = 180. / math.pi
lower(math.radians, types.f4)(gen_deg_rad(_deg2rad))
lower(math.radians, types.f8)(gen_deg_rad(_deg2rad))
lower(math.degrees, types.f4)(gen_deg_rad(_rad2deg))
lower(math.degrees, types.f8)(gen_deg_rad(_rad2deg))
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
def _normalize_indices(context, builder, indty, inds, aryty, valty):
    """
    Convert integer indices into tuple of intp
    """
    # A single integer index is promoted to a 1-tuple.
    if indty in types.integer_domain:
        indty = types.UniTuple(dtype=indty, count=1)
        indices = [inds]
    else:
        indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
    # Cast every index to intp for pointer arithmetic.
    indices = [context.cast(builder, i, t, types.intp)
               for t, i in zip(indty, indices)]

    dtype = aryty.dtype
    if dtype != valty:
        raise TypeError("expect %s but got %s" % (dtype, valty))

    if aryty.ndim != len(indty):
        raise TypeError("indexing %d-D array with %d-D index" %
                        (aryty.ndim, len(indty)))

    return indty, indices
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
def _atomic_dispatcher(dispatch_fn):
    """Decorator wrapping an atomic lowering with the common argument
    handling: normalize the indices, compute the element pointer, then
    delegate to *dispatch_fn* as (context, builder, dtype, ptr, val)."""
    def imp(context, builder, sig, args):
        # The common argument handling code
        aryty, indty, valty = sig.args
        ary, inds, val = args
        dtype = aryty.dtype

        indty, indices = _normalize_indices(context, builder, indty, inds,
                                            aryty, valty)

        lary = context.make_array(aryty)(context, builder, ary)
        ptr = cgutils.get_item_pointer(context, builder, aryty, lary, indices,
                                       wraparound=True)
        # dispatcher to implementation base on dtype
        return dispatch_fn(context, builder, dtype, ptr, val)
    return imp
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
@lower(stubs.atomic.add, types.Array, types.intp, types.Any)
@lower(stubs.atomic.add, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.add, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_add_tuple(context, builder, dtype, ptr, val):
    """Lower atomic.add: floats use the nvvmutils float intrinsics, other
    dtypes use an LLVM atomicrmw add."""
    if dtype == types.float32:
        fn = nvvmutils.declare_atomic_add_float32(builder.module)
        return builder.call(fn, (ptr, val))
    if dtype == types.float64:
        fn = nvvmutils.declare_atomic_add_float64(builder.module)
        return builder.call(fn, (ptr, val))
    return builder.atomic_rmw('add', ptr, val, 'monotonic')


@lower(stubs.atomic.sub, types.Array, types.intp, types.Any)
@lower(stubs.atomic.sub, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.sub, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_sub(context, builder, dtype, ptr, val):
    """Lower atomic.sub: floats use the nvvmutils float intrinsics, other
    dtypes use an LLVM atomicrmw sub."""
    if dtype == types.float32:
        fn = nvvmutils.declare_atomic_sub_float32(builder.module)
        return builder.call(fn, (ptr, val))
    if dtype == types.float64:
        fn = nvvmutils.declare_atomic_sub_float64(builder.module)
        return builder.call(fn, (ptr, val))
    return builder.atomic_rmw('sub', ptr, val, 'monotonic')
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
@lower(stubs.atomic.inc, types.Array, types.intp, types.Any)
@lower(stubs.atomic.inc, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.inc, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_inc(context, builder, dtype, ptr, val):
    """Lower atomic.inc for unsigned integer arrays via nvvmutils."""
    if dtype not in cuda.cudadecl.unsigned_int_numba_types:
        raise TypeError(f'Unimplemented atomic inc with {dtype} array')
    decl = getattr(nvvmutils, f'declare_atomic_inc_int{dtype.bitwidth}')
    return builder.call(decl(builder.module), (ptr, val))


@lower(stubs.atomic.dec, types.Array, types.intp, types.Any)
@lower(stubs.atomic.dec, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.dec, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_dec(context, builder, dtype, ptr, val):
    """Lower atomic.dec for unsigned integer arrays via nvvmutils."""
    if dtype not in cuda.cudadecl.unsigned_int_numba_types:
        raise TypeError(f'Unimplemented atomic dec with {dtype} array')
    decl = getattr(nvvmutils, f'declare_atomic_dec_int{dtype.bitwidth}')
    return builder.call(decl(builder.module), (ptr, val))
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
def ptx_atomic_bitwise(stub, op):
    """Register lowerings of the atomic bitwise *stub* (and_/or_/xor) for
    integer arrays, implemented with an LLVM atomicrmw *op*."""
    @_atomic_dispatcher
    def impl_ptx_atomic(context, builder, dtype, ptr, val):
        if dtype in (cuda.cudadecl.integer_numba_types):
            return builder.atomic_rmw(op, ptr, val, 'monotonic')
        else:
            raise TypeError(f'Unimplemented atomic {op} with {dtype} array')

    # Register for scalar and tuple index forms.
    for ty in (types.intp, types.UniTuple, types.Tuple):
        lower(stub, types.Array, ty, types.Any)(impl_ptx_atomic)


ptx_atomic_bitwise(stubs.atomic.and_, 'and')
ptx_atomic_bitwise(stubs.atomic.or_, 'or')
ptx_atomic_bitwise(stubs.atomic.xor, 'xor')


@lower(stubs.atomic.exch, types.Array, types.intp, types.Any)
@lower(stubs.atomic.exch, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.exch, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_exch(context, builder, dtype, ptr, val):
    # Atomic exchange for integer arrays via an LLVM atomicrmw xchg.
    if dtype in (cuda.cudadecl.integer_numba_types):
        return builder.atomic_rmw('xchg', ptr, val, 'monotonic')
    else:
        raise TypeError(f'Unimplemented atomic exch with {dtype} array')
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
@lower(stubs.atomic.max, types.Array, types.intp, types.Any)
|
| 827 |
+
@lower(stubs.atomic.max, types.Array, types.Tuple, types.Any)
|
| 828 |
+
@lower(stubs.atomic.max, types.Array, types.UniTuple, types.Any)
|
| 829 |
+
@_atomic_dispatcher
|
| 830 |
+
def ptx_atomic_max(context, builder, dtype, ptr, val):
|
| 831 |
+
lmod = builder.module
|
| 832 |
+
if dtype == types.float64:
|
| 833 |
+
return builder.call(nvvmutils.declare_atomic_max_float64(lmod),
|
| 834 |
+
(ptr, val))
|
| 835 |
+
elif dtype == types.float32:
|
| 836 |
+
return builder.call(nvvmutils.declare_atomic_max_float32(lmod),
|
| 837 |
+
(ptr, val))
|
| 838 |
+
elif dtype in (types.int32, types.int64):
|
| 839 |
+
return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
|
| 840 |
+
elif dtype in (types.uint32, types.uint64):
|
| 841 |
+
return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
|
| 842 |
+
else:
|
| 843 |
+
raise TypeError('Unimplemented atomic max with %s array' % dtype)
|
| 844 |
+
|
| 845 |
+
|
| 846 |
+
@lower(stubs.atomic.min, types.Array, types.intp, types.Any)
@lower(stubs.atomic.min, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.min, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_min(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.min``.

    Mirrors ``ptx_atomic_max``: floats go through ``nvvmutils`` helper
    declarations, signed/unsigned integers emit ``atomicrmw min``/``umin``,
    and any other dtype raises ``TypeError`` at compile time.
    """
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_min_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_min_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        # Unsigned comparison requires the distinct 'umin' operation.
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic min with %s array' % dtype)
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
@lower(stubs.atomic.nanmax, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmax(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.nanmax`` (max treating NaN operands specially).

    Floats call the NaN-aware helpers declared through ``nvvmutils``;
    integers (which cannot hold NaN) emit a plain ``atomicrmw
    max``/``umax``. Other dtypes raise ``TypeError`` at compile time.
    """
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmax_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmax_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        # Fix: the previous message said "atomic max", misreporting which
        # operation was unsupported.
        raise TypeError('Unimplemented atomic nanmax with %s array' % dtype)
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
@lower(stubs.atomic.nanmin, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmin(context, builder, dtype, ptr, val):
    """Lower ``cuda.atomic.nanmin`` (min treating NaN operands specially).

    Floats call the NaN-aware helpers declared through ``nvvmutils``;
    integers (which cannot hold NaN) emit a plain ``atomicrmw
    min``/``umin``. Other dtypes raise ``TypeError`` at compile time.
    """
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmin_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmin_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        # Fix: the previous message said "atomic min", misreporting which
        # operation was unsupported.
        raise TypeError('Unimplemented atomic nanmin with %s array' % dtype)
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
@lower(stubs.atomic.compare_and_swap, types.Array, types.Any, types.Any)
def ptx_atomic_compare_and_swap(context, builder, sig, args):
    """Lower ``cuda.atomic.compare_and_swap``.

    Legacy entry point equivalent to ``cuda.atomic.cas`` on element 0:
    the signature and argument tuple are rewritten to insert a constant
    index of 0, then lowering is delegated to ``ptx_atomic_cas``.
    """
    sig = sig.return_type(sig.args[0], types.intp, sig.args[1], sig.args[2])
    args = (args[0], context.get_constant(types.intp, 0), args[1], args[2])
    return ptx_atomic_cas(context, builder, sig, args)
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
@lower(stubs.atomic.cas, types.Array, types.intp, types.Any, types.Any)
@lower(stubs.atomic.cas, types.Array, types.Tuple, types.Any, types.Any)
@lower(stubs.atomic.cas, types.Array, types.UniTuple, types.Any, types.Any)
def ptx_atomic_cas(context, builder, sig, args):
    """Lower ``cuda.atomic.cas(ary, idx, old, val)``.

    Computes the element pointer from the (possibly tuple) index and emits
    an integer compare-and-swap via ``nvvmutils.atomic_cmpxchg``. Only
    integer element types are supported; others raise ``TypeError`` at
    compile time.
    """
    aryty, indty, oldty, valty = sig.args
    ary, inds, old, val = args

    # Normalize scalar / tuple indices into a flat index sequence.
    indty, indices = _normalize_indices(context, builder, indty, inds, aryty,
                                        valty)

    lary = context.make_array(aryty)(context, builder, ary)
    # wraparound=True: negative indices wrap, matching NumPy indexing.
    ptr = cgutils.get_item_pointer(context, builder, aryty, lary, indices,
                                   wraparound=True)

    if aryty.dtype in (cuda.cudadecl.integer_numba_types):
        lmod = builder.module
        bitwidth = aryty.dtype.bitwidth
        return nvvmutils.atomic_cmpxchg(builder, lmod, bitwidth, ptr, old, val)
    else:
        raise TypeError('Unimplemented atomic cas with %s array' % aryty.dtype)
|
| 933 |
+
|
| 934 |
+
|
| 935 |
+
# -----------------------------------------------------------------------------
|
| 936 |
+
|
| 937 |
+
@lower(stubs.nanosleep, types.uint32)
def ptx_nanosleep(context, builder, sig, args):
    """Lower ``cuda.nanosleep(ns)`` by emitting the PTX ``nanosleep.u32``
    instruction via inline assembly (no NVVM intrinsic exists for it).

    side_effect=True prevents LLVM from eliminating the call as dead code.
    """
    nanosleep = ir.InlineAsm(ir.FunctionType(ir.VoidType(), [ir.IntType(32)]),
                             "nanosleep.u32 $0;", 'r', side_effect=True)
    ns = args[0]
    builder.call(nanosleep, [ns])
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
# -----------------------------------------------------------------------------
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
                   can_dynsized=False):
    """Allocate an array in the given NVVM address space and return a
    populated Numba array structure value for it.

    Used to implement ``cuda.shared.array``, ``cuda.local.array`` and
    ``cuda.const.array_like``-style allocations.

    :param shape: tuple of Python ints; a total element count <= 0 selects
        dynamic shared memory when ``can_dynsized`` is set and the array
        is 1-D.
    :param symbol_name: name for the alloca / global variable.
    :param addrspace: NVVM address space constant (e.g.
        ``nvvm.ADDRSPACE_LOCAL``).
    :raises ValueError: for a non-positive element count that is not a
        valid dynamic shared memory request.
    :raises TypeError: for unsupported element dtypes.
    """
    elemcount = reduce(operator.mul, shape, 1)

    # Check for valid shape for this type of allocation.
    # Only 1d arrays can be dynamic.
    dynamic_smem = elemcount <= 0 and can_dynsized and len(shape) == 1
    if elemcount <= 0 and not dynamic_smem:
        raise ValueError("array length <= 0")

    # Check that we support the requested dtype
    data_model = context.data_model_manager[dtype]
    other_supported_type = (
        isinstance(dtype, (types.Record, types.Boolean))
        or isinstance(data_model, models.StructModel)
        or dtype == types.float16
    )
    if dtype not in types.number_domain and not other_supported_type:
        raise TypeError("unsupported type: %s" % dtype)

    lldtype = context.get_data_type(dtype)
    laryty = ir.ArrayType(lldtype, elemcount)

    if addrspace == nvvm.ADDRSPACE_LOCAL:
        # Special case local address space allocation to use alloca
        # NVVM is smart enough to only use local memory if no register is
        # available
        dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
    else:
        lmod = builder.module

        # Create global variable in the requested address space
        gvmem = cgutils.add_global_variable(lmod, laryty, symbol_name,
                                            addrspace)
        # Specify alignment to avoid misalignment bug
        align = context.get_abi_sizeof(lldtype)
        # Alignment is required to be a power of 2 for shared memory. If it is
        # not a power of 2 (e.g. for a Record array) then round up accordingly.
        gvmem.align = 1 << (align - 1 ).bit_length()

        if dynamic_smem:
            # Dynamic shared memory: storage is provided at launch time, so
            # the symbol is declared external with no initializer.
            gvmem.linkage = 'external'
        else:
            ## Comment out the following line to workaround a NVVM bug
            ## which generates a invalid symbol name when the linkage
            ## is internal and in some situation.
            ## See _get_unique_smem_id()
            # gvmem.linkage = lc.LINKAGE_INTERNAL

            gvmem.initializer = ir.Constant(laryty, ir.Undefined)

        # Convert to generic address-space
        dataptr = builder.addrspacecast(gvmem, ir.PointerType(ir.IntType(8)),
                                        'generic')

    targetdata = ll.create_target_data(nvvm.NVVM().data_layout)
    lldtype = context.get_data_type(dtype)
    itemsize = lldtype.get_abi_size(targetdata)

    # Compute strides (C-contiguous: the last axis varies fastest).
    laststride = itemsize
    rstrides = []
    for i, lastsize in enumerate(reversed(shape)):
        rstrides.append(laststride)
        laststride *= lastsize
    strides = [s for s in reversed(rstrides)]
    kstrides = [context.get_constant(types.intp, s) for s in strides]

    # Compute shape
    if dynamic_smem:
        # Compute the shape based on the dynamic shared memory configuration.
        # Unfortunately NVVM does not provide an intrinsic for the
        # %dynamic_smem_size register, so we must read it using inline
        # assembly.
        get_dynshared_size = ir.InlineAsm(ir.FunctionType(ir.IntType(32), []),
                                          "mov.u32 $0, %dynamic_smem_size;",
                                          '=r', side_effect=True)
        dynsmem_size = builder.zext(builder.call(get_dynshared_size, []),
                                    ir.IntType(64))
        # Only 1-D dynamic shared memory is supported so the following is a
        # sufficient construction of the shape
        kitemsize = context.get_constant(types.intp, itemsize)
        kshape = [builder.udiv(dynsmem_size, kitemsize)]
    else:
        kshape = [context.get_constant(types.intp, s) for s in shape]

    # Create array object
    ndim = len(shape)
    aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')
    ary = context.make_array(aryty)(context, builder)

    # meminfo=None: the storage is not NRT-managed (it lives in the CUDA
    # address space chosen above, with no reference counting).
    context.populate_array(ary,
                           data=builder.bitcast(dataptr, ary.data.type),
                           shape=kshape,
                           strides=kstrides,
                           itemsize=context.get_constant(types.intp, itemsize),
                           meminfo=None)
    return ary._getvalue()
|
| 1046 |
+
|
| 1047 |
+
|
| 1048 |
+
@lower_constant(CUDADispatcher)
def cuda_dispatcher_const(context, builder, ty, pyval):
    """Constant-lower a CUDADispatcher reference.

    Dispatchers carry no runtime representation on the device, so a dummy
    value stands in for them.
    """
    return context.get_dummy_value()
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
# NumPy
# Install lowering for every NumPy ufunc known to the ufunc database.
register_ufuncs(ufunc_db.get_ufuncs(), lower)
|
lib/python3.10/site-packages/numba/cuda/cudamath.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from numba.core import types
|
| 3 |
+
from numba.core.typing.templates import ConcreteTemplate, signature, Registry
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Typing registry for CUDA support of the `math` module; the templates
# below are attached to the math functions via `infer_global`.
registry = Registry()
infer_global = registry.register_global
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@infer_global(math.acos)
@infer_global(math.acosh)
@infer_global(math.asin)
@infer_global(math.asinh)
@infer_global(math.atan)
@infer_global(math.atanh)
@infer_global(math.cosh)
@infer_global(math.degrees)
@infer_global(math.erf)
@infer_global(math.erfc)
@infer_global(math.expm1)
@infer_global(math.gamma)
@infer_global(math.lgamma)
@infer_global(math.log1p)
@infer_global(math.radians)
@infer_global(math.sinh)
@infer_global(math.tanh)
@infer_global(math.tan)
class Math_unary(ConcreteTemplate):
    """Typing for unary math functions with no float16 overload:
    int64/uint64 arguments promote to float64; float32/float64 map to
    themselves."""
    cases = [
        signature(types.float64, types.int64),
        signature(types.float64, types.uint64),
        signature(types.float32, types.float32),
        signature(types.float64, types.float64),
    ]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@infer_global(math.sin)
@infer_global(math.cos)
@infer_global(math.ceil)
@infer_global(math.floor)
@infer_global(math.sqrt)
@infer_global(math.log)
@infer_global(math.log2)
@infer_global(math.log10)
@infer_global(math.exp)
@infer_global(math.fabs)
@infer_global(math.trunc)
class Math_unary_with_fp16(ConcreteTemplate):
    """Typing for unary math functions that additionally accept float16
    (returning float16); otherwise identical to Math_unary."""
    cases = [
        signature(types.float64, types.int64),
        signature(types.float64, types.uint64),
        signature(types.float32, types.float32),
        signature(types.float64, types.float64),
        signature(types.float16, types.float16),
    ]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@infer_global(math.atan2)
class Math_atan2(ConcreteTemplate):
    """Typing for math.atan2: both arguments share a type; integers
    promote the result to float64."""
    key = math.atan2
    cases = [
        signature(types.float64, types.int64, types.int64),
        signature(types.float64, types.uint64, types.uint64),
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@infer_global(math.hypot)
class Math_hypot(ConcreteTemplate):
    """Typing for math.hypot; same promotion pattern as Math_atan2."""
    key = math.hypot
    cases = [
        signature(types.float64, types.int64, types.int64),
        signature(types.float64, types.uint64, types.uint64),
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@infer_global(math.copysign)
@infer_global(math.fmod)
class Math_binary(ConcreteTemplate):
    """Typing for float-only binary math functions (copysign, fmod)."""
    cases = [
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@infer_global(math.remainder)
class Math_remainder(ConcreteTemplate):
    """Typing for math.remainder (IEEE remainder), float-only."""
    cases = [
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@infer_global(math.pow)
class Math_pow(ConcreteTemplate):
    """Typing for math.pow; also allows an int32 exponent with a float
    base (mapping to the hardware powi-style overloads)."""
    cases = [
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
        signature(types.float32, types.float32, types.int32),
        signature(types.float64, types.float64, types.int32),
    ]
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@infer_global(math.frexp)
class Math_frexp(ConcreteTemplate):
    """Typing for math.frexp: returns (mantissa, int32 exponent)."""
    cases = [
        signature(types.Tuple([types.float32, types.int32]), types.float32),
        signature(types.Tuple([types.float64, types.int32]), types.float64),
    ]
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@infer_global(math.ldexp)
class Math_ldexp(ConcreteTemplate):
    """Typing for math.ldexp(x, exp): float base, int32 exponent."""
    cases = [
        signature(types.float32, types.float32, types.int32),
        signature(types.float64, types.float64, types.int32),
    ]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
@infer_global(math.isinf)
@infer_global(math.isnan)
@infer_global(math.isfinite)
class Math_isnan(ConcreteTemplate):
    """Typing for the float-classification predicates (isinf, isnan,
    isfinite); integer arguments are accepted and return boolean."""
    cases = [
        signature(types.boolean, types.int64),
        signature(types.boolean, types.uint64),
        signature(types.boolean, types.float32),
        signature(types.boolean, types.float64),
    ]
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@infer_global(math.modf)
class Math_modf(ConcreteTemplate):
    """Typing for math.modf: returns (fractional, integral) as a
    homogeneous 2-tuple of the input float type."""
    cases = [
        signature(types.UniTuple(types.float64, 2), types.float64),
        signature(types.UniTuple(types.float32, 2), types.float32)
    ]
|
lib/python3.10/site-packages/numba/cuda/decorators.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from warnings import warn
|
| 2 |
+
from numba.core import types, config, sigutils
|
| 3 |
+
from numba.core.errors import DeprecationError, NumbaInvalidConfigWarning
|
| 4 |
+
from numba.cuda.compiler import declare_device_function
|
| 5 |
+
from numba.cuda.dispatcher import CUDADispatcher
|
| 6 |
+
from numba.cuda.simulator.kernel import FakeCUDAKernel
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Error text for keyword arguments that were removed in favor of passing
# signatures positionally (formatted with the offending keyword name).
_msg_deprecated_signature_arg = ("Deprecated keyword argument `{0}`. "
                                 "Signatures should be passed as the first "
                                 "positional argument.")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def jit(func_or_sig=None, device=False, inline=False, link=None, debug=None,
        opt=True, lineinfo=False, cache=False, **kws):
    """
    JIT compile a Python function for CUDA GPUs.

    :param func_or_sig: A function to JIT compile, or *signatures* of a
       function to compile. If a function is supplied, then a
       :class:`Dispatcher <numba.cuda.dispatcher.CUDADispatcher>` is returned.
       Otherwise, ``func_or_sig`` may be a signature or a list of signatures,
       and a function is returned. The returned function accepts another
       function, which it will compile and then return a :class:`Dispatcher
       <numba.cuda.dispatcher.CUDADispatcher>`. See :ref:`jit-decorator` for
       more information about passing signatures.

       .. note:: A kernel cannot have any return value.
    :param device: Indicates whether this is a device function.
    :type device: bool
    :param link: A list of files containing PTX or CUDA C/C++ source to link
       with the function
    :type link: list
    :param debug: If True, check for exceptions thrown when executing the
       kernel. Since this degrades performance, this should only be used for
       debugging purposes. If set to True, then ``opt`` should be set to False.
       Defaults to False. (The default value can be overridden by setting
       environment variable ``NUMBA_CUDA_DEBUGINFO=1``.)
    :param fastmath: When True, enables fastmath optimizations as outlined in
       the :ref:`CUDA Fast Math documentation <cuda-fast-math>`.
    :param max_registers: Request that the kernel is limited to using at most
       this number of registers per thread. The limit may not be respected if
       the ABI requires a greater number of registers than that requested.
       Useful for increasing occupancy.
    :param opt: Whether to compile from LLVM IR to PTX with optimization
                enabled. When ``True``, ``-opt=3`` is passed to NVVM. When
                ``False``, ``-opt=0`` is passed to NVVM. Defaults to ``True``.
    :type opt: bool
    :param lineinfo: If True, generate a line mapping between source code and
       assembly code. This enables inspection of the source code in NVIDIA
       profiling tools and correlation with program counter sampling.
    :type lineinfo: bool
    :param cache: If True, enables the file-based cache for this function.
    :type cache: bool
    """
    # Fix: `link` previously defaulted to a mutable list shared between
    # calls; normalize a None default instead.
    if link is None:
        link = []

    if link and config.ENABLE_CUDASIM:
        raise NotImplementedError('Cannot link PTX in the simulator')

    if kws.get('boundscheck'):
        raise NotImplementedError("bounds checking is not supported for CUDA")

    # These keywords were removed in favor of positional signatures; raise
    # rather than silently ignoring them.
    if kws.get('argtypes') is not None:
        msg = _msg_deprecated_signature_arg.format('argtypes')
        raise DeprecationError(msg)
    if kws.get('restype') is not None:
        msg = _msg_deprecated_signature_arg.format('restype')
        raise DeprecationError(msg)
    if kws.get('bind') is not None:
        msg = _msg_deprecated_signature_arg.format('bind')
        raise DeprecationError(msg)

    debug = config.CUDA_DEBUGINFO_DEFAULT if debug is None else debug
    fastmath = kws.get('fastmath', False)
    extensions = kws.get('extensions', [])

    if debug and opt:
        msg = ("debug=True with opt=True (the default) "
               "is not supported by CUDA. This may result in a crash"
               " - set debug=False or opt=False.")
        warn(NumbaInvalidConfigWarning(msg))

    if debug and lineinfo:
        msg = ("debug and lineinfo are mutually exclusive. Use debug to get "
               "full debug info (this disables some optimizations), or "
               "lineinfo for line info only with code generation unaffected.")
        warn(NumbaInvalidConfigWarning(msg))

    # Fix: this previously tested `kws.get('link')`, which is always None
    # because `link` is an explicit parameter and never appears in **kws -
    # the error below could never fire.
    if device and link:
        raise ValueError("link keyword invalid for device function")

    # Distinguish the three call forms: jit(sig), jit([sig, ...]), and
    # jit(func) / jit() with options only.
    if sigutils.is_signature(func_or_sig):
        signatures = [func_or_sig]
        specialized = True
    elif isinstance(func_or_sig, list):
        signatures = func_or_sig
        specialized = False
    else:
        signatures = None

    if signatures is not None:
        if config.ENABLE_CUDASIM:
            def jitwrapper(func):
                return FakeCUDAKernel(func, device=device, fastmath=fastmath)
            return jitwrapper

        def _jit(func):
            targetoptions = kws.copy()
            targetoptions['debug'] = debug
            targetoptions['lineinfo'] = lineinfo
            targetoptions['link'] = link
            targetoptions['opt'] = opt
            targetoptions['fastmath'] = fastmath
            targetoptions['device'] = device
            targetoptions['extensions'] = extensions

            disp = CUDADispatcher(func, targetoptions=targetoptions)

            if cache:
                disp.enable_caching()

            for sig in signatures:
                argtypes, restype = sigutils.normalize_signature(sig)

                if restype and not device and restype != types.void:
                    raise TypeError("CUDA kernel must have void return type.")

                if device:
                    from numba.core import typeinfer
                    with typeinfer.register_dispatcher(disp):
                        disp.compile_device(argtypes, restype)
                else:
                    disp.compile(argtypes)

            # Eagerly-compiled dispatchers accept no further signatures.
            disp._specialized = specialized
            disp.disable_compile()

            return disp

        return _jit
    else:
        if func_or_sig is None:
            # Called with options only: return a decorator.
            if config.ENABLE_CUDASIM:
                def autojitwrapper(func):
                    return FakeCUDAKernel(func, device=device,
                                          fastmath=fastmath)
            else:
                def autojitwrapper(func):
                    return jit(func, device=device, debug=debug, opt=opt,
                               lineinfo=lineinfo, link=link, cache=cache,
                               **kws)

            return autojitwrapper
        # func_or_sig is a function
        else:
            if config.ENABLE_CUDASIM:
                return FakeCUDAKernel(func_or_sig, device=device,
                                      fastmath=fastmath)
            else:
                targetoptions = kws.copy()
                targetoptions['debug'] = debug
                targetoptions['lineinfo'] = lineinfo
                targetoptions['opt'] = opt
                targetoptions['link'] = link
                targetoptions['fastmath'] = fastmath
                targetoptions['device'] = device
                targetoptions['extensions'] = extensions
                disp = CUDADispatcher(func_or_sig, targetoptions=targetoptions)

                if cache:
                    disp.enable_caching()

                return disp
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def declare_device(name, sig):
    """
    Declare the signature of a foreign function. Returns a descriptor that can
    be used to call the function from a Python kernel.

    :param name: The name of the foreign function.
    :type name: str
    :param sig: The Numba signature of the function.
    """
    argtypes, restype = sigutils.normalize_signature(sig)

    # Device declarations must be fully typed - a missing return type
    # cannot be inferred for an external function.
    if restype is None:
        raise TypeError(
            'Return type must be provided for device declarations'
        )

    return declare_device_function(name, restype, argtypes)
|
lib/python3.10/site-packages/numba/cuda/descriptor.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.core.descriptors import TargetDescriptor
|
| 2 |
+
from numba.core.options import TargetOptions
|
| 3 |
+
from .target import CUDATargetContext, CUDATypingContext
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class CUDATargetOptions(TargetOptions):
    # No CUDA-specific target options beyond those provided by the base
    # TargetOptions class.
    pass
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class CUDATarget(TargetDescriptor):
    """Target descriptor for the CUDA backend.

    The typing and target contexts are created lazily so that importing
    this module does not attempt to load CUDA libraries on systems that
    lack them.
    """

    def __init__(self, name):
        self.options = CUDATargetOptions
        # The typing and target contexts are initialized only when needed -
        # this prevents an attempt to load CUDA libraries at import time on
        # systems that might not have them present.
        self._typingctx = None
        self._targetctx = None
        super().__init__(name)

    @property
    def typing_context(self):
        """The CUDA typing context, created on first access."""
        if self._typingctx is None:
            self._typingctx = CUDATypingContext()
        return self._typingctx

    @property
    def target_context(self):
        """The CUDA target context, created on first access."""
        if self._targetctx is None:
            # Fix: go through the `typing_context` property rather than
            # `self._typingctx` directly, so that accessing
            # `target_context` first does not pass a None typing context
            # to CUDATargetContext.
            self._targetctx = CUDATargetContext(self.typing_context)
        return self._targetctx
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Singleton CUDA target descriptor used throughout numba.cuda.
cuda_target = CUDATarget('cuda')
|
lib/python3.10/site-packages/numba/cuda/device_init.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Re export
|
| 2 |
+
import sys
|
| 3 |
+
from numba.cuda import cg
|
| 4 |
+
from .stubs import (threadIdx, blockIdx, blockDim, gridDim, laneid, warpsize,
|
| 5 |
+
syncwarp, shared, local, const, atomic,
|
| 6 |
+
shfl_sync_intrinsic, vote_sync_intrinsic, match_any_sync,
|
| 7 |
+
match_all_sync, threadfence_block, threadfence_system,
|
| 8 |
+
threadfence, selp, popc, brev, clz, ffs, fma, cbrt,
|
| 9 |
+
activemask, lanemask_lt, nanosleep, fp16,
|
| 10 |
+
_vector_type_stubs)
|
| 11 |
+
from .intrinsics import (grid, gridsize, syncthreads, syncthreads_and,
|
| 12 |
+
syncthreads_count, syncthreads_or)
|
| 13 |
+
from .cudadrv.error import CudaSupportError
|
| 14 |
+
from numba.cuda.cudadrv.driver import (BaseCUDAMemoryManager,
|
| 15 |
+
HostOnlyCUDAMemoryManager,
|
| 16 |
+
GetIpcHandleMixin, MemoryPointer,
|
| 17 |
+
MappedMemory, PinnedMemory, MemoryInfo,
|
| 18 |
+
IpcHandle, set_memory_manager)
|
| 19 |
+
from numba.cuda.cudadrv.runtime import runtime
|
| 20 |
+
from .cudadrv import nvvm
|
| 21 |
+
from numba.cuda import initialize
|
| 22 |
+
from .errors import KernelRuntimeError
|
| 23 |
+
|
| 24 |
+
from .decorators import jit, declare_device
|
| 25 |
+
from .api import *
|
| 26 |
+
from .api import _auto_device
|
| 27 |
+
from .args import In, Out, InOut
|
| 28 |
+
|
| 29 |
+
from .intrinsic_wrapper import (all_sync, any_sync, eq_sync, ballot_sync,
|
| 30 |
+
shfl_sync, shfl_up_sync, shfl_down_sync,
|
| 31 |
+
shfl_xor_sync)
|
| 32 |
+
|
| 33 |
+
from .kernels import reduction
|
| 34 |
+
|
| 35 |
+
# Convenience aliases: both `cuda.reduce` and `cuda.Reduce` construct a
# reduction kernel.
reduce = Reduce = reduction.Reduce

# Expose vector type constructors and aliases as module level attributes.
for vector_type_stub in _vector_type_stubs:
    setattr(sys.modules[__name__], vector_type_stub.__name__, vector_type_stub)
    for alias in vector_type_stub.aliases:
        setattr(sys.modules[__name__], alias, vector_type_stub)
# Clean the loop variables out of the module namespace.
del vector_type_stub, _vector_type_stubs
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def is_available():
    """Returns a boolean to indicate the availability of a CUDA GPU.

    This will initialize the driver if it hasn't been initialized.
    """
    # whilst `driver.is_available` will init the driver itself,
    # the driver initialization may raise and as a result break
    # test discovery/orchestration as `cuda.is_available` is often
    # used as a guard for whether to run a CUDA test, the try/except
    # below is to handle this case.
    # NOTE(review): the name `driver` is not imported explicitly in this
    # module's visible imports - presumably it is brought in via
    # `from .api import *`; verify.
    driver_is_available = False
    try:
        driver_is_available = driver.driver.is_available
    except CudaSupportError:
        # Treat an initialization failure as "not available" rather than
        # propagating, so guards like `if cuda.is_available():` stay safe.
        pass

    return driver_is_available and nvvm.is_available()
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def is_supported_version():
    """Returns True if the CUDA Runtime is a supported version.

    Unsupported versions (e.g. newer versions than those known to Numba)
    may still work; this function provides a facility to check whether the
    current Numba version is tested and known to work with the current
    runtime version. If the current version is unsupported, the caller can
    decide how to act. Options include:

    - Continuing silently,
    - Emitting a warning,
    - Generating an error or otherwise preventing the use of CUDA.
    """
    # Delegate to the runtime object, which knows the supported range.
    return runtime.is_supported_version()
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def cuda_error():
    """Returns None if there was no error initializing the CUDA driver.
    If there was an error initializing the driver, a string describing the
    error is returned.
    """
    # NOTE(review): relies on the module-level `driver` name - see the
    # note in is_available() about where it is imported from.
    return driver.driver.initialization_error
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Perform CUDA-target initialization (registration hooks) at import time.
initialize.initialize_all()
|
lib/python3.10/site-packages/numba/cuda/deviceufunc.py
ADDED
|
@@ -0,0 +1,908 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implements custom ufunc dispatch mechanism for non-CPU devices.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from abc import ABCMeta, abstractmethod
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import operator
|
| 8 |
+
import warnings
|
| 9 |
+
from functools import reduce
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from numba.np.ufunc.ufuncbuilder import _BaseUFuncBuilder, parse_identity
|
| 14 |
+
from numba.core import types, sigutils
|
| 15 |
+
from numba.core.typing import signature
|
| 16 |
+
from numba.np.ufunc.sigparse import parse_signature
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _broadcast_axis(a, b):
|
| 20 |
+
"""
|
| 21 |
+
Raises
|
| 22 |
+
------
|
| 23 |
+
ValueError if broadcast fails
|
| 24 |
+
"""
|
| 25 |
+
if a == b:
|
| 26 |
+
return a
|
| 27 |
+
elif a == 1:
|
| 28 |
+
return b
|
| 29 |
+
elif b == 1:
|
| 30 |
+
return a
|
| 31 |
+
else:
|
| 32 |
+
raise ValueError("failed to broadcast {0} and {1}".format(a, b))
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _pairwise_broadcast(shape1, shape2):
    """Broadcast two shape tuples against each other, axis by axis.

    Raises
    ------
    ValueError if broadcast fails
    """
    shape1 = tuple(shape1)
    shape2 = tuple(shape2)

    # Left-pad the lower-rank shape with 1s until the ranks agree.
    if len(shape1) < len(shape2):
        shape1 = (1,) * (len(shape2) - len(shape1)) + shape1
    elif len(shape2) < len(shape1):
        shape2 = (1,) * (len(shape1) - len(shape2)) + shape2

    return tuple(_broadcast_axis(x, y) for x, y in zip(shape1, shape2))
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _multi_broadcast(*shapelist):
    """Broadcast any number of shapes together by folding pairwise.

    Raises
    ------
    ValueError if broadcast fails
    """
    assert shapelist

    acc = shapelist[0]
    for argidx, shape in enumerate(shapelist[1:], start=1):
        try:
            acc = _pairwise_broadcast(acc, shape)
        except ValueError:
            # Report the 1-based argument index that failed to broadcast.
            raise ValueError(
                "failed to broadcast argument #{0}".format(argidx))
    return acc
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class UFuncMechanism(object):
    """
    Prepare ufunc arguments for vectorize.

    Subclasses provide the target-specific memory operations
    (``is_device_array``, ``to_device``, ``to_host``,
    ``allocate_device_array``, ``launch``); the base class implements the
    argument resolution, broadcasting and dispatch logic.
    """
    # Default stream used when the caller does not pass `stream=`.
    DEFAULT_STREAM = None
    # Whether device arrays can be sliced/raveled on the device directly.
    SUPPORT_DEVICE_SLICING = False

    def __init__(self, typemap, args):
        """Never used directly by user. Invoke by UFuncMechanism.call().
        """
        self.typemap = typemap
        self.args = args
        nargs = len(self.args)
        # Per-argument resolved dtype (filled by _fill_argtypes/_resolve_signature)
        self.argtypes = [None] * nargs
        # Indices of arguments that arrived as Python/NumPy scalars
        self.scalarpos = []
        self.signature = None
        # Per-argument array form (device array or np.ndarray)
        self.arrays = [None] * nargs

    def _fill_arrays(self):
        """
        Get all arguments in array form
        """
        for i, arg in enumerate(self.args):
            if self.is_device_array(arg):
                self.arrays[i] = self.as_device_array(arg)
            elif isinstance(arg, (int, float, complex, np.number)):
                # Is scalar
                self.scalarpos.append(i)
            else:
                self.arrays[i] = np.asarray(arg)

    def _fill_argtypes(self):
        """
        Get dtypes
        """
        for i, ary in enumerate(self.arrays):
            if ary is not None:
                # NOTE(review): getattr without a default raises if the
                # object has no `dtype` attribute; the None-check below only
                # covers an explicitly-None dtype.
                dtype = getattr(ary, 'dtype')
                if dtype is None:
                    dtype = np.asarray(ary).dtype
                self.argtypes[i] = dtype

    def _resolve_signature(self):
        """Resolve signature.
        May have ambiguous case.
        """
        matches = []
        # Resolve scalar args exact match first
        if self.scalarpos:
            # Try resolve scalar arguments
            for formaltys in self.typemap:
                match_map = []
                for i, (formal, actual) in enumerate(zip(formaltys,
                                                         self.argtypes)):
                    if actual is None:
                        # Scalar argument: infer its dtype on the fly.
                        actual = np.asarray(self.args[i]).dtype

                    match_map.append(actual == formal)

                if all(match_map):
                    matches.append(formaltys)

        # No matching with exact match; try coercing the scalar arguments
        if not matches:
            matches = []
            for formaltys in self.typemap:
                all_matches = all(actual is None or formal == actual
                                  for formal, actual in
                                  zip(formaltys, self.argtypes))
                if all_matches:
                    matches.append(formaltys)

        if not matches:
            raise TypeError("No matching version. GPU ufunc requires array "
                            "arguments to have the exact types. This behaves "
                            "like regular ufunc with casting='no'.")

        if len(matches) > 1:
            raise TypeError("Failed to resolve ufunc due to ambiguous "
                            "signature. Too many untyped scalars. "
                            "Use numpy dtype object to type tag.")

        # Try scalar arguments
        self.argtypes = matches[0]

    def _get_actual_args(self):
        """Return the actual arguments
        Casts scalar arguments to np.array.
        """
        for i in self.scalarpos:
            self.arrays[i] = np.array([self.args[i]], dtype=self.argtypes[i])

        return self.arrays

    def _broadcast(self, arys):
        """Perform numpy ufunc broadcasting
        """
        shapelist = [a.shape for a in arys]
        shape = _multi_broadcast(*shapelist)

        for i, ary in enumerate(arys):
            if ary.shape == shape:
                pass

            else:
                if self.is_device_array(ary):
                    arys[i] = self.broadcast_device(ary, shape)

                else:
                    # Host array: emulate broadcasting with zero strides on
                    # the axes that differ (or are missing), so no data is
                    # copied.
                    ax_differs = [ax for ax in range(len(shape))
                                  if ax >= ary.ndim
                                  or ary.shape[ax] != shape[ax]]

                    missingdim = len(shape) - len(ary.shape)
                    strides = [0] * missingdim + list(ary.strides)

                    for ax in ax_differs:
                        strides[ax] = 0

                    strided = np.lib.stride_tricks.as_strided(ary,
                                                              shape=shape,
                                                              strides=strides)

                    arys[i] = self.force_array_layout(strided)

        return arys

    def get_arguments(self):
        """Prepare and return the arguments for the ufunc.
        Does not call to_device().
        """
        self._fill_arrays()
        self._fill_argtypes()
        self._resolve_signature()
        arys = self._get_actual_args()
        return self._broadcast(arys)

    def get_function(self):
        """Returns (result_dtype, function)
        """
        return self.typemap[self.argtypes]

    def is_device_array(self, obj):
        """Is the `obj` a device array?
        Override in subclass
        """
        return False

    def as_device_array(self, obj):
        """Convert the `obj` to a device array
        Override in subclass

        Default implementation is an identity function
        """
        return obj

    def broadcast_device(self, ary, shape):
        """Handles ondevice broadcasting

        Override in subclass to add support.
        """
        raise NotImplementedError("broadcasting on device is not supported")

    def force_array_layout(self, ary):
        """Ensures array layout met device requirement.

        Override in sublcass
        """
        return ary

    @classmethod
    def call(cls, typemap, args, kws):
        """Perform the entire ufunc call mechanism.
        """
        # Handle keywords
        stream = kws.pop('stream', cls.DEFAULT_STREAM)
        out = kws.pop('out', None)

        if kws:
            warnings.warn("unrecognized keywords: %s" % ', '.join(kws))

        # Begin call resolution
        cr = cls(typemap, args)
        args = cr.get_arguments()
        resty, func = cr.get_function()

        # Remember the broadcast shape; arguments may be raveled to 1D
        # below, and the result is reshaped back at the end.
        outshape = args[0].shape

        # Adjust output value
        if out is not None and cr.is_device_array(out):
            out = cr.as_device_array(out)

        def attempt_ravel(a):
            if cr.SUPPORT_DEVICE_SLICING:
                raise NotImplementedError

            try:
                # Call the `.ravel()` method
                return a.ravel()
            except NotImplementedError:
                # If it is not a device array
                if not cr.is_device_array(a):
                    raise
                # For device array, retry ravel on the host by first
                # copying it back.
                else:
                    hostary = cr.to_host(a, stream).ravel()
                    return cr.to_device(hostary, stream)

        if args[0].ndim > 1:
            args = [attempt_ravel(a) for a in args]

        # Prepare argument on the device
        devarys = []
        any_device = False
        for a in args:
            if cr.is_device_array(a):
                devarys.append(a)
                any_device = True
            else:
                dev_a = cr.to_device(a, stream=stream)
                devarys.append(dev_a)

        # Launch
        shape = args[0].shape
        if out is None:
            # No output is provided
            devout = cr.allocate_device_array(shape, resty, stream=stream)

            devarys.extend([devout])
            cr.launch(func, shape[0], stream, devarys)

            if any_device:
                # If any of the arguments are on device,
                # Keep output on the device
                return devout.reshape(outshape)
            else:
                # Otherwise, transfer output back to host
                return devout.copy_to_host().reshape(outshape)

        elif cr.is_device_array(out):
            # If output is provided and it is a device array,
            # Return device array
            if out.ndim > 1:
                out = attempt_ravel(out)
            devout = out
            devarys.extend([devout])
            cr.launch(func, shape[0], stream, devarys)
            return devout.reshape(outshape)

        else:
            # If output is provided and it is a host array,
            # Return host array
            assert out.shape == shape
            assert out.dtype == resty
            devout = cr.allocate_device_array(shape, resty, stream=stream)
            devarys.extend([devout])
            cr.launch(func, shape[0], stream, devarys)
            return devout.copy_to_host(out, stream=stream).reshape(outshape)

    def to_device(self, hostary, stream):
        """Implement to device transfer
        Override in subclass
        """
        raise NotImplementedError

    def to_host(self, devary, stream):
        """Implement to host transfer
        Override in subclass
        """
        raise NotImplementedError

    def allocate_device_array(self, shape, dtype, stream):
        """Implements device allocation
        Override in subclass
        """
        raise NotImplementedError

    def launch(self, func, count, stream, args):
        """Implements device function invocation
        Override in subclass
        """
        raise NotImplementedError
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def to_dtype(ty):
    """Map a Numba type to the corresponding NumPy dtype."""
    # Enum members are represented by their underlying member dtype.
    if isinstance(ty, types.EnumMember):
        ty = ty.dtype
    return np.dtype(str(ty))
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
class DeviceVectorize(_BaseUFuncBuilder):
    """Base builder for device (GPU) element-wise ufuncs.

    Subclasses supply the kernel template and the compilation hooks
    (``_compile_core``, ``_get_globals``, ``_compile_kernel``).
    """

    def __init__(self, func, identity=None, cache=False, targetoptions={}):
        # NOTE: the mutable default `targetoptions={}` is only iterated,
        # never mutated here, so it is harmless in practice.
        if cache:
            raise TypeError("caching is not supported")
        for opt in targetoptions:
            if opt == 'nopython':
                warnings.warn("nopython kwarg for cuda target is redundant",
                              RuntimeWarning)
            else:
                fmt = "Unrecognized options. "
                fmt += "cuda vectorize target does not support option: '%s'"
                raise KeyError(fmt % opt)
        self.py_func = func
        self.identity = parse_identity(identity)
        # { arg_dtype: (return_dtype), cudakernel }
        self.kernelmap = OrderedDict()

    @property
    def pyfunc(self):
        # The wrapped Python function.
        return self.py_func

    def add(self, sig=None):
        """Compile the core function for `sig` and register the resulting
        kernel in `kernelmap`, keyed by the tuple of argument dtypes.
        """
        # compile core as device function
        args, return_type = sigutils.normalize_signature(sig)
        devfnsig = signature(return_type, *args)

        funcname = self.pyfunc.__name__
        kernelsource = self._get_kernel_source(self._kernel_template,
                                               devfnsig, funcname)
        corefn, return_type = self._compile_core(devfnsig)
        glbl = self._get_globals(corefn)
        # Kernel takes 1-d array versions of each argument plus the result.
        sig = signature(types.void, *([a[:] for a in args] + [return_type[:]]))
        exec(kernelsource, glbl)

        stager = glbl['__vectorized_%s' % funcname]
        kernel = self._compile_kernel(stager, sig)

        argdtypes = tuple(to_dtype(t) for t in devfnsig.args)
        resdtype = to_dtype(return_type)
        self.kernelmap[tuple(argdtypes)] = resdtype, kernel

    def build_ufunc(self):
        # Building a host-callable ufunc object is target-specific.
        raise NotImplementedError

    def _get_kernel_source(self, template, sig, funcname):
        # Render the kernel wrapper source from the subclass's template:
        # each argument is indexed by the per-thread index `__tid__`.
        args = ['a%d' % i for i in range(len(sig.args))]
        fmts = dict(name=funcname,
                    args=', '.join(args),
                    argitems=', '.join('%s[__tid__]' % i for i in args))
        return template.format(**fmts)

    def _compile_core(self, sig):
        # Override: compile the Python function as a device function.
        raise NotImplementedError

    def _get_globals(self, corefn):
        # Override: globals dict used when exec'ing the kernel source.
        raise NotImplementedError

    def _compile_kernel(self, fnobj, sig):
        # Override: compile the staged wrapper into a launchable kernel.
        raise NotImplementedError
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
class DeviceGUFuncVectorize(_BaseUFuncBuilder):
    """Base builder for device (GPU) generalized ufuncs.

    Subclasses supply the kernel template and the compilation hooks
    (``_compile_kernel``, ``_get_globals``).
    """

    def __init__(self, func, sig, identity=None, cache=False, targetoptions={},
                 writable_args=()):
        if cache:
            raise TypeError("caching is not supported")
        if writable_args:
            raise TypeError("writable_args are not supported")

        # Work on a copy: popping recognized options below must not mutate
        # the caller's dict, nor the shared `{}` default argument.
        targetoptions = dict(targetoptions)

        # Allow nopython flag to be set.
        if not targetoptions.pop('nopython', True):
            raise TypeError("nopython flag must be True")
        # Are there any more target options?
        if targetoptions:
            opts = ', '.join([repr(k) for k in targetoptions.keys()])
            fmt = "The following target options are not supported: {0}"
            raise TypeError(fmt.format(opts))

        self.py_func = func
        self.identity = parse_identity(identity)
        # The gufunc shape signature, e.g. "(m,n),(n)->(m)".
        self.signature = sig
        self.inputsig, self.outputsig = parse_signature(self.signature)

        # Maps from a tuple of input_dtypes to (output_dtypes, kernel)
        self.kernelmap = OrderedDict()

    @property
    def pyfunc(self):
        # The wrapped Python function.
        return self.py_func

    def add(self, sig=None):
        """Compile the core function for type signature `sig` and register
        the resulting kernel in `kernelmap`, keyed by input dtypes.

        Raises
        ------
        TypeError
            If `sig` declares a return type (guvectorized functions write
            into their output argument instead of returning).
        """
        indims = [len(x) for x in self.inputsig]
        outdims = [len(x) for x in self.outputsig]
        args, return_type = sigutils.normalize_signature(sig)

        # It is only valid to specify types.none as a return type, or to not
        # specify the return type (where the "Python None" is the return type)
        valid_return_type = return_type in (types.none, None)
        if not valid_return_type:
            raise TypeError('guvectorized functions cannot return values: '
                            f'signature {sig} specifies {return_type} return '
                            'type')

        funcname = self.py_func.__name__
        src = expand_gufunc_template(self._kernel_template, indims,
                                     outdims, funcname, args)

        glbls = self._get_globals(sig)

        exec(src, glbls)
        fnobj = glbls['__gufunc_{name}'.format(name=funcname)]

        # Outer types carry one extra (loop) dimension over the core dims.
        outertys = list(_determine_gufunc_outer_types(args, indims + outdims))
        kernel = self._compile_kernel(fnobj, sig=tuple(outertys))

        nout = len(outdims)
        dtypes = [np.dtype(str(t.dtype)) for t in outertys]
        indtypes = tuple(dtypes[:-nout])
        outdtypes = tuple(dtypes[-nout:])

        self.kernelmap[indtypes] = outdtypes, kernel

    def _compile_kernel(self, fnobj, sig):
        # Override: compile the expanded wrapper into a launchable kernel.
        raise NotImplementedError

    def _get_globals(self, sig):
        # Override: globals dict used when exec'ing the generated source.
        raise NotImplementedError
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def _determine_gufunc_outer_types(argtys, dims):
    """Yield the outer (loop-dimension-added) type for each gufunc argument.

    Array arguments gain one dimension; scalar arguments become 1-d arrays.
    """
    for argty, ndim in zip(argtys, dims):
        if isinstance(argty, types.Array):
            yield argty.copy(ndim=ndim + 1)
        elif ndim > 0:
            # A scalar type cannot carry core dimensions.
            raise ValueError("gufunc signature mismatch: ndim>0 for scalar")
        else:
            yield types.Array(dtype=argty, ndim=1, layout='A')
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def expand_gufunc_template(template, indims, outdims, funcname, argtypes):
    """Expand gufunc source template
    """
    ncoredims = indims + outdims
    argnames = ["arg{0}".format(i) for i in range(len(ncoredims))]

    # Guard the loop count by the shortest leading dimension.
    checkedarg = "min({0})".format(
        ', '.join("{0}.shape[0]".format(name) for name in argnames))

    nin = len(indims)
    inputs = [_gen_src_for_indexing(name, nd, ty)
              for name, nd, ty in zip(argnames[:nin], indims,
                                      argtypes[:nin])]
    outputs = [_gen_src_for_indexing(name, nd, ty)
               for name, nd, ty in zip(argnames[nin:], outdims,
                                       argtypes[nin:])]

    return template.format(name=funcname, args=', '.join(argnames),
                           checkedarg=checkedarg,
                           argitems=', '.join(inputs + outputs))
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def _gen_src_for_indexing(aref, adims, atype):
    """Render the indexing expression for one gufunc argument."""
    subscript = _gen_src_index(adims, atype)
    return "{aref}[{sliced}]".format(aref=aref, sliced=subscript)
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
def _gen_src_index(adims, atype):
    """Build the subscript that selects one core element per thread."""
    if adims > 0:
        # N-d core: take this thread's row, keep the core dimensions.
        return ','.join(['__tid__'] + [':'] * adims)
    if isinstance(atype, types.Array) and atype.ndim - 1 == adims:
        # Special case for 0-nd in shape-signature but
        # 1d array in type signature.
        # Slice it so that the result has the same dimension.
        return '__tid__:(__tid__ + 1)'
    return '__tid__'
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
class GUFuncEngine(object):
    '''Determine how to broadcast and execute a gufunc
    base on input shape and signature
    '''

    @classmethod
    def from_signature(cls, signature):
        # Alternate constructor: parse a gufunc shape signature string
        # (e.g. "(m,n),(n)->(m)") into input/output symbol lists.
        return cls(*parse_signature(signature))

    def __init__(self, inputsig, outputsig):
        # signatures
        self.sin = inputsig
        self.sout = outputsig
        # argument count
        self.nin = len(self.sin)
        self.nout = len(self.sout)

    def schedule(self, ishapes):
        """Given concrete input shapes, bind the signature's symbols to
        dimensions and return a GUFuncSchedule describing the launch.

        Raises TypeError on wrong argument count, ValueError on shape
        mismatches.
        """
        if len(ishapes) != self.nin:
            raise TypeError('invalid number of input argument')

        # associate symbol values for input signature
        symbolmap = {}
        outer_shapes = []
        inner_shapes = []

        for argn, (shape, symbols) in enumerate(zip(ishapes, self.sin)):
            argn += 1  # start from 1 for human
            inner_ndim = len(symbols)
            if len(shape) < inner_ndim:
                fmt = "arg #%d: insufficient inner dimension"
                raise ValueError(fmt % (argn,))
            # Split the shape into loop (outer) and core (inner) parts.
            if inner_ndim:
                inner_shape = shape[-inner_ndim:]
                outer_shape = shape[:-inner_ndim]
            else:
                inner_shape = ()
                outer_shape = shape

            # Bind each core-dimension symbol; a symbol seen twice must
            # bind to the same extent.
            for axis, (dim, sym) in enumerate(zip(inner_shape, symbols)):
                axis += len(outer_shape)
                if sym in symbolmap:
                    if symbolmap[sym] != dim:
                        fmt = "arg #%d: shape[%d] mismatch argument"
                        raise ValueError(fmt % (argn, axis))
                symbolmap[sym] = dim

            outer_shapes.append(outer_shape)
            inner_shapes.append(inner_shape)

        # solve output shape
        oshapes = []
        for outsig in self.sout:
            oshape = []
            for sym in outsig:
                oshape.append(symbolmap[sym])
            oshapes.append(tuple(oshape))

        # find the biggest outershape as looping dimension
        sizes = [reduce(operator.mul, s, 1) for s in outer_shapes]
        largest_i = np.argmax(sizes)
        loopdims = outer_shapes[largest_i]

        pinned = [False] * self.nin  # same argument for each iteration
        for i, d in enumerate(outer_shapes):
            if d != loopdims:
                # Only scalar-like outer shapes may be reused (pinned)
                # across iterations.
                if d == (1,) or d == ():
                    pinned[i] = True
                else:
                    fmt = "arg #%d: outer dimension mismatch"
                    raise ValueError(fmt % (i + 1,))

        return GUFuncSchedule(self, inner_shapes, oshapes, loopdims, pinned)
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
class GUFuncSchedule(object):
    """Holds the broadcast/launch plan computed by GUFuncEngine.schedule."""

    def __init__(self, parent, ishapes, oshapes, loopdims, pinned):
        self.parent = parent
        # core shapes
        self.ishapes = ishapes
        self.oshapes = oshapes
        # looping dimension
        self.loopdims = loopdims
        self.loopn = reduce(operator.mul, loopdims, 1)
        # flags
        self.pinned = pinned

        self.output_shapes = [loopdims + shape for shape in oshapes]

    def __str__(self):
        import pprint

        names = 'ishapes', 'oshapes', 'loopdims', 'loopn', 'pinned'
        return pprint.pformat({name: getattr(self, name) for name in names})
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class GeneralizedUFunc(object):
    """Callable wrapper dispatching a gufunc call to a compiled kernel.

    `kernelmap` maps input-dtype tuples to (output dtypes, kernel);
    `engine` is a GUFuncEngine that schedules broadcasting.
    """

    def __init__(self, kernelmap, engine):
        self.kernelmap = kernelmap
        self.engine = engine
        self.max_blocksize = 2 ** 30

    def __call__(self, *args, **kws):
        # `_call_steps` is supplied by the target-specific subclass and
        # performs the memory management around the launch.
        callsteps = self._call_steps(self.engine.nin, self.engine.nout,
                                     args, kws)
        indtypes, schedule, outdtypes, kernel = self._schedule(
            callsteps.inputs, callsteps.outputs)
        callsteps.adjust_input_types(indtypes)

        outputs = callsteps.prepare_outputs(schedule, outdtypes)
        inputs = callsteps.prepare_inputs()
        parameters = self._broadcast(schedule, inputs, outputs)

        callsteps.launch_kernel(kernel, schedule.loopn, parameters)

        return callsteps.post_process_outputs(outputs)

    def _schedule(self, inputs, outs):
        # Compute the broadcast schedule and select the kernel for the
        # input dtypes.
        input_shapes = [a.shape for a in inputs]
        schedule = self.engine.schedule(input_shapes)

        # find kernel
        indtypes = tuple(i.dtype for i in inputs)
        try:
            outdtypes, kernel = self.kernelmap[indtypes]
        except KeyError:
            # No exact match, then use the first compatible.
            # This does not match the numpy dispatching exactly.
            # Later, we may just jit a new version for the missing signature.
            indtypes = self._search_matching_signature(indtypes)
            # Select kernel
            outdtypes, kernel = self.kernelmap[indtypes]

        # check output
        for sched_shape, out in zip(schedule.output_shapes, outs):
            if out is not None and sched_shape != out.shape:
                raise ValueError('output shape mismatch')

        return indtypes, schedule, outdtypes, kernel

    def _search_matching_signature(self, idtypes):
        """
        Given the input types in `idtypes`, return a compatible sequence of
        types that is defined in `kernelmap`.

        Note: Ordering is guaranteed by `kernelmap` being a OrderedDict
        """
        # NOTE(review): the zip order passes the registered signature dtype
        # as `actual` (the "from" type of can_cast) and the call-site dtype
        # as `desired` — confirm this direction is intended before changing.
        for sig in self.kernelmap.keys():
            if all(np.can_cast(actual, desired)
                   for actual, desired in zip(sig, idtypes)):
                return sig
        else:
            raise TypeError("no matching signature")

    def _broadcast(self, schedule, params, retvals):
        """Reshape inputs and outputs to (loop, *core) form for the launch."""
        assert schedule.loopn > 0, "zero looping dimension"

        odim = 1 if not schedule.loopdims else schedule.loopn
        newparams = []
        for p, cs in zip(params, schedule.ishapes):
            if not cs and p.size == 1:
                # Broadcast scalar input
                devary = self._broadcast_scalar_input(p, odim)
                newparams.append(devary)
            else:
                # Broadcast vector input
                newparams.append(self._broadcast_array(p, odim, cs))

        newretvals = []
        for retval, oshape in zip(retvals, schedule.oshapes):
            newretvals.append(retval.reshape(odim, *oshape))
        return tuple(newparams) + tuple(newretvals)

    def _broadcast_array(self, ary, newdim, innerdim):
        newshape = (newdim,) + innerdim
        # No change in shape
        if ary.shape == newshape:
            return ary

        # Creating new dimension
        elif len(ary.shape) < len(newshape):
            assert newshape[-len(ary.shape):] == ary.shape, \
                "cannot add dim and reshape at the same time"
            return self._broadcast_add_axis(ary, newshape)

        # Collapsing dimension
        else:
            return ary.reshape(*newshape)

    def _broadcast_add_axis(self, ary, newshape):
        # Override in subclass to support prepending an axis on device.
        raise NotImplementedError("cannot add new axis")

    def _broadcast_scalar_input(self, ary, shape):
        # Override in subclass to broadcast a 1-element array on device.
        raise NotImplementedError
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
class GUFuncCallSteps(metaclass=ABCMeta):
    """
    Implements memory management and kernel launch operations for GUFunc calls.

    One instance of this class is instantiated for each call, and the instance
    is specific to the arguments given to the GUFunc call.

    The base class implements the overall logic; subclasses provide
    target-specific implementations of individual functions.
    """

    # The base class uses these slots; subclasses may provide additional slots.
    __slots__ = [
        'outputs',
        'inputs',
        '_copy_result_to_host',
    ]

    @abstractmethod
    def launch_kernel(self, kernel, nelem, args):
        """Implement the kernel launch"""

    @abstractmethod
    def is_device_array(self, obj):
        """
        Return True if `obj` is a device array for this target, False
        otherwise.
        """

    @abstractmethod
    def as_device_array(self, obj):
        """
        Return `obj` as a device array on this target.

        May return `obj` directly if it is already on the target.
        """

    @abstractmethod
    def to_device(self, hostary):
        """
        Copy `hostary` to the device and return the device array.
        """

    @abstractmethod
    def allocate_device_array(self, shape, dtype):
        """
        Allocate a new uninitialized device array with the given shape and
        dtype.
        """

    def __init__(self, nin, nout, args, kwargs):
        # `out` may arrive as a keyword argument; positional outputs are the
        # trailing entries of `args` beyond the first `nin`.
        outputs = kwargs.get('out')

        # Ensure the user has passed a correct number of arguments
        if outputs is None and len(args) not in (nin, (nin + nout)):
            def pos_argn(n):
                return f'{n} positional argument{"s" * (n != 1)}'

            msg = (f'This gufunc accepts {pos_argn(nin)} (when providing '
                   f'input only) or {pos_argn(nin + nout)} (when providing '
                   f'input and output). Got {pos_argn(len(args))}.')
            raise TypeError(msg)

        if outputs is not None and len(args) > nin:
            raise ValueError("cannot specify argument 'out' as both positional "
                             "and keyword")
        else:
            # If the user did not pass outputs either in the out kwarg or as
            # positional arguments, then we need to generate an initial list of
            # "placeholder" outputs using None as a sentry value
            # NOTE(review): this `else` pairs with the second `if`, so it also
            # runs when `out` WAS passed via the kwarg (and no positional
            # outputs were given), replicating that single object `nout`
            # times - presumably only meaningful for nout == 1; confirm
            # against callers.
            outputs = [outputs] * nout

        # Ensure all output device arrays are Numba device arrays - for
        # example, any output passed in that supports the CUDA Array Interface
        # is converted to a Numba CUDA device array; others are left untouched.
        all_user_outputs_are_host = True
        self.outputs = []
        for output in outputs:
            if self.is_device_array(output):
                self.outputs.append(self.as_device_array(output))
                all_user_outputs_are_host = False
            else:
                self.outputs.append(output)

        all_host_arrays = not any([self.is_device_array(a) for a in args])

        # - If any of the arguments are device arrays, we leave the output on
        #   the device.
        self._copy_result_to_host = (all_host_arrays and
                                     all_user_outputs_are_host)

        # Normalize arguments - ensure they are either device- or host-side
        # arrays (as opposed to lists, tuples, etc).
        def normalize_arg(a):
            if self.is_device_array(a):
                convert = self.as_device_array
            else:
                convert = np.asarray

            return convert(a)

        normalized_args = [normalize_arg(a) for a in args]
        self.inputs = normalized_args[:nin]

        # Check if there are extra arguments for outputs.
        # Positional outputs (already normalized above) take precedence over
        # the placeholder list built from the kwarg.
        unused_inputs = normalized_args[nin:]
        if unused_inputs:
            self.outputs = unused_inputs

    def adjust_input_types(self, indtypes):
        """
        Attempt to cast the inputs to the required types if necessary
        and if they are not device arrays.

        Side effect: Only affects the elements of `inputs` that require
        a type cast.
        """
        for i, (ity, val) in enumerate(zip(indtypes, self.inputs)):
            if ity != val.dtype:
                if not hasattr(val, 'astype'):
                    msg = ("compatible signature is possible by casting but "
                           "{0} does not support .astype()").format(type(val))
                    raise TypeError(msg)
                # Cast types
                self.inputs[i] = val.astype(ity)

    def prepare_outputs(self, schedule, outdtypes):
        """
        Returns a list of output parameters that all reside on the target
        device.

        Outputs that were passed-in to the GUFunc are used if they reside on the
        device; other outputs are allocated as necessary.
        """
        outputs = []
        for shape, dtype, output in zip(schedule.output_shapes, outdtypes,
                                        self.outputs):
            # Allocate when no output was supplied, or when results will be
            # copied back to host anyway (so the user's host array is not a
            # usable device-side destination).
            if output is None or self._copy_result_to_host:
                output = self.allocate_device_array(shape, dtype)
            outputs.append(output)

        return outputs

    def prepare_inputs(self):
        """
        Returns a list of input parameters that all reside on the target device.
        """
        def ensure_device(parameter):
            if self.is_device_array(parameter):
                convert = self.as_device_array
            else:
                convert = self.to_device

            return convert(parameter)

        return [ensure_device(p) for p in self.inputs]

    def post_process_outputs(self, outputs):
        """
        Moves the given output(s) to the host if necessary.

        Returns a single value (e.g. an array) if there was one output, or a
        tuple of arrays if there were multiple. Although this feels a little
        jarring, it is consistent with the behavior of GUFuncs in general.
        """
        # NOTE(review): `to_host` is not declared @abstractmethod on this
        # class - subclasses are presumably expected to provide it; verify.
        if self._copy_result_to_host:
            outputs = [self.to_host(output, self_output)
                       for output, self_output in zip(outputs, self.outputs)]
        elif self.outputs[0] is not None:
            outputs = self.outputs

        if len(outputs) == 1:
            return outputs[0]
        else:
            return tuple(outputs)
|
lib/python3.10/site-packages/numba/cuda/dispatcher.py
ADDED
|
@@ -0,0 +1,1057 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import ctypes
|
| 5 |
+
import functools
|
| 6 |
+
|
| 7 |
+
from numba.core import config, serialize, sigutils, types, typing, utils
|
| 8 |
+
from numba.core.caching import Cache, CacheImpl
|
| 9 |
+
from numba.core.compiler_lock import global_compiler_lock
|
| 10 |
+
from numba.core.dispatcher import Dispatcher
|
| 11 |
+
from numba.core.errors import NumbaPerformanceWarning
|
| 12 |
+
from numba.core.typing.typeof import Purpose, typeof
|
| 13 |
+
|
| 14 |
+
from numba.cuda.api import get_current_device
|
| 15 |
+
from numba.cuda.args import wrap_arg
|
| 16 |
+
from numba.cuda.compiler import compile_cuda, CUDACompiler
|
| 17 |
+
from numba.cuda.cudadrv import driver
|
| 18 |
+
from numba.cuda.cudadrv.devices import get_context
|
| 19 |
+
from numba.cuda.descriptor import cuda_target
|
| 20 |
+
from numba.cuda.errors import (missing_launch_config_msg,
|
| 21 |
+
normalize_kernel_dimensions)
|
| 22 |
+
from numba.cuda import types as cuda_types
|
| 23 |
+
|
| 24 |
+
from numba import cuda
|
| 25 |
+
from numba import _dispatcher
|
| 26 |
+
|
| 27 |
+
from warnings import warn
|
| 28 |
+
|
| 29 |
+
# Names of float16 math intrinsics that have C++ foreign-function wrappers in
# cpp_function_wrappers.cu. _Kernel.__init__ scans the generated PTX for the
# corresponding `__numba_wrapper_<name>` symbols and, when any are present,
# adds that .cu file to the link list.
cuda_fp16_math_funcs = ['hsin', 'hcos',
                        'hlog', 'hlog10',
                        'hlog2',
                        'hexp', 'hexp10',
                        'hexp2',
                        'hsqrt', 'hrsqrt',
                        'hfloor', 'hceil',
                        'hrcp', 'hrint',
                        'htrunc', 'hdiv']
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class _Kernel(serialize.ReduceMixin):
    '''
    CUDA Kernel specialized for a given set of argument types. When called, this
    object launches the kernel on the device.
    '''

    @global_compiler_lock
    def __init__(self, py_func, argtypes, link=None, debug=False,
                 lineinfo=False, inline=False, fastmath=False, extensions=None,
                 max_registers=None, opt=True, device=False):

        if device:
            raise RuntimeError('Cannot compile a device function as a kernel')

        super().__init__()

        # _DispatcherBase.nopython_signatures() expects this attribute to be
        # present, because it assumes an overload is a CompileResult. In the
        # CUDA target, _Kernel instances are stored instead, so we provide this
        # attribute here to avoid duplicating nopython_signatures() in the CUDA
        # target with slight modifications.
        self.objectmode = False

        # The finalizer constructed by _DispatcherBase._make_finalizer also
        # expects overloads to be a CompileResult. It uses the entry_point to
        # remove a CompileResult from a target context. However, since we never
        # insert kernels into a target context (there is no need because they
        # cannot be called by other functions, only through the dispatcher) it
        # suffices to pretend we have an entry point of None.
        self.entry_point = None

        self.py_func = py_func
        self.argtypes = argtypes
        self.debug = debug
        self.lineinfo = lineinfo
        self.extensions = extensions or []

        # opt maps to NVVM optimization level 3 (on) or 0 (off).
        nvvm_options = {
            'fastmath': fastmath,
            'opt': 3 if opt else 0
        }

        # Compile for the compute capability of the currently active device.
        cc = get_current_device().compute_capability
        cres = compile_cuda(self.py_func, types.void, self.argtypes,
                            debug=self.debug,
                            lineinfo=lineinfo,
                            inline=inline,
                            fastmath=fastmath,
                            nvvm_options=nvvm_options,
                            cc=cc)
        tgt_ctx = cres.target_context
        code = self.py_func.__code__
        filename = code.co_filename
        linenum = code.co_firstlineno
        lib, kernel = tgt_ctx.prepare_cuda_kernel(cres.library, cres.fndesc,
                                                  debug, lineinfo, nvvm_options,
                                                  filename, linenum,
                                                  max_registers)

        if not link:
            link = []

        # A kernel needs cooperative launch if grid_sync is being used.
        self.cooperative = 'cudaCGGetIntrinsicHandle' in lib.get_asm_str()
        # We need to link against cudadevrt if grid sync is being used.
        if self.cooperative:
            lib.needs_cudadevrt = True

        # Detect use of fp16 math wrappers so their C++ source gets linked in.
        res = [fn for fn in cuda_fp16_math_funcs
               if (f'__numba_wrapper_{fn}' in lib.get_asm_str())]

        if res:
            # Path to the source containing the foreign function
            basedir = os.path.dirname(os.path.abspath(__file__))
            functions_cu_path = os.path.join(basedir,
                                             'cpp_function_wrappers.cu')
            link.append(functions_cu_path)

        for filepath in link:
            lib.add_linking_file(filepath)

        # populate members
        self.entry_name = kernel.name
        self.signature = cres.signature
        self._type_annotation = cres.type_annotation
        self._codelibrary = lib
        self.call_helper = cres.call_helper

        # The following are referred to by the cache implementation. Note:
        # - There are no referenced environments in CUDA.
        # - Kernels don't have lifted code.
        # - reload_init is only for parfors.
        self.target_context = tgt_ctx
        self.fndesc = cres.fndesc
        self.environment = cres.environment
        self._referenced_environments = []
        self.lifted = []
        self.reload_init = []

    @property
    def library(self):
        # The code library holding the compiled kernel.
        return self._codelibrary

    @property
    def type_annotation(self):
        # May be None for rebuilt (deserialized) kernels - see _rebuild().
        return self._type_annotation

    def _find_referenced_environments(self):
        # Always empty: there are no referenced environments in CUDA.
        return self._referenced_environments

    @property
    def codegen(self):
        return self.target_context.codegen()

    @property
    def argument_types(self):
        return tuple(self.signature.args)

    @classmethod
    def _rebuild(cls, cooperative, name, signature, codelibrary,
                 debug, lineinfo, call_helper, extensions):
        """
        Rebuild an instance.
        """
        instance = cls.__new__(cls)
        # invoke parent constructor
        super(cls, instance).__init__()
        # populate members
        instance.entry_point = None
        instance.cooperative = cooperative
        instance.entry_name = name
        instance.signature = signature
        instance._type_annotation = None
        instance._codelibrary = codelibrary
        instance.debug = debug
        instance.lineinfo = lineinfo
        instance.call_helper = call_helper
        instance.extensions = extensions
        return instance

    def _reduce_states(self):
        """
        Reduce the instance for serialization.
        Compiled definitions are serialized in PTX form.
        Type annotation are discarded.
        Thread, block and shared memory configuration are serialized.
        Stream information is discarded.
        """
        return dict(cooperative=self.cooperative, name=self.entry_name,
                    signature=self.signature, codelibrary=self._codelibrary,
                    debug=self.debug, lineinfo=self.lineinfo,
                    call_helper=self.call_helper, extensions=self.extensions)

    def bind(self):
        """
        Force binding to current CUDA context
        """
        self._codelibrary.get_cufunc()

    @property
    def regs_per_thread(self):
        '''
        The number of registers used by each thread for this kernel.
        '''
        return self._codelibrary.get_cufunc().attrs.regs

    @property
    def const_mem_size(self):
        '''
        The amount of constant memory used by this kernel.
        '''
        return self._codelibrary.get_cufunc().attrs.const

    @property
    def shared_mem_per_block(self):
        '''
        The amount of shared memory used per block for this kernel.
        '''
        return self._codelibrary.get_cufunc().attrs.shared

    @property
    def max_threads_per_block(self):
        '''
        The maximum allowable threads per block.
        '''
        return self._codelibrary.get_cufunc().attrs.maxthreads

    @property
    def local_mem_per_thread(self):
        '''
        The amount of local memory used per thread for this kernel.
        '''
        return self._codelibrary.get_cufunc().attrs.local

    def inspect_llvm(self):
        '''
        Returns the LLVM IR for this kernel.
        '''
        return self._codelibrary.get_llvm_str()

    def inspect_asm(self, cc):
        '''
        Returns the PTX code for this kernel.
        '''
        return self._codelibrary.get_asm_str(cc=cc)

    def inspect_sass_cfg(self):
        '''
        Returns the CFG of the SASS for this kernel.

        Requires nvdisasm to be available on the PATH.
        '''
        return self._codelibrary.get_sass_cfg()

    def inspect_sass(self):
        '''
        Returns the SASS code for this kernel.

        Requires nvdisasm to be available on the PATH.
        '''
        return self._codelibrary.get_sass()

    def inspect_types(self, file=None):
        '''
        Produce a dump of the Python source of this function annotated with the
        corresponding Numba IR and type information. The dump is written to
        *file*, or *sys.stdout* if *file* is *None*.
        '''
        if self._type_annotation is None:
            raise ValueError("Type annotation is not available")

        if file is None:
            file = sys.stdout

        print("%s %s" % (self.entry_name, self.argument_types), file=file)
        print('-' * 80, file=file)
        print(self._type_annotation, file=file)
        print('=' * 80, file=file)

    def max_cooperative_grid_blocks(self, blockdim, dynsmemsize=0):
        '''
        Calculates the maximum number of blocks that can be launched for this
        kernel in a cooperative grid in the current context, for the given block
        and dynamic shared memory sizes.

        :param blockdim: Block dimensions, either as a scalar for a 1D block, or
                         a tuple for 2D or 3D blocks.
        :param dynsmemsize: Dynamic shared memory size in bytes.
        :return: The maximum number of blocks in the grid.
        '''
        ctx = get_context()
        cufunc = self._codelibrary.get_cufunc()

        # Flatten a multi-dimensional block to a total thread count.
        if isinstance(blockdim, tuple):
            blockdim = functools.reduce(lambda x, y: x * y, blockdim)
        active_per_sm = ctx.get_active_blocks_per_multiprocessor(cufunc,
                                                                 blockdim,
                                                                 dynsmemsize)
        sm_count = ctx.device.MULTIPROCESSOR_COUNT
        return active_per_sm * sm_count

    def launch(self, args, griddim, blockdim, stream=0, sharedmem=0):
        """
        Launch this kernel on the device with the given arguments and launch
        configuration. In debug mode, a device-side error code slot is zeroed
        before launch and checked afterwards, re-raising any recorded
        exception on the host.
        """
        # Prepare kernel
        cufunc = self._codelibrary.get_cufunc()

        if self.debug:
            excname = cufunc.name + "__errcode__"
            excmem, excsz = cufunc.module.get_global_symbol(excname)
            assert excsz == ctypes.sizeof(ctypes.c_int)
            excval = ctypes.c_int()
            excmem.memset(0, stream=stream)

        # Prepare arguments
        retr = []                       # hold functors for writeback

        kernelargs = []
        for t, v in zip(self.argument_types, args):
            self._prepare_args(t, v, stream, retr, kernelargs)

        if driver.USE_NV_BINDING:
            zero_stream = driver.binding.CUstream(0)
        else:
            zero_stream = None

        # A truthy stream contributes its handle; otherwise the default
        # (zero) stream handle is used.
        stream_handle = stream and stream.handle or zero_stream

        # Invoke kernel
        driver.launch_kernel(cufunc.handle,
                             *griddim,
                             *blockdim,
                             sharedmem,
                             stream_handle,
                             kernelargs,
                             cooperative=self.cooperative)

        if self.debug:
            driver.device_to_host(ctypes.addressof(excval), excmem, excsz)
            if excval.value != 0:
                # An error occurred
                def load_symbol(name):
                    mem, sz = cufunc.module.get_global_symbol("%s__%s__" %
                                                              (cufunc.name,
                                                               name))
                    val = ctypes.c_int()
                    driver.device_to_host(ctypes.addressof(val), mem, sz)
                    return val.value

                tid = [load_symbol("tid" + i) for i in 'zyx']
                ctaid = [load_symbol("ctaid" + i) for i in 'zyx']
                code = excval.value
                exccls, exc_args, loc = self.call_helper.get_exception(code)
                # Prefix the exception message with the source location
                if loc is None:
                    locinfo = ''
                else:
                    sym, filepath, lineno = loc
                    filepath = os.path.abspath(filepath)
                    locinfo = 'In function %r, file %s, line %s, ' % (sym,
                                                                     filepath,
                                                                     lineno,)
                # Prefix the exception message with the thread position
                prefix = "%stid=%s ctaid=%s" % (locinfo, tid, ctaid)
                if exc_args:
                    exc_args = ("%s: %s" % (prefix, exc_args[0]),) + \
                        exc_args[1:]
                else:
                    exc_args = prefix,
                raise exccls(*exc_args)

        # retrieve auto converted arrays
        for wb in retr:
            wb()

    def _prepare_args(self, ty, val, stream, retr, kernelargs):
        """
        Convert arguments to ctypes and append to kernelargs
        """

        # map the arguments using any extension you've registered
        for extension in reversed(self.extensions):
            ty, val = extension.prepare_args(
                ty,
                val,
                stream=stream,
                retr=retr)

        if isinstance(ty, types.Array):
            devary = wrap_arg(val).to_device(retr, stream)

            c_intp = ctypes.c_ssize_t

            # The five header fields of the array struct, followed by the
            # per-axis shape and strides.
            meminfo = ctypes.c_void_p(0)
            parent = ctypes.c_void_p(0)
            nitems = c_intp(devary.size)
            itemsize = c_intp(devary.dtype.itemsize)

            ptr = driver.device_pointer(devary)

            if driver.USE_NV_BINDING:
                ptr = int(ptr)

            data = ctypes.c_void_p(ptr)

            kernelargs.append(meminfo)
            kernelargs.append(parent)
            kernelargs.append(nitems)
            kernelargs.append(itemsize)
            kernelargs.append(data)
            for ax in range(devary.ndim):
                kernelargs.append(c_intp(devary.shape[ax]))
            for ax in range(devary.ndim):
                kernelargs.append(c_intp(devary.strides[ax]))

        elif isinstance(ty, types.Integer):
            # Numba integer type names match ctypes suffixes (e.g. int64).
            cval = getattr(ctypes, "c_%s" % ty)(val)
            kernelargs.append(cval)

        elif ty == types.float16:
            # float16 is passed as its raw 16-bit pattern.
            cval = ctypes.c_uint16(np.float16(val).view(np.uint16))
            kernelargs.append(cval)

        elif ty == types.float64:
            cval = ctypes.c_double(val)
            kernelargs.append(cval)

        elif ty == types.float32:
            cval = ctypes.c_float(val)
            kernelargs.append(cval)

        elif ty == types.boolean:
            cval = ctypes.c_uint8(int(val))
            kernelargs.append(cval)

        elif ty == types.complex64:
            # Complex values are split into separate real/imag arguments.
            kernelargs.append(ctypes.c_float(val.real))
            kernelargs.append(ctypes.c_float(val.imag))

        elif ty == types.complex128:
            kernelargs.append(ctypes.c_double(val.real))
            kernelargs.append(ctypes.c_double(val.imag))

        elif isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
            kernelargs.append(ctypes.c_int64(val.view(np.int64)))

        elif isinstance(ty, types.Record):
            devrec = wrap_arg(val).to_device(retr, stream)
            ptr = devrec.device_ctypes_pointer
            if driver.USE_NV_BINDING:
                ptr = ctypes.c_void_p(int(ptr))
            kernelargs.append(ptr)

        elif isinstance(ty, types.BaseTuple):
            # Tuples are flattened element by element, recursively.
            assert len(ty) == len(val)
            for t, v in zip(ty, val):
                self._prepare_args(t, v, stream, retr, kernelargs)

        elif isinstance(ty, types.EnumMember):
            try:
                self._prepare_args(
                    ty.dtype, val.value, stream, retr, kernelargs
                )
            except NotImplementedError:
                raise NotImplementedError(ty, val)

        else:
            raise NotImplementedError(ty, val)
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
class ForAll(object):
    """
    Launch helper that runs a dispatcher over `ntasks` elements, deriving a
    grid size from the thread-per-block count (user-specified, or suggested
    by the driver when zero).
    """

    def __init__(self, dispatcher, ntasks, tpb, stream, sharedmem):
        if ntasks < 0:
            raise ValueError("Can't create ForAll with negative task count: %s"
                             % ntasks)
        self.dispatcher = dispatcher
        self.ntasks = ntasks
        self.thread_per_block = tpb
        self.stream = stream
        self.sharedmem = sharedmem

    def __call__(self, *args):
        # Nothing to launch for an empty task set.
        if self.ntasks == 0:
            return

        disp = self.dispatcher
        specialized = disp if disp.specialized else disp.specialize(*args)

        nthreads = self._compute_thread_per_block(specialized)
        # Ceiling division: enough blocks to cover every task.
        nblocks = (self.ntasks + nthreads - 1) // nthreads

        configured = specialized[nblocks, nthreads, self.stream,
                                 self.sharedmem]
        return configured(*args)

    def _compute_thread_per_block(self, dispatcher):
        # A user-specified block size always wins.
        if self.thread_per_block != 0:
            return self.thread_per_block

        # Otherwise ask the driver for a good configuration. The dispatcher
        # is specialized, so it holds exactly one definition, whose cufunc
        # we can hand to the occupancy calculator.
        ctx = get_context()
        kernel = next(iter(dispatcher.overloads.values()))
        _, suggested = ctx.get_max_potential_block_size(
            func=kernel._codelibrary.get_cufunc(),
            b2d_func=0,  # dynamic-shared memory is constant to blksz
            memsize=self.sharedmem,
            blocksizelimit=1024,
        )
        return suggested
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
class _LaunchConfiguration:
    """Pairs a dispatcher with launch dimensions; calling it launches the
    kernel via ``dispatcher.call``."""

    def __init__(self, dispatcher, griddim, blockdim, stream, sharedmem):
        self.dispatcher = dispatcher
        self.griddim = griddim
        self.blockdim = blockdim
        self.stream = stream
        self.sharedmem = sharedmem

        if config.CUDA_LOW_OCCUPANCY_WARNINGS:
            # Warn when the grid has fewer than 128 blocks. The threshold is
            # heuristic - ideally it would be twice the SM count, but that
            # varies per device (from ~4 SMs on small GPUs to 132 on an
            # H100-SXM5). Kernels should generally use large grids (hundreds
            # or thousands of blocks), so flagging anything under 128 catches
            # most beginner mistakes, where grids tend to be tiny.
            min_grid_size = 128
            nblocks = griddim[0] * griddim[1] * griddim[2]
            if nblocks < min_grid_size:
                warn(NumbaPerformanceWarning(
                    f"Grid size {nblocks} will likely result in GPU "
                    "under-utilization due to low occupancy."))

    def __call__(self, *args):
        return self.dispatcher.call(args, self.griddim, self.blockdim,
                                    self.stream, self.sharedmem)
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
class CUDACacheImpl(CacheImpl):
    """Cache implementation hooks for CUDA kernels."""

    def reduce(self, kernel):
        # Serialize via the kernel's own state-reduction protocol.
        return kernel._reduce_states()

    def rebuild(self, target_context, payload):
        # Reconstruct a kernel from its previously reduced state.
        return _Kernel._rebuild(**payload)

    def check_cachable(self, cres):
        # CUDA kernels are always cachable. The usual reasons an entity
        # cannot be cached - lifted loops and dynamic globals - do not
        # apply to CUDA kernels.
        return True
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
class CUDACache(Cache):
    """
    Implements a cache that saves and loads CUDA kernels and compile results.
    """
    _impl_class = CUDACacheImpl

    def load_overload(self, sig, target_context):
        # Loading an overload refreshes the context to ensure it is
        # initialized. We must force the current target to be CUDA so that
        # the correct (i.e. CUDA) target is the one initialized.
        from numba.core.target_extension import target_override
        with target_override('cuda'):
            return super().load_overload(sig, target_context)
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
class CUDADispatcher(Dispatcher, serialize.ReduceMixin):
    '''
    CUDA Dispatcher object. When configured and called, the dispatcher will
    specialize itself for the given arguments (if no suitable specialized
    version already exists) & compute capability, and launch on the device
    associated with the current context.

    Dispatcher objects are not to be constructed by the user, but instead are
    created using the :func:`numba.cuda.jit` decorator.
    '''

    # Whether to fold named arguments and default values. Default values are
    # presently unsupported on CUDA, so we can leave this as False in all
    # cases.
    _fold_args = False

    targetdescr = cuda_target

    def __init__(self, py_func, targetoptions, pipeline_class=CUDACompiler):
        super().__init__(py_func, targetoptions=targetoptions,
                         pipeline_class=pipeline_class)

        # The following properties are for specialization of CUDADispatchers. A
        # specialized CUDADispatcher is one that is compiled for exactly one
        # set of argument types, and bypasses some argument type checking for
        # faster kernel launches.

        # Is this a specialized dispatcher?
        self._specialized = False

        # If we produced specialized dispatchers, we cache them for each set of
        # argument types
        self.specializations = {}

    @property
    def _numba_type_(self):
        return cuda_types.CUDADispatcher(self)

    def enable_caching(self):
        self._cache = CUDACache(self.py_func)

    def configure(self, griddim, blockdim, stream=0, sharedmem=0):
        '''
        Return a launch configuration for the given grid and block
        dimensions, stream and dynamic shared memory size.
        '''
        # NOTE: this method must not be memoized (e.g. with
        # functools.lru_cache). Grid and block dimensions are commonly
        # passed as lists and streams may be unhashable objects, which
        # would raise TypeError on the cache lookup; caching a bound
        # method also keeps the dispatcher alive for the cache's lifetime.
        griddim, blockdim = normalize_kernel_dimensions(griddim, blockdim)
        return _LaunchConfiguration(self, griddim, blockdim, stream, sharedmem)

    def __getitem__(self, args):
        # Supports kernel[griddim, blockdim], optionally with stream and
        # dynamic shared memory size appended.
        if len(args) not in [2, 3, 4]:
            raise ValueError('must specify at least the griddim and blockdim')
        return self.configure(*args)

    def forall(self, ntasks, tpb=0, stream=0, sharedmem=0):
        """Returns a 1D-configured dispatcher for a given number of tasks.

        This assumes that:

        - the kernel maps the Global Thread ID ``cuda.grid(1)`` to tasks on a
          1-1 basis.
        - the kernel checks that the Global Thread ID is upper-bounded by
          ``ntasks``, and does nothing if it is not.

        :param ntasks: The number of tasks.
        :param tpb: The size of a block. An appropriate value is chosen if this
                    parameter is not supplied.
        :param stream: The stream on which the configured dispatcher will be
                       launched.
        :param sharedmem: The number of bytes of dynamic shared memory required
                          by the kernel.
        :return: A configured dispatcher, ready to launch on a set of
                 arguments."""

        return ForAll(self, ntasks, tpb=tpb, stream=stream, sharedmem=sharedmem)

    @property
    def extensions(self):
        '''
        A list of objects that must have a `prepare_args` function. When a
        specialized kernel is called, each argument will be passed through
        to the `prepare_args` (from the last object in this list to the
        first). The arguments to `prepare_args` are:

        - `ty` the numba type of the argument
        - `val` the argument value itself
        - `stream` the CUDA stream used for the current call to the kernel
        - `retr` a list of zero-arg functions that you may want to append
          post-call cleanup work to.

        The `prepare_args` function must return a tuple `(ty, val)`, which
        will be passed in turn to the next right-most `extension`. After all
        the extensions have been called, the resulting `(ty, val)` will be
        passed into Numba's default argument marshalling logic.
        '''
        return self.targetoptions.get('extensions')

    def __call__(self, *args, **kwargs):
        # An attempt to launch an unconfigured kernel
        raise ValueError(missing_launch_config_msg)

    def call(self, args, griddim, blockdim, stream, sharedmem):
        '''
        Compile if necessary and invoke this kernel with *args*.
        '''
        if self.specialized:
            # Specialized dispatchers have exactly one overload.
            kernel = next(iter(self.overloads.values()))
        else:
            kernel = _dispatcher.Dispatcher._cuda_call(self, *args)

        kernel.launch(args, griddim, blockdim, stream, sharedmem)

    def _compile_for_args(self, *args, **kws):
        # Based on _DispatcherBase._compile_for_args.
        assert not kws
        argtypes = [self.typeof_pyval(a) for a in args]
        return self.compile(tuple(argtypes))

    def typeof_pyval(self, val):
        # Based on _DispatcherBase.typeof_pyval, but differs from it to support
        # the CUDA Array Interface.
        try:
            return typeof(val, Purpose.argument)
        except ValueError:
            if cuda.is_cuda_array(val):
                # When typing, we don't need to synchronize on the array's
                # stream - this is done when the kernel is launched.
                return typeof(cuda.as_cuda_array(val, sync=False),
                              Purpose.argument)
            else:
                raise

    def specialize(self, *args):
        '''
        Create a new instance of this dispatcher specialized for the given
        *args*.
        '''
        if self.specialized:
            raise RuntimeError('Dispatcher already specialized')

        cc = get_current_device().compute_capability
        argtypes = tuple(self.typeof_pyval(a) for a in args)

        # Reuse a previously created specialization if one exists for this
        # compute capability and argument type combination.
        specialization = self.specializations.get((cc, argtypes))
        if specialization:
            return specialization

        targetoptions = self.targetoptions
        specialization = CUDADispatcher(self.py_func,
                                        targetoptions=targetoptions)
        specialization.compile(argtypes)
        specialization.disable_compile()
        specialization._specialized = True
        self.specializations[cc, argtypes] = specialization
        return specialization

    @property
    def specialized(self):
        """
        True if the Dispatcher has been specialized.
        """
        return self._specialized

    def get_regs_per_thread(self, signature=None):
        '''
        Returns the number of registers used by each thread in this kernel for
        the device in the current context.

        :param signature: The signature of the compiled kernel to get register
                          usage for. This may be omitted for a specialized
                          kernel.
        :return: The number of registers used by the compiled variant of the
                 kernel for the given signature and current device.
        '''
        if signature is not None:
            return self.overloads[signature.args].regs_per_thread
        if self.specialized:
            return next(iter(self.overloads.values())).regs_per_thread
        else:
            return {sig: overload.regs_per_thread
                    for sig, overload in self.overloads.items()}

    def get_const_mem_size(self, signature=None):
        '''
        Returns the size in bytes of constant memory used by this kernel for
        the device in the current context.

        :param signature: The signature of the compiled kernel to get constant
                          memory usage for. This may be omitted for a
                          specialized kernel.
        :return: The size in bytes of constant memory allocated by the
                 compiled variant of the kernel for the given signature and
                 current device.
        '''
        if signature is not None:
            return self.overloads[signature.args].const_mem_size
        if self.specialized:
            return next(iter(self.overloads.values())).const_mem_size
        else:
            return {sig: overload.const_mem_size
                    for sig, overload in self.overloads.items()}

    def get_shared_mem_per_block(self, signature=None):
        '''
        Returns the size in bytes of statically allocated shared memory
        for this kernel.

        :param signature: The signature of the compiled kernel to get shared
                          memory usage for. This may be omitted for a
                          specialized kernel.
        :return: The amount of shared memory allocated by the compiled variant
                 of the kernel for the given signature and current device.
        '''
        if signature is not None:
            return self.overloads[signature.args].shared_mem_per_block
        if self.specialized:
            return next(iter(self.overloads.values())).shared_mem_per_block
        else:
            return {sig: overload.shared_mem_per_block
                    for sig, overload in self.overloads.items()}

    def get_max_threads_per_block(self, signature=None):
        '''
        Returns the maximum allowable number of threads per block
        for this kernel. Exceeding this threshold will result in
        the kernel failing to launch.

        :param signature: The signature of the compiled kernel to get the max
                          threads per block for. This may be omitted for a
                          specialized kernel.
        :return: The maximum allowable threads per block for the compiled
                 variant of the kernel for the given signature and current
                 device.
        '''
        if signature is not None:
            return self.overloads[signature.args].max_threads_per_block
        if self.specialized:
            return next(iter(self.overloads.values())).max_threads_per_block
        else:
            return {sig: overload.max_threads_per_block
                    for sig, overload in self.overloads.items()}

    def get_local_mem_per_thread(self, signature=None):
        '''
        Returns the size in bytes of local memory per thread
        for this kernel.

        :param signature: The signature of the compiled kernel to get local
                          memory usage for. This may be omitted for a
                          specialized kernel.
        :return: The amount of local memory allocated by the compiled variant
                 of the kernel for the given signature and current device.
        '''
        if signature is not None:
            return self.overloads[signature.args].local_mem_per_thread
        if self.specialized:
            return next(iter(self.overloads.values())).local_mem_per_thread
        else:
            return {sig: overload.local_mem_per_thread
                    for sig, overload in self.overloads.items()}

    def get_call_template(self, args, kws):
        # Originally copied from _DispatcherBase.get_call_template. This
        # version deviates slightly from the _DispatcherBase version in order
        # to force casts when calling device functions. See e.g.
        # TestDeviceFunc.test_device_casting, added in PR #7496.
        """
        Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types. This allows resolution of the return type.

        A (template, pysig, args, kws) tuple is returned.
        """
        # Ensure an exactly-matching overload is available if we can
        # compile. We proceed with the typing even if we can't compile
        # because we may be able to force a cast on the caller side.
        if self._can_compile:
            self.compile_device(tuple(args))

        # Create function type for typing
        func_name = self.py_func.__name__
        name = "CallTemplate({0})".format(func_name)

        call_template = typing.make_concrete_template(
            name, key=func_name, signatures=self.nopython_signatures)
        pysig = utils.pysignature(self.py_func)

        return call_template, pysig, args, kws

    def compile_device(self, args, return_type=None):
        """Compile the device function for the given argument types.

        Each signature is compiled once by caching the compiled function inside
        this object.

        Returns the `CompileResult`.
        """
        if args not in self.overloads:
            with self._compiling_counter:

                debug = self.targetoptions.get('debug')
                lineinfo = self.targetoptions.get('lineinfo')
                inline = self.targetoptions.get('inline')
                fastmath = self.targetoptions.get('fastmath')

                nvvm_options = {
                    'opt': 3 if self.targetoptions.get('opt') else 0,
                    'fastmath': fastmath
                }

                cc = get_current_device().compute_capability
                cres = compile_cuda(self.py_func, return_type, args,
                                    debug=debug,
                                    lineinfo=lineinfo,
                                    inline=inline,
                                    fastmath=fastmath,
                                    nvvm_options=nvvm_options,
                                    cc=cc)
                self.overloads[args] = cres

                cres.target_context.insert_user_function(cres.entry_point,
                                                         cres.fndesc,
                                                         [cres.library])
        else:
            cres = self.overloads[args]

        return cres

    def add_overload(self, kernel, argtypes):
        # Register a compiled kernel with the C-level dispatcher and the
        # in-memory overload map.
        c_sig = [a._code for a in argtypes]
        self._insert(c_sig, kernel, cuda=True)
        self.overloads[argtypes] = kernel

    def compile(self, sig):
        '''
        Compile and bind to the current context a version of this kernel
        specialized for the given signature.
        '''
        argtypes, return_type = sigutils.normalize_signature(sig)
        assert return_type is None or return_type == types.none

        # Do we already have an in-memory compiled kernel?
        if self.specialized:
            return next(iter(self.overloads.values()))
        else:
            kernel = self.overloads.get(argtypes)
            if kernel is not None:
                return kernel

        # Can we load from the disk cache?
        kernel = self._cache.load_overload(sig, self.targetctx)

        if kernel is not None:
            self._cache_hits[sig] += 1
        else:
            # We need to compile a new kernel
            self._cache_misses[sig] += 1
            if not self._can_compile:
                raise RuntimeError("Compilation disabled")

            kernel = _Kernel(self.py_func, argtypes, **self.targetoptions)
            # We call bind to force codegen, so that there is a cubin to cache
            kernel.bind()
            self._cache.save_overload(sig, kernel)

        self.add_overload(kernel, argtypes)

        return kernel

    def inspect_llvm(self, signature=None):
        '''
        Return the LLVM IR for this kernel.

        :param signature: A tuple of argument types.
        :return: The LLVM IR for the given signature, or a dict of LLVM IR
                 for all previously-encountered signatures.

        '''
        device = self.targetoptions.get('device')
        if signature is not None:
            if device:
                return self.overloads[signature].library.get_llvm_str()
            else:
                return self.overloads[signature].inspect_llvm()
        else:
            if device:
                return {sig: overload.library.get_llvm_str()
                        for sig, overload in self.overloads.items()}
            else:
                return {sig: overload.inspect_llvm()
                        for sig, overload in self.overloads.items()}

    def inspect_asm(self, signature=None):
        '''
        Return this kernel's PTX assembly code for for the device in the
        current context.

        :param signature: A tuple of argument types.
        :return: The PTX code for the given signature, or a dict of PTX codes
                 for all previously-encountered signatures.
        '''
        cc = get_current_device().compute_capability
        device = self.targetoptions.get('device')
        if signature is not None:
            if device:
                return self.overloads[signature].library.get_asm_str(cc)
            else:
                return self.overloads[signature].inspect_asm(cc)
        else:
            if device:
                return {sig: overload.library.get_asm_str(cc)
                        for sig, overload in self.overloads.items()}
            else:
                return {sig: overload.inspect_asm(cc)
                        for sig, overload in self.overloads.items()}

    def inspect_sass_cfg(self, signature=None):
        '''
        Return this kernel's CFG for the device in the current context.

        :param signature: A tuple of argument types.
        :return: The CFG for the given signature, or a dict of CFGs
                 for all previously-encountered signatures.

        The CFG for the device in the current context is returned.

        Requires nvdisasm to be available on the PATH.
        '''
        if self.targetoptions.get('device'):
            raise RuntimeError('Cannot get the CFG of a device function')

        if signature is not None:
            return self.overloads[signature].inspect_sass_cfg()
        else:
            return {sig: defn.inspect_sass_cfg()
                    for sig, defn in self.overloads.items()}

    def inspect_sass(self, signature=None):
        '''
        Return this kernel's SASS assembly code for for the device in the
        current context.

        :param signature: A tuple of argument types.
        :return: The SASS code for the given signature, or a dict of SASS codes
                 for all previously-encountered signatures.

        SASS for the device in the current context is returned.

        Requires nvdisasm to be available on the PATH.
        '''
        if self.targetoptions.get('device'):
            raise RuntimeError('Cannot inspect SASS of a device function')

        if signature is not None:
            return self.overloads[signature].inspect_sass()
        else:
            return {sig: defn.inspect_sass()
                    for sig, defn in self.overloads.items()}

    def inspect_types(self, file=None):
        '''
        Produce a dump of the Python source of this function annotated with the
        corresponding Numba IR and type information. The dump is written to
        *file*, or *sys.stdout* if *file* is *None*.
        '''
        if file is None:
            file = sys.stdout

        for _, defn in self.overloads.items():
            defn.inspect_types(file=file)

    @classmethod
    def _rebuild(cls, py_func, targetoptions):
        """
        Rebuild an instance.
        """
        instance = cls(py_func, targetoptions)
        return instance

    def _reduce_states(self):
        """
        Reduce the instance for serialization.
        Compiled definitions are discarded.
        """
        return dict(py_func=self.py_func,
                    targetoptions=self.targetoptions)
|
lib/python3.10/site-packages/numba/cuda/errors.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numbers
|
| 2 |
+
from numba.core.errors import LoweringError
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class KernelRuntimeError(RuntimeError):
    """Raised when a CUDA kernel reports a runtime failure, carrying the
    failing thread index (``tid``) and block index (``ctaid``) when known."""

    def __init__(self, msg, tid=None, ctaid=None):
        self.tid = tid
        self.ctaid = ctaid
        self.msg = msg
        full_message = ("An exception was raised in thread=%s block=%s\n"
                        "\t%s") % (tid, ctaid, msg)
        super(KernelRuntimeError, self).__init__(full_message)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class CudaLoweringError(LoweringError):
    """Lowering error specific to the CUDA target."""
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Documentation link embedded in the missing-launch-configuration message below.
_launch_help_url = ("https://numba.readthedocs.io/en/stable/cuda/"
                    "kernels.html#kernel-invocation")
# Error message raised when a kernel is called without a [griddim, blockdim]
# launch configuration.
missing_launch_config_msg = """
Kernel launch configuration was not specified. Use the syntax:

kernel_function[blockspergrid, threadsperblock](arg0, arg1, ..., argn)

See {} for help.

""".format(_launch_help_url)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def normalize_kernel_dimensions(griddim, blockdim):
    """
    Normalize and validate the user-supplied kernel dimensions.

    Scalars and sequences of 1-3 integers are accepted; each dimension is
    returned as a 3-tuple, padded with 1s where components are omitted.
    """

    def check_dim(dim, name):
        # Accept a bare scalar or a 1-3 element sequence of integers.
        dims = list(dim) if isinstance(dim, (tuple, list)) else [dim]
        if len(dims) > 3:
            raise ValueError('%s must be a sequence of 1, 2 or 3 integers, '
                             'got %r' % (name, dims))
        for component in dims:
            if not isinstance(component, numbers.Integral):
                raise TypeError('%s must be a sequence of integers, got %r'
                                % (name, dims))
        # Pad out to three components so callers always see (x, y, z).
        dims.extend([1] * (3 - len(dims)))
        return tuple(dims)

    if None in (griddim, blockdim):
        raise ValueError(missing_launch_config_msg)

    return check_dim(griddim, 'griddim'), check_dim(blockdim, 'blockdim')
|
lib/python3.10/site-packages/numba/cuda/extending.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Added for symmetry with the core API
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from numba.core.extending import intrinsic as _intrinsic
|
| 6 |
+
|
| 7 |
+
intrinsic = _intrinsic(target='cuda')
|
lib/python3.10/site-packages/numba/cuda/initialize.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def initialize_all():
    """Register the CUDA target's data models, jit decorator and dispatcher
    class with Numba's target extension registries."""
    # Importing the models module registers them with the data model manager
    # as a side effect.
    import numba.cuda.models  # noqa: F401

    from numba.core.target_extension import (target_registry,
                                             dispatcher_registry,
                                             jit_registry)
    from numba.cuda.decorators import jit
    from numba.cuda.dispatcher import CUDADispatcher

    cuda_tgt = target_registry["cuda"]
    jit_registry[cuda_tgt] = jit
    dispatcher_registry[cuda_tgt] = CUDADispatcher
|
lib/python3.10/site-packages/numba/cuda/intrinsic_wrapper.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .decorators import jit
|
| 2 |
+
import numba
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@jit(device=True)
def all_sync(mask, predicate):
    """
    If for all threads in the masked warp the predicate is true, then
    a non-zero value is returned, otherwise 0 is returned.
    """
    # Mode 0 of vote_sync_intrinsic appears to select the "all" vote
    # (TODO confirm against the intrinsic's definition); element [1] of
    # the returned pair is the vote outcome.
    return numba.cuda.vote_sync_intrinsic(mask, 0, predicate)[1]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@jit(device=True)
def any_sync(mask, predicate):
    """
    If for any thread in the masked warp the predicate is true, then
    a non-zero value is returned, otherwise 0 is returned.
    """
    # Mode 1 presumably selects the "any" vote; [1] is the vote outcome.
    return numba.cuda.vote_sync_intrinsic(mask, 1, predicate)[1]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@jit(device=True)
def eq_sync(mask, predicate):
    """
    If for all threads in the masked warp the boolean predicate is the same,
    then a non-zero value is returned, otherwise 0 is returned.
    """
    # Mode 2 presumably selects the "uniform/eq" vote; [1] is the outcome.
    return numba.cuda.vote_sync_intrinsic(mask, 2, predicate)[1]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@jit(device=True)
def ballot_sync(mask, predicate):
    """
    Returns a mask of all threads in the warp whose predicate is true,
    and are within the given mask.
    """
    # Mode 3 presumably selects the "ballot" vote; unlike the other vote
    # wrappers, element [0] (the per-lane bitmask) is returned here.
    return numba.cuda.vote_sync_intrinsic(mask, 3, predicate)[0]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@jit(device=True)
def shfl_sync(mask, value, src_lane):
    """
    Shuffles value across the masked warp and returns the value
    from src_lane. If this is outside the warp, then the
    given value is returned.
    """
    # Mode 0 is the indexed ("idx") shuffle; 0x1f clamps the lane index to
    # the warp width (32 lanes). Element [0] is the shuffled value.
    return numba.cuda.shfl_sync_intrinsic(mask, 0, value, src_lane, 0x1f)[0]
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@jit(device=True)
def shfl_up_sync(mask, value, delta):
    """
    Shuffles value across the masked warp and returns the value
    from (laneid - delta). If this is outside the warp, then the
    given value is returned.
    """
    # Mode 1 is the "up" shuffle; the clamp argument is 0 here (unlike the
    # other shuffle wrappers' 0x1f). Element [0] is the shuffled value.
    return numba.cuda.shfl_sync_intrinsic(mask, 1, value, delta, 0)[0]
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@jit(device=True)
def shfl_down_sync(mask, value, delta):
    """
    Shuffles value across the masked warp and returns the value
    from (laneid + delta). If this is outside the warp, then the
    given value is returned.
    """
    # Mode 2 is the "down" shuffle with clamp 0x1f (warp width - 1).
    # Element [0] is the shuffled value.
    return numba.cuda.shfl_sync_intrinsic(mask, 2, value, delta, 0x1f)[0]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@jit(device=True)
def shfl_xor_sync(mask, value, lane_mask):
    """
    Shuffles value across the masked warp and returns the value
    from (laneid ^ lane_mask).
    """
    # Mode 3 is the "butterfly" (xor) shuffle with clamp 0x1f.
    # Element [0] is the shuffled value.
    return numba.cuda.shfl_sync_intrinsic(mask, 3, value, lane_mask, 0x1f)[0]
|
lib/python3.10/site-packages/numba/cuda/intrinsics.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llvmlite import ir
|
| 2 |
+
|
| 3 |
+
from numba import cuda, types
|
| 4 |
+
from numba.core import cgutils
|
| 5 |
+
from numba.core.errors import RequireLiteralValue, NumbaValueError
|
| 6 |
+
from numba.core.typing import signature
|
| 7 |
+
from numba.core.extending import overload_attribute
|
| 8 |
+
from numba.cuda import nvvmutils
|
| 9 |
+
from numba.cuda.extending import intrinsic
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
#-------------------------------------------------------------------------------
|
| 13 |
+
# Grid functions
|
| 14 |
+
|
| 15 |
+
def _type_grid_function(ndim):
    """Build the call signature shared by ``grid()`` and ``gridsize()``.

    *ndim* is an IntegerLiteral whose value must be 1, 2 or 3.  The result
    type is a single int64 for ndim == 1, otherwise a UniTuple of int64s of
    length ndim.  Raises NumbaValueError for any other literal value.
    """
    val = ndim.literal_value
    if val not in (1, 2, 3):
        raise NumbaValueError('argument can only be 1, 2, 3')

    restype = types.int64 if val == 1 else types.UniTuple(types.int64, val)
    return signature(restype, types.int32)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@intrinsic
def grid(typingctx, ndim):
    '''grid(ndim)

    Return the absolute position of the current thread in the entire grid of
    blocks. *ndim* should correspond to the number of dimensions declared when
    instantiating the kernel. If *ndim* is 1, a single integer is returned.
    If *ndim* is 2 or 3, a tuple of the given number of integers is returned.

    Computation of the first integer is as follows::

        cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x

    and is similar for the other two indices, but using the ``y`` and ``z``
    attributes.
    '''

    # The dimension count must be a compile-time literal so the return type
    # (scalar vs. tuple) can be fixed during typing.
    if not isinstance(ndim, types.IntegerLiteral):
        raise RequireLiteralValue(ndim)

    sig = _type_grid_function(ndim)

    def codegen(context, builder, sig, args):
        rettype = sig.return_type
        if isinstance(rettype, types.UniTuple):
            # 2D/3D: fetch one global id per dimension and pack them.
            indices = nvvmutils.get_global_id(builder, dim=rettype.count)
            return cgutils.pack_array(builder, indices)
        # 1D: a single int64 global id.
        return nvvmutils.get_global_id(builder, dim=1)

    return sig, codegen
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@intrinsic
def gridsize(typingctx, ndim):
    '''gridsize(ndim)

    Return the absolute size (or shape) in threads of the entire grid of
    blocks. *ndim* should correspond to the number of dimensions declared when
    instantiating the kernel. If *ndim* is 1, a single integer is returned.
    If *ndim* is 2 or 3, a tuple of the given number of integers is returned.

    Computation of the first integer is as follows::

        cuda.blockDim.x * cuda.gridDim.x

    and is similar for the other two indices, but using the ``y`` and ``z``
    attributes.
    '''

    # The dimension count must be a compile-time literal so the return type
    # (scalar vs. tuple) can be fixed during typing.
    if not isinstance(ndim, types.IntegerLiteral):
        raise RequireLiteralValue(ndim)

    sig = _type_grid_function(ndim)

    def _axis_thread_count(builder, axis):
        # Total threads along one axis: ntid (block dim) * nctaid (grid dim),
        # both widened from i32 special registers to i64 before multiplying.
        i64 = ir.IntType(64)
        ntid = nvvmutils.call_sreg(builder, f"ntid.{axis}")
        nctaid = nvvmutils.call_sreg(builder, f"nctaid.{axis}")
        return builder.mul(builder.sext(ntid, i64), builder.sext(nctaid, i64))

    def codegen(context, builder, sig, args):
        rettype = sig.return_type
        nx = _axis_thread_count(builder, 'x')

        if rettype == types.int64:
            return nx

        # 2D/3D: gather a count per requested axis, then pack.
        counts = [nx, _axis_thread_count(builder, 'y')]
        if rettype.count == 3:
            counts.append(_axis_thread_count(builder, 'z'))
        return cgutils.pack_array(builder, tuple(counts))

    return sig, codegen
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@intrinsic
def _warpsize(typingctx):
    # Intrinsic backing the ``cuda.warpsize`` attribute: reads the
    # ``warpsize`` special register and returns it as an int32.
    sig = signature(types.int32)

    def codegen(context, builder, sig, args):
        return nvvmutils.call_sreg(builder, 'warpsize')

    return sig, codegen
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@overload_attribute(types.Module(cuda), 'warpsize', target='cuda')
def cuda_warpsize(mod):
    '''
    The size of a warp. All architectures implemented to date have a warp size
    of 32.
    '''
    # The overload returns a getter that defers to the _warpsize intrinsic.
    def getter(mod):
        return _warpsize()

    return getter
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
#-------------------------------------------------------------------------------
|
| 128 |
+
# syncthreads
|
| 129 |
+
|
| 130 |
+
@intrinsic
def syncthreads(typingctx):
    '''
    Synchronize all threads in the same thread block. This function implements
    the same pattern as barriers in traditional multi-threaded programming: this
    function waits until all threads in the block call it, at which point it
    returns control to all its callers.
    '''
    sig = signature(types.none)

    def codegen(context, builder, sig, args):
        # Emit a call to the NVVM block-wide barrier intrinsic.
        barrier_ty = ir.FunctionType(ir.VoidType(), ())
        barrier = cgutils.get_or_insert_function(builder.module, barrier_ty,
                                                 'llvm.nvvm.barrier0')
        builder.call(barrier, ())
        # The intrinsic has no value; return a dummy for the ``none`` type.
        return context.get_dummy_value()

    return sig, codegen
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _syncthreads_predicate(typingctx, predicate, fname):
    """Shared typing/lowering for the predicated barrier intrinsics.

    Returns ``(signature, codegen)`` lowering to a call of the NVVM barrier
    intrinsic named *fname* (i32 -> i32), or None when *predicate* is not an
    integer type so that typing fails.
    """
    if not isinstance(predicate, types.Integer):
        return None

    sig = signature(types.i4, types.i4)

    def codegen(context, builder, sig, args):
        i32 = ir.IntType(32)
        barrier = cgutils.get_or_insert_function(builder.module,
                                                 ir.FunctionType(i32, (i32,)),
                                                 fname)
        return builder.call(barrier, args)

    return sig, codegen
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@intrinsic
def syncthreads_count(typingctx, predicate):
    '''
    syncthreads_count(predicate)

    An extension to numba.cuda.syncthreads where the return value is a count
    of the threads where predicate is true.
    '''
    # Lowered via the popcount-flavoured barrier intrinsic.
    return _syncthreads_predicate(typingctx, predicate,
                                  'llvm.nvvm.barrier0.popc')
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@intrinsic
def syncthreads_and(typingctx, predicate):
    '''
    syncthreads_and(predicate)

    An extension to numba.cuda.syncthreads where 1 is returned if predicate is
    true for all threads or 0 otherwise.
    '''
    # Lowered via the AND-reduction barrier intrinsic.
    return _syncthreads_predicate(typingctx, predicate,
                                  'llvm.nvvm.barrier0.and')
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
@intrinsic
def syncthreads_or(typingctx, predicate):
    '''
    syncthreads_or(predicate)

    An extension to numba.cuda.syncthreads where 1 is returned if predicate is
    true for any thread or 0 otherwise.
    '''
    # Lowered via the OR-reduction barrier intrinsic.
    return _syncthreads_predicate(typingctx, predicate,
                                  'llvm.nvvm.barrier0.or')
|
lib/python3.10/site-packages/numba/cuda/libdevice.py
ADDED
|
@@ -0,0 +1,3382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def abs(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_abs.html

    :param x: Argument.
    :type x: int32
    :rtype: int32
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_abs during
    # CUDA lowering -- no Python body.  Intentionally shadows the builtin
    # ``abs`` to mirror the libdevice function name.
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def acos(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acos.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_acos during
    # CUDA lowering -- no Python body.
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def acosf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acosf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_acosf during
    # CUDA lowering -- no Python body.
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def acosh(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acosh.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_acosh during
    # CUDA lowering -- no Python body.
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def acoshf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acoshf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_acoshf
    # during CUDA lowering -- no Python body.
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def asin(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asin.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_asin during
    # CUDA lowering -- no Python body.
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def asinf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asinf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_asinf during
    # CUDA lowering -- no Python body.
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def asinh(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asinh.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_asinh during
    # CUDA lowering -- no Python body.
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def asinhf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asinhf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_asinhf
    # during CUDA lowering -- no Python body.
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def atan(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atan.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_atan during
    # CUDA lowering -- no Python body.
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def atan2(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atan2.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :rtype: float64
    """
    # Declaration-only stub: presumably bound to libdevice's __nv_atan2 during
    # CUDA lowering -- no Python body.
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def atan2f(x, y):
|
| 114 |
+
"""
|
| 115 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atan2f.html
|
| 116 |
+
|
| 117 |
+
:param x: Argument.
|
| 118 |
+
:type x: float32
|
| 119 |
+
:param y: Argument.
|
| 120 |
+
:type y: float32
|
| 121 |
+
:rtype: float32
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def atanf(x):
|
| 126 |
+
"""
|
| 127 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atanf.html
|
| 128 |
+
|
| 129 |
+
:param x: Argument.
|
| 130 |
+
:type x: float32
|
| 131 |
+
:rtype: float32
|
| 132 |
+
"""
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def atanh(x):
|
| 136 |
+
"""
|
| 137 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atanh.html
|
| 138 |
+
|
| 139 |
+
:param x: Argument.
|
| 140 |
+
:type x: float64
|
| 141 |
+
:rtype: float64
|
| 142 |
+
"""
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def atanhf(x):
|
| 146 |
+
"""
|
| 147 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atanhf.html
|
| 148 |
+
|
| 149 |
+
:param x: Argument.
|
| 150 |
+
:type x: float32
|
| 151 |
+
:rtype: float32
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def brev(x):
|
| 156 |
+
"""
|
| 157 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_brev.html
|
| 158 |
+
|
| 159 |
+
:param x: Argument.
|
| 160 |
+
:type x: int32
|
| 161 |
+
:rtype: int32
|
| 162 |
+
"""
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def brevll(x):
|
| 166 |
+
"""
|
| 167 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_brevll.html
|
| 168 |
+
|
| 169 |
+
:param x: Argument.
|
| 170 |
+
:type x: int64
|
| 171 |
+
:rtype: int64
|
| 172 |
+
"""
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def byte_perm(x, y, z):
|
| 176 |
+
"""
|
| 177 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_byte_perm.html
|
| 178 |
+
|
| 179 |
+
:param x: Argument.
|
| 180 |
+
:type x: int32
|
| 181 |
+
:param y: Argument.
|
| 182 |
+
:type y: int32
|
| 183 |
+
:param z: Argument.
|
| 184 |
+
:type z: int32
|
| 185 |
+
:rtype: int32
|
| 186 |
+
"""
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def cbrt(x):
|
| 190 |
+
"""
|
| 191 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrt.html
|
| 192 |
+
|
| 193 |
+
:param x: Argument.
|
| 194 |
+
:type x: float64
|
| 195 |
+
:rtype: float64
|
| 196 |
+
"""
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def cbrtf(x):
|
| 200 |
+
"""
|
| 201 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrtf.html
|
| 202 |
+
|
| 203 |
+
:param x: Argument.
|
| 204 |
+
:type x: float32
|
| 205 |
+
:rtype: float32
|
| 206 |
+
"""
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def ceil(x):
|
| 210 |
+
"""
|
| 211 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ceil.html
|
| 212 |
+
|
| 213 |
+
:param x: Argument.
|
| 214 |
+
:type x: float64
|
| 215 |
+
:rtype: float64
|
| 216 |
+
"""
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def ceilf(x):
|
| 220 |
+
"""
|
| 221 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ceilf.html
|
| 222 |
+
|
| 223 |
+
:param x: Argument.
|
| 224 |
+
:type x: float32
|
| 225 |
+
:rtype: float32
|
| 226 |
+
"""
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def clz(x):
|
| 230 |
+
"""
|
| 231 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_clz.html
|
| 232 |
+
|
| 233 |
+
:param x: Argument.
|
| 234 |
+
:type x: int32
|
| 235 |
+
:rtype: int32
|
| 236 |
+
"""
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def clzll(x):
|
| 240 |
+
"""
|
| 241 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_clzll.html
|
| 242 |
+
|
| 243 |
+
:param x: Argument.
|
| 244 |
+
:type x: int64
|
| 245 |
+
:rtype: int32
|
| 246 |
+
"""
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def copysign(x, y):
|
| 250 |
+
"""
|
| 251 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_copysign.html
|
| 252 |
+
|
| 253 |
+
:param x: Argument.
|
| 254 |
+
:type x: float64
|
| 255 |
+
:param y: Argument.
|
| 256 |
+
:type y: float64
|
| 257 |
+
:rtype: float64
|
| 258 |
+
"""
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def copysignf(x, y):
|
| 262 |
+
"""
|
| 263 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_copysignf.html
|
| 264 |
+
|
| 265 |
+
:param x: Argument.
|
| 266 |
+
:type x: float32
|
| 267 |
+
:param y: Argument.
|
| 268 |
+
:type y: float32
|
| 269 |
+
:rtype: float32
|
| 270 |
+
"""
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def cos(x):
|
| 274 |
+
"""
|
| 275 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cos.html
|
| 276 |
+
|
| 277 |
+
:param x: Argument.
|
| 278 |
+
:type x: float64
|
| 279 |
+
:rtype: float64
|
| 280 |
+
"""
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def cosf(x):
|
| 284 |
+
"""
|
| 285 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cosf.html
|
| 286 |
+
|
| 287 |
+
:param x: Argument.
|
| 288 |
+
:type x: float32
|
| 289 |
+
:rtype: float32
|
| 290 |
+
"""
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def cosh(x):
|
| 294 |
+
"""
|
| 295 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cosh.html
|
| 296 |
+
|
| 297 |
+
:param x: Argument.
|
| 298 |
+
:type x: float64
|
| 299 |
+
:rtype: float64
|
| 300 |
+
"""
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def coshf(x):
|
| 304 |
+
"""
|
| 305 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_coshf.html
|
| 306 |
+
|
| 307 |
+
:param x: Argument.
|
| 308 |
+
:type x: float32
|
| 309 |
+
:rtype: float32
|
| 310 |
+
"""
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def cospi(x):
|
| 314 |
+
"""
|
| 315 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cospi.html
|
| 316 |
+
|
| 317 |
+
:param x: Argument.
|
| 318 |
+
:type x: float64
|
| 319 |
+
:rtype: float64
|
| 320 |
+
"""
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def cospif(x):
|
| 324 |
+
"""
|
| 325 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cospif.html
|
| 326 |
+
|
| 327 |
+
:param x: Argument.
|
| 328 |
+
:type x: float32
|
| 329 |
+
:rtype: float32
|
| 330 |
+
"""
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def dadd_rd(x, y):
|
| 334 |
+
"""
|
| 335 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_rd.html
|
| 336 |
+
|
| 337 |
+
:param x: Argument.
|
| 338 |
+
:type x: float64
|
| 339 |
+
:param y: Argument.
|
| 340 |
+
:type y: float64
|
| 341 |
+
:rtype: float64
|
| 342 |
+
"""
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def dadd_rn(x, y):
|
| 346 |
+
"""
|
| 347 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_rn.html
|
| 348 |
+
|
| 349 |
+
:param x: Argument.
|
| 350 |
+
:type x: float64
|
| 351 |
+
:param y: Argument.
|
| 352 |
+
:type y: float64
|
| 353 |
+
:rtype: float64
|
| 354 |
+
"""
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def dadd_ru(x, y):
|
| 358 |
+
"""
|
| 359 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_ru.html
|
| 360 |
+
|
| 361 |
+
:param x: Argument.
|
| 362 |
+
:type x: float64
|
| 363 |
+
:param y: Argument.
|
| 364 |
+
:type y: float64
|
| 365 |
+
:rtype: float64
|
| 366 |
+
"""
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def dadd_rz(x, y):
|
| 370 |
+
"""
|
| 371 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_rz.html
|
| 372 |
+
|
| 373 |
+
:param x: Argument.
|
| 374 |
+
:type x: float64
|
| 375 |
+
:param y: Argument.
|
| 376 |
+
:type y: float64
|
| 377 |
+
:rtype: float64
|
| 378 |
+
"""
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def ddiv_rd(x, y):
|
| 382 |
+
"""
|
| 383 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_rd.html
|
| 384 |
+
|
| 385 |
+
:param x: Argument.
|
| 386 |
+
:type x: float64
|
| 387 |
+
:param y: Argument.
|
| 388 |
+
:type y: float64
|
| 389 |
+
:rtype: float64
|
| 390 |
+
"""
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def ddiv_rn(x, y):
|
| 394 |
+
"""
|
| 395 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_rn.html
|
| 396 |
+
|
| 397 |
+
:param x: Argument.
|
| 398 |
+
:type x: float64
|
| 399 |
+
:param y: Argument.
|
| 400 |
+
:type y: float64
|
| 401 |
+
:rtype: float64
|
| 402 |
+
"""
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def ddiv_ru(x, y):
|
| 406 |
+
"""
|
| 407 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_ru.html
|
| 408 |
+
|
| 409 |
+
:param x: Argument.
|
| 410 |
+
:type x: float64
|
| 411 |
+
:param y: Argument.
|
| 412 |
+
:type y: float64
|
| 413 |
+
:rtype: float64
|
| 414 |
+
"""
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def ddiv_rz(x, y):
|
| 418 |
+
"""
|
| 419 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_rz.html
|
| 420 |
+
|
| 421 |
+
:param x: Argument.
|
| 422 |
+
:type x: float64
|
| 423 |
+
:param y: Argument.
|
| 424 |
+
:type y: float64
|
| 425 |
+
:rtype: float64
|
| 426 |
+
"""
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def dmul_rd(x, y):
|
| 430 |
+
"""
|
| 431 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_rd.html
|
| 432 |
+
|
| 433 |
+
:param x: Argument.
|
| 434 |
+
:type x: float64
|
| 435 |
+
:param y: Argument.
|
| 436 |
+
:type y: float64
|
| 437 |
+
:rtype: float64
|
| 438 |
+
"""
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def dmul_rn(x, y):
|
| 442 |
+
"""
|
| 443 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_rn.html
|
| 444 |
+
|
| 445 |
+
:param x: Argument.
|
| 446 |
+
:type x: float64
|
| 447 |
+
:param y: Argument.
|
| 448 |
+
:type y: float64
|
| 449 |
+
:rtype: float64
|
| 450 |
+
"""
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def dmul_ru(x, y):
|
| 454 |
+
"""
|
| 455 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_ru.html
|
| 456 |
+
|
| 457 |
+
:param x: Argument.
|
| 458 |
+
:type x: float64
|
| 459 |
+
:param y: Argument.
|
| 460 |
+
:type y: float64
|
| 461 |
+
:rtype: float64
|
| 462 |
+
"""
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def dmul_rz(x, y):
|
| 466 |
+
"""
|
| 467 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_rz.html
|
| 468 |
+
|
| 469 |
+
:param x: Argument.
|
| 470 |
+
:type x: float64
|
| 471 |
+
:param y: Argument.
|
| 472 |
+
:type y: float64
|
| 473 |
+
:rtype: float64
|
| 474 |
+
"""
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def double2float_rd(d):
|
| 478 |
+
"""
|
| 479 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_rd.html
|
| 480 |
+
|
| 481 |
+
:param d: Argument.
|
| 482 |
+
:type d: float64
|
| 483 |
+
:rtype: float32
|
| 484 |
+
"""
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
def double2float_rn(d):
|
| 488 |
+
"""
|
| 489 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_rn.html
|
| 490 |
+
|
| 491 |
+
:param d: Argument.
|
| 492 |
+
:type d: float64
|
| 493 |
+
:rtype: float32
|
| 494 |
+
"""
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def double2float_ru(d):
|
| 498 |
+
"""
|
| 499 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_ru.html
|
| 500 |
+
|
| 501 |
+
:param d: Argument.
|
| 502 |
+
:type d: float64
|
| 503 |
+
:rtype: float32
|
| 504 |
+
"""
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def double2float_rz(d):
|
| 508 |
+
"""
|
| 509 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_rz.html
|
| 510 |
+
|
| 511 |
+
:param d: Argument.
|
| 512 |
+
:type d: float64
|
| 513 |
+
:rtype: float32
|
| 514 |
+
"""
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def double2hiint(d):
|
| 518 |
+
"""
|
| 519 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2hiint.html
|
| 520 |
+
|
| 521 |
+
:param d: Argument.
|
| 522 |
+
:type d: float64
|
| 523 |
+
:rtype: int32
|
| 524 |
+
"""
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def double2int_rd(d):
|
| 528 |
+
"""
|
| 529 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_rd.html
|
| 530 |
+
|
| 531 |
+
:param d: Argument.
|
| 532 |
+
:type d: float64
|
| 533 |
+
:rtype: int32
|
| 534 |
+
"""
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
def double2int_rn(d):
|
| 538 |
+
"""
|
| 539 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_rn.html
|
| 540 |
+
|
| 541 |
+
:param d: Argument.
|
| 542 |
+
:type d: float64
|
| 543 |
+
:rtype: int32
|
| 544 |
+
"""
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def double2int_ru(d):
|
| 548 |
+
"""
|
| 549 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_ru.html
|
| 550 |
+
|
| 551 |
+
:param d: Argument.
|
| 552 |
+
:type d: float64
|
| 553 |
+
:rtype: int32
|
| 554 |
+
"""
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
def double2int_rz(d):
|
| 558 |
+
"""
|
| 559 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_rz.html
|
| 560 |
+
|
| 561 |
+
:param d: Argument.
|
| 562 |
+
:type d: float64
|
| 563 |
+
:rtype: int32
|
| 564 |
+
"""
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
def double2ll_rd(f):
|
| 568 |
+
"""
|
| 569 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_rd.html
|
| 570 |
+
|
| 571 |
+
:param f: Argument.
|
| 572 |
+
:type f: float64
|
| 573 |
+
:rtype: int64
|
| 574 |
+
"""
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
def double2ll_rn(f):
|
| 578 |
+
"""
|
| 579 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_rn.html
|
| 580 |
+
|
| 581 |
+
:param f: Argument.
|
| 582 |
+
:type f: float64
|
| 583 |
+
:rtype: int64
|
| 584 |
+
"""
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def double2ll_ru(f):
|
| 588 |
+
"""
|
| 589 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_ru.html
|
| 590 |
+
|
| 591 |
+
:param f: Argument.
|
| 592 |
+
:type f: float64
|
| 593 |
+
:rtype: int64
|
| 594 |
+
"""
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
def double2ll_rz(f):
|
| 598 |
+
"""
|
| 599 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_rz.html
|
| 600 |
+
|
| 601 |
+
:param f: Argument.
|
| 602 |
+
:type f: float64
|
| 603 |
+
:rtype: int64
|
| 604 |
+
"""
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def double2loint(d):
|
| 608 |
+
"""
|
| 609 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2loint.html
|
| 610 |
+
|
| 611 |
+
:param d: Argument.
|
| 612 |
+
:type d: float64
|
| 613 |
+
:rtype: int32
|
| 614 |
+
"""
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
def double2uint_rd(d):
|
| 618 |
+
"""
|
| 619 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_rd.html
|
| 620 |
+
|
| 621 |
+
:param d: Argument.
|
| 622 |
+
:type d: float64
|
| 623 |
+
:rtype: int32
|
| 624 |
+
"""
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def double2uint_rn(d):
|
| 628 |
+
"""
|
| 629 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_rn.html
|
| 630 |
+
|
| 631 |
+
:param d: Argument.
|
| 632 |
+
:type d: float64
|
| 633 |
+
:rtype: int32
|
| 634 |
+
"""
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
def double2uint_ru(d):
|
| 638 |
+
"""
|
| 639 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_ru.html
|
| 640 |
+
|
| 641 |
+
:param d: Argument.
|
| 642 |
+
:type d: float64
|
| 643 |
+
:rtype: int32
|
| 644 |
+
"""
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def double2uint_rz(d):
|
| 648 |
+
"""
|
| 649 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_rz.html
|
| 650 |
+
|
| 651 |
+
:param d: Argument.
|
| 652 |
+
:type d: float64
|
| 653 |
+
:rtype: int32
|
| 654 |
+
"""
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def double2ull_rd(f):
|
| 658 |
+
"""
|
| 659 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_rd.html
|
| 660 |
+
|
| 661 |
+
:param f: Argument.
|
| 662 |
+
:type f: float64
|
| 663 |
+
:rtype: int64
|
| 664 |
+
"""
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
def double2ull_rn(f):
|
| 668 |
+
"""
|
| 669 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_rn.html
|
| 670 |
+
|
| 671 |
+
:param f: Argument.
|
| 672 |
+
:type f: float64
|
| 673 |
+
:rtype: int64
|
| 674 |
+
"""
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
def double2ull_ru(f):
|
| 678 |
+
"""
|
| 679 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_ru.html
|
| 680 |
+
|
| 681 |
+
:param f: Argument.
|
| 682 |
+
:type f: float64
|
| 683 |
+
:rtype: int64
|
| 684 |
+
"""
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
def double2ull_rz(f):
|
| 688 |
+
"""
|
| 689 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_rz.html
|
| 690 |
+
|
| 691 |
+
:param f: Argument.
|
| 692 |
+
:type f: float64
|
| 693 |
+
:rtype: int64
|
| 694 |
+
"""
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def double_as_longlong(x):
|
| 698 |
+
"""
|
| 699 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double_as_longlong.html
|
| 700 |
+
|
| 701 |
+
:param x: Argument.
|
| 702 |
+
:type x: float64
|
| 703 |
+
:rtype: int64
|
| 704 |
+
"""
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
def drcp_rd(x):
|
| 708 |
+
"""
|
| 709 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_rd.html
|
| 710 |
+
|
| 711 |
+
:param x: Argument.
|
| 712 |
+
:type x: float64
|
| 713 |
+
:rtype: float64
|
| 714 |
+
"""
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
def drcp_rn(x):
|
| 718 |
+
"""
|
| 719 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_rn.html
|
| 720 |
+
|
| 721 |
+
:param x: Argument.
|
| 722 |
+
:type x: float64
|
| 723 |
+
:rtype: float64
|
| 724 |
+
"""
|
| 725 |
+
|
| 726 |
+
|
| 727 |
+
def drcp_ru(x):
|
| 728 |
+
"""
|
| 729 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_ru.html
|
| 730 |
+
|
| 731 |
+
:param x: Argument.
|
| 732 |
+
:type x: float64
|
| 733 |
+
:rtype: float64
|
| 734 |
+
"""
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
def drcp_rz(x):
|
| 738 |
+
"""
|
| 739 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_rz.html
|
| 740 |
+
|
| 741 |
+
:param x: Argument.
|
| 742 |
+
:type x: float64
|
| 743 |
+
:rtype: float64
|
| 744 |
+
"""
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
def dsqrt_rd(x):
|
| 748 |
+
"""
|
| 749 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_rd.html
|
| 750 |
+
|
| 751 |
+
:param x: Argument.
|
| 752 |
+
:type x: float64
|
| 753 |
+
:rtype: float64
|
| 754 |
+
"""
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
def dsqrt_rn(x):
|
| 758 |
+
"""
|
| 759 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_rn.html
|
| 760 |
+
|
| 761 |
+
:param x: Argument.
|
| 762 |
+
:type x: float64
|
| 763 |
+
:rtype: float64
|
| 764 |
+
"""
|
| 765 |
+
|
| 766 |
+
|
| 767 |
+
def dsqrt_ru(x):
|
| 768 |
+
"""
|
| 769 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_ru.html
|
| 770 |
+
|
| 771 |
+
:param x: Argument.
|
| 772 |
+
:type x: float64
|
| 773 |
+
:rtype: float64
|
| 774 |
+
"""
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def dsqrt_rz(x):
|
| 778 |
+
"""
|
| 779 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_rz.html
|
| 780 |
+
|
| 781 |
+
:param x: Argument.
|
| 782 |
+
:type x: float64
|
| 783 |
+
:rtype: float64
|
| 784 |
+
"""
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
def erf(x):
|
| 788 |
+
"""
|
| 789 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erf.html
|
| 790 |
+
|
| 791 |
+
:param x: Argument.
|
| 792 |
+
:type x: float64
|
| 793 |
+
:rtype: float64
|
| 794 |
+
"""
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
def erfc(x):
|
| 798 |
+
"""
|
| 799 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfc.html
|
| 800 |
+
|
| 801 |
+
:param x: Argument.
|
| 802 |
+
:type x: float64
|
| 803 |
+
:rtype: float64
|
| 804 |
+
"""
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
def erfcf(x):
|
| 808 |
+
"""
|
| 809 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcf.html
|
| 810 |
+
|
| 811 |
+
:param x: Argument.
|
| 812 |
+
:type x: float32
|
| 813 |
+
:rtype: float32
|
| 814 |
+
"""
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
def erfcinv(x):
|
| 818 |
+
"""
|
| 819 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcinv.html
|
| 820 |
+
|
| 821 |
+
:param x: Argument.
|
| 822 |
+
:type x: float64
|
| 823 |
+
:rtype: float64
|
| 824 |
+
"""
|
| 825 |
+
|
| 826 |
+
|
| 827 |
+
def erfcinvf(x):
|
| 828 |
+
"""
|
| 829 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcinvf.html
|
| 830 |
+
|
| 831 |
+
:param x: Argument.
|
| 832 |
+
:type x: float32
|
| 833 |
+
:rtype: float32
|
| 834 |
+
"""
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
def erfcx(x):
|
| 838 |
+
"""
|
| 839 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcx.html
|
| 840 |
+
|
| 841 |
+
:param x: Argument.
|
| 842 |
+
:type x: float64
|
| 843 |
+
:rtype: float64
|
| 844 |
+
"""
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
def erfcxf(x):
|
| 848 |
+
"""
|
| 849 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcxf.html
|
| 850 |
+
|
| 851 |
+
:param x: Argument.
|
| 852 |
+
:type x: float32
|
| 853 |
+
:rtype: float32
|
| 854 |
+
"""
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
def erff(x):
|
| 858 |
+
"""
|
| 859 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erff.html
|
| 860 |
+
|
| 861 |
+
:param x: Argument.
|
| 862 |
+
:type x: float32
|
| 863 |
+
:rtype: float32
|
| 864 |
+
"""
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
def erfinv(x):
|
| 868 |
+
"""
|
| 869 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfinv.html
|
| 870 |
+
|
| 871 |
+
:param x: Argument.
|
| 872 |
+
:type x: float64
|
| 873 |
+
:rtype: float64
|
| 874 |
+
"""
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
def erfinvf(x):
|
| 878 |
+
"""
|
| 879 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfinvf.html
|
| 880 |
+
|
| 881 |
+
:param x: Argument.
|
| 882 |
+
:type x: float32
|
| 883 |
+
:rtype: float32
|
| 884 |
+
"""
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def exp(x):
|
| 888 |
+
"""
|
| 889 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp.html
|
| 890 |
+
|
| 891 |
+
:param x: Argument.
|
| 892 |
+
:type x: float64
|
| 893 |
+
:rtype: float64
|
| 894 |
+
"""
|
| 895 |
+
|
| 896 |
+
|
| 897 |
+
def exp10(x):
|
| 898 |
+
"""
|
| 899 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp10.html
|
| 900 |
+
|
| 901 |
+
:param x: Argument.
|
| 902 |
+
:type x: float64
|
| 903 |
+
:rtype: float64
|
| 904 |
+
"""
|
| 905 |
+
|
| 906 |
+
|
| 907 |
+
def exp10f(x):
|
| 908 |
+
"""
|
| 909 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp10f.html
|
| 910 |
+
|
| 911 |
+
:param x: Argument.
|
| 912 |
+
:type x: float32
|
| 913 |
+
:rtype: float32
|
| 914 |
+
"""
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
def exp2(x):
|
| 918 |
+
"""
|
| 919 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp2.html
|
| 920 |
+
|
| 921 |
+
:param x: Argument.
|
| 922 |
+
:type x: float64
|
| 923 |
+
:rtype: float64
|
| 924 |
+
"""
|
| 925 |
+
|
| 926 |
+
|
| 927 |
+
def exp2f(x):
|
| 928 |
+
"""
|
| 929 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp2f.html
|
| 930 |
+
|
| 931 |
+
:param x: Argument.
|
| 932 |
+
:type x: float32
|
| 933 |
+
:rtype: float32
|
| 934 |
+
"""
|
| 935 |
+
|
| 936 |
+
|
| 937 |
+
def expf(x):
|
| 938 |
+
"""
|
| 939 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_expf.html
|
| 940 |
+
|
| 941 |
+
:param x: Argument.
|
| 942 |
+
:type x: float32
|
| 943 |
+
:rtype: float32
|
| 944 |
+
"""
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
def expm1(x):
|
| 948 |
+
"""
|
| 949 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_expm1.html
|
| 950 |
+
|
| 951 |
+
:param x: Argument.
|
| 952 |
+
:type x: float64
|
| 953 |
+
:rtype: float64
|
| 954 |
+
"""
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
def expm1f(x):
|
| 958 |
+
"""
|
| 959 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_expm1f.html
|
| 960 |
+
|
| 961 |
+
:param x: Argument.
|
| 962 |
+
:type x: float32
|
| 963 |
+
:rtype: float32
|
| 964 |
+
"""
|
| 965 |
+
|
| 966 |
+
|
| 967 |
+
def fabs(f):
|
| 968 |
+
"""
|
| 969 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fabs.html
|
| 970 |
+
|
| 971 |
+
:param f: Argument.
|
| 972 |
+
:type f: float64
|
| 973 |
+
:rtype: float64
|
| 974 |
+
"""
|
| 975 |
+
|
| 976 |
+
|
| 977 |
+
def fabsf(f):
|
| 978 |
+
"""
|
| 979 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fabsf.html
|
| 980 |
+
|
| 981 |
+
:param f: Argument.
|
| 982 |
+
:type f: float32
|
| 983 |
+
:rtype: float32
|
| 984 |
+
"""
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
def fadd_rd(x, y):
|
| 988 |
+
"""
|
| 989 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rd.html
|
| 990 |
+
|
| 991 |
+
:param x: Argument.
|
| 992 |
+
:type x: float32
|
| 993 |
+
:param y: Argument.
|
| 994 |
+
:type y: float32
|
| 995 |
+
:rtype: float32
|
| 996 |
+
"""
|
| 997 |
+
|
| 998 |
+
|
| 999 |
+
def fadd_rn(x, y):
|
| 1000 |
+
"""
|
| 1001 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rn.html
|
| 1002 |
+
|
| 1003 |
+
:param x: Argument.
|
| 1004 |
+
:type x: float32
|
| 1005 |
+
:param y: Argument.
|
| 1006 |
+
:type y: float32
|
| 1007 |
+
:rtype: float32
|
| 1008 |
+
"""
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
def fadd_ru(x, y):
|
| 1012 |
+
"""
|
| 1013 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_ru.html
|
| 1014 |
+
|
| 1015 |
+
:param x: Argument.
|
| 1016 |
+
:type x: float32
|
| 1017 |
+
:param y: Argument.
|
| 1018 |
+
:type y: float32
|
| 1019 |
+
:rtype: float32
|
| 1020 |
+
"""
|
| 1021 |
+
|
| 1022 |
+
|
| 1023 |
+
def fadd_rz(x, y):
|
| 1024 |
+
"""
|
| 1025 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rz.html
|
| 1026 |
+
|
| 1027 |
+
:param x: Argument.
|
| 1028 |
+
:type x: float32
|
| 1029 |
+
:param y: Argument.
|
| 1030 |
+
:type y: float32
|
| 1031 |
+
:rtype: float32
|
| 1032 |
+
"""
|
| 1033 |
+
|
| 1034 |
+
|
| 1035 |
+
def fast_cosf(x):
|
| 1036 |
+
"""
|
| 1037 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_cosf.html
|
| 1038 |
+
|
| 1039 |
+
:param x: Argument.
|
| 1040 |
+
:type x: float32
|
| 1041 |
+
:rtype: float32
|
| 1042 |
+
"""
|
| 1043 |
+
|
| 1044 |
+
|
| 1045 |
+
def fast_exp10f(x):
|
| 1046 |
+
"""
|
| 1047 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_exp10f.html
|
| 1048 |
+
|
| 1049 |
+
:param x: Argument.
|
| 1050 |
+
:type x: float32
|
| 1051 |
+
:rtype: float32
|
| 1052 |
+
"""
|
| 1053 |
+
|
| 1054 |
+
|
| 1055 |
+
def fast_expf(x):
|
| 1056 |
+
"""
|
| 1057 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_expf.html
|
| 1058 |
+
|
| 1059 |
+
:param x: Argument.
|
| 1060 |
+
:type x: float32
|
| 1061 |
+
:rtype: float32
|
| 1062 |
+
"""
|
| 1063 |
+
|
| 1064 |
+
|
| 1065 |
+
def fast_fdividef(x, y):
|
| 1066 |
+
"""
|
| 1067 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_fdividef.html
|
| 1068 |
+
|
| 1069 |
+
:param x: Argument.
|
| 1070 |
+
:type x: float32
|
| 1071 |
+
:param y: Argument.
|
| 1072 |
+
:type y: float32
|
| 1073 |
+
:rtype: float32
|
| 1074 |
+
"""
|
| 1075 |
+
|
| 1076 |
+
|
| 1077 |
+
def fast_log10f(x):
|
| 1078 |
+
"""
|
| 1079 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_log10f.html
|
| 1080 |
+
|
| 1081 |
+
:param x: Argument.
|
| 1082 |
+
:type x: float32
|
| 1083 |
+
:rtype: float32
|
| 1084 |
+
"""
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
def fast_log2f(x):
|
| 1088 |
+
"""
|
| 1089 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_log2f.html
|
| 1090 |
+
|
| 1091 |
+
:param x: Argument.
|
| 1092 |
+
:type x: float32
|
| 1093 |
+
:rtype: float32
|
| 1094 |
+
"""
|
| 1095 |
+
|
| 1096 |
+
|
| 1097 |
+
def fast_logf(x):
|
| 1098 |
+
"""
|
| 1099 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_logf.html
|
| 1100 |
+
|
| 1101 |
+
:param x: Argument.
|
| 1102 |
+
:type x: float32
|
| 1103 |
+
:rtype: float32
|
| 1104 |
+
"""
|
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
def fast_powf(x, y):
|
| 1108 |
+
"""
|
| 1109 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_powf.html
|
| 1110 |
+
|
| 1111 |
+
:param x: Argument.
|
| 1112 |
+
:type x: float32
|
| 1113 |
+
:param y: Argument.
|
| 1114 |
+
:type y: float32
|
| 1115 |
+
:rtype: float32
|
| 1116 |
+
"""
|
| 1117 |
+
|
| 1118 |
+
|
| 1119 |
+
def fast_sincosf(x):
|
| 1120 |
+
"""
|
| 1121 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_sincosf.html
|
| 1122 |
+
|
| 1123 |
+
:param x: Argument.
|
| 1124 |
+
:type x: float32
|
| 1125 |
+
:rtype: UniTuple(float32 x 2)
|
| 1126 |
+
"""
|
| 1127 |
+
|
| 1128 |
+
|
| 1129 |
+
def fast_sinf(x):
|
| 1130 |
+
"""
|
| 1131 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_sinf.html
|
| 1132 |
+
|
| 1133 |
+
:param x: Argument.
|
| 1134 |
+
:type x: float32
|
| 1135 |
+
:rtype: float32
|
| 1136 |
+
"""
|
| 1137 |
+
|
| 1138 |
+
|
| 1139 |
+
def fast_tanf(x):
|
| 1140 |
+
"""
|
| 1141 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_tanf.html
|
| 1142 |
+
|
| 1143 |
+
:param x: Argument.
|
| 1144 |
+
:type x: float32
|
| 1145 |
+
:rtype: float32
|
| 1146 |
+
"""
|
| 1147 |
+
|
| 1148 |
+
|
| 1149 |
+
def fdim(x, y):
|
| 1150 |
+
"""
|
| 1151 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdim.html
|
| 1152 |
+
|
| 1153 |
+
:param x: Argument.
|
| 1154 |
+
:type x: float64
|
| 1155 |
+
:param y: Argument.
|
| 1156 |
+
:type y: float64
|
| 1157 |
+
:rtype: float64
|
| 1158 |
+
"""
|
| 1159 |
+
|
| 1160 |
+
|
| 1161 |
+
def fdimf(x, y):
|
| 1162 |
+
"""
|
| 1163 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdimf.html
|
| 1164 |
+
|
| 1165 |
+
:param x: Argument.
|
| 1166 |
+
:type x: float32
|
| 1167 |
+
:param y: Argument.
|
| 1168 |
+
:type y: float32
|
| 1169 |
+
:rtype: float32
|
| 1170 |
+
"""
|
| 1171 |
+
|
| 1172 |
+
|
| 1173 |
+
def fdiv_rd(x, y):
|
| 1174 |
+
"""
|
| 1175 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rd.html
|
| 1176 |
+
|
| 1177 |
+
:param x: Argument.
|
| 1178 |
+
:type x: float32
|
| 1179 |
+
:param y: Argument.
|
| 1180 |
+
:type y: float32
|
| 1181 |
+
:rtype: float32
|
| 1182 |
+
"""
|
| 1183 |
+
|
| 1184 |
+
|
| 1185 |
+
def fdiv_rn(x, y):
|
| 1186 |
+
"""
|
| 1187 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rn.html
|
| 1188 |
+
|
| 1189 |
+
:param x: Argument.
|
| 1190 |
+
:type x: float32
|
| 1191 |
+
:param y: Argument.
|
| 1192 |
+
:type y: float32
|
| 1193 |
+
:rtype: float32
|
| 1194 |
+
"""
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
def fdiv_ru(x, y):
|
| 1198 |
+
"""
|
| 1199 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_ru.html
|
| 1200 |
+
|
| 1201 |
+
:param x: Argument.
|
| 1202 |
+
:type x: float32
|
| 1203 |
+
:param y: Argument.
|
| 1204 |
+
:type y: float32
|
| 1205 |
+
:rtype: float32
|
| 1206 |
+
"""
|
| 1207 |
+
|
| 1208 |
+
|
| 1209 |
+
def fdiv_rz(x, y):
|
| 1210 |
+
"""
|
| 1211 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rz.html
|
| 1212 |
+
|
| 1213 |
+
:param x: Argument.
|
| 1214 |
+
:type x: float32
|
| 1215 |
+
:param y: Argument.
|
| 1216 |
+
:type y: float32
|
| 1217 |
+
:rtype: float32
|
| 1218 |
+
"""
|
| 1219 |
+
|
| 1220 |
+
|
| 1221 |
+
def ffs(x):
|
| 1222 |
+
"""
|
| 1223 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffs.html
|
| 1224 |
+
|
| 1225 |
+
:param x: Argument.
|
| 1226 |
+
:type x: int32
|
| 1227 |
+
:rtype: int32
|
| 1228 |
+
"""
|
| 1229 |
+
|
| 1230 |
+
|
| 1231 |
+
def ffsll(x):
|
| 1232 |
+
"""
|
| 1233 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffsll.html
|
| 1234 |
+
|
| 1235 |
+
:param x: Argument.
|
| 1236 |
+
:type x: int64
|
| 1237 |
+
:rtype: int32
|
| 1238 |
+
"""
|
| 1239 |
+
|
| 1240 |
+
|
| 1241 |
+
def finitef(x):
|
| 1242 |
+
"""
|
| 1243 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_finitef.html
|
| 1244 |
+
|
| 1245 |
+
:param x: Argument.
|
| 1246 |
+
:type x: float32
|
| 1247 |
+
:rtype: int32
|
| 1248 |
+
"""
|
| 1249 |
+
|
| 1250 |
+
|
| 1251 |
+
def float2half_rn(f):
|
| 1252 |
+
"""
|
| 1253 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2half_rn.html
|
| 1254 |
+
|
| 1255 |
+
:param f: Argument.
|
| 1256 |
+
:type f: float32
|
| 1257 |
+
:rtype: int16
|
| 1258 |
+
"""
|
| 1259 |
+
|
| 1260 |
+
|
| 1261 |
+
def float2int_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rd.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def float2int_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rn.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def float2int_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_ru.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def float2int_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rz.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """
def float2ll_rd(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rd.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """


def float2ll_rn(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rn.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """


def float2ll_ru(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_ru.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """


def float2ll_rz(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rz.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """
def float2uint_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rd.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def float2uint_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rn.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def float2uint_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_ru.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def float2uint_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rz.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """
def float2ull_rd(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rd.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """


def float2ull_rn(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rn.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """


def float2ull_ru(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_ru.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """


def float2ull_rz(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rz.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
    """
def float_as_int(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float_as_int.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def floor(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_floor.html

    :param f: Argument.
    :type f: float64
    :rtype: float64
    """


def floorf(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_floorf.html

    :param f: Argument.
    :type f: float32
    :rtype: float32
    """
def fma(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :param z: Argument.
    :type z: float64
    :rtype: float64
    """


def fma_rd(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rd.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :param z: Argument.
    :type z: float64
    :rtype: float64
    """


def fma_rn(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rn.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :param z: Argument.
    :type z: float64
    :rtype: float64
    """


def fma_ru(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_ru.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :param z: Argument.
    :type z: float64
    :rtype: float64
    """


def fma_rz(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rz.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :param z: Argument.
    :type z: float64
    :rtype: float64
    """


def fmaf(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :param z: Argument.
    :type z: float32
    :rtype: float32
    """


def fmaf_rd(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rd.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :param z: Argument.
    :type z: float32
    :rtype: float32
    """


def fmaf_rn(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rn.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :param z: Argument.
    :type z: float32
    :rtype: float32
    """


def fmaf_ru(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_ru.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :param z: Argument.
    :type z: float32
    :rtype: float32
    """


def fmaf_rz(x, y, z):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rz.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :param z: Argument.
    :type z: float32
    :rtype: float32
    """
def fmax(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmax.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :rtype: float64
    """


def fmaxf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaxf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fmin(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmin.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :rtype: float64
    """


def fminf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fminf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fmod(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmod.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :rtype: float64
    """


def fmodf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmodf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """
def fmul_rd(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rd.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fmul_rn(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rn.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fmul_ru(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_ru.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fmul_rz(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rz.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """
def frcp_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rd.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def frcp_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rn.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def frcp_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_ru.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def frcp_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rz.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
def frexp(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frexp.html

    :param x: Argument.
    :type x: float64
    :rtype: Tuple(float64, int32)
    """


def frexpf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frexpf.html

    :param x: Argument.
    :type x: float32
    :rtype: Tuple(float32, int32)
    """


def frsqrt_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frsqrt_rn.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
def fsqrt_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rd.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def fsqrt_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rn.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def fsqrt_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_ru.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def fsqrt_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rz.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
def fsub_rd(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rd.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fsub_rn(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rn.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fsub_ru(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_ru.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """


def fsub_rz(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rz.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """
def hadd(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hadd.html

    :param x: Argument.
    :type x: int32
    :param y: Argument.
    :type y: int32
    :rtype: int32
    """


def half2float(h):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_half2float.html

    :param h: Argument.
    :type h: int16
    :rtype: float32
    """


def hiloint2double(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hiloint2double.html

    :param x: Argument.
    :type x: int32
    :param y: Argument.
    :type y: int32
    :rtype: float64
    """


def hypot(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hypot.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :rtype: float64
    """


def hypotf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hypotf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """
def ilogb(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ilogb.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
    """


def ilogbf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ilogbf.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """
def int2double_rn(i):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2double_rn.html

    :param i: Argument.
    :type i: int32
    :rtype: float64
    """


def int2float_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_rd.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """


def int2float_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_rn.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """


def int2float_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_ru.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """


def int2float_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_rz.html

    The libdevice parameter is named ``in``; it is exposed here as ``x``
    because ``in`` is a Python keyword.

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """


def int_as_float(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int_as_float.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """
def isfinited(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isfinited.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
    """


def isinfd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isinfd.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
    """


def isinff(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isinff.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """


def isnand(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isnand.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
    """


def isnanf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isnanf.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
    """
def j0(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j0.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def j0f(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j0f.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def j1(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j1.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def j1f(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j1f.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def jn(n, x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_jn.html

    :param n: Argument.
    :type n: int32
    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def jnf(n, x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_jnf.html

    :param n: Argument.
    :type n: int32
    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
def ldexp(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ldexp.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: int32
    :rtype: float64
    """


def ldexpf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ldexpf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: int32
    :rtype: float32
    """


def lgamma(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_lgamma.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def lgammaf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_lgammaf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
def ll2double_rd(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_rd.html

    :param l: Argument.
    :type l: int64
    :rtype: float64
    """


def ll2double_rn(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_rn.html

    :param l: Argument.
    :type l: int64
    :rtype: float64
    """


def ll2double_ru(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_ru.html

    :param l: Argument.
    :type l: int64
    :rtype: float64
    """


def ll2double_rz(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_rz.html

    :param l: Argument.
    :type l: int64
    :rtype: float64
    """
def ll2float_rd(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_rd.html

    :param l: Argument.
    :type l: int64
    :rtype: float32
    """


def ll2float_rn(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_rn.html

    :param l: Argument.
    :type l: int64
    :rtype: float32
    """


def ll2float_ru(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_ru.html

    :param l: Argument.
    :type l: int64
    :rtype: float32
    """


def ll2float_rz(l):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_rz.html

    :param l: Argument.
    :type l: int64
    :rtype: float32
    """
def llabs(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llabs.html

    :param x: Argument.
    :type x: int64
    :rtype: int64
    """


def llmax(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llmax.html

    :param x: Argument.
    :type x: int64
    :param y: Argument.
    :type y: int64
    :rtype: int64
    """


def llmin(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llmin.html

    :param x: Argument.
    :type x: int64
    :param y: Argument.
    :type y: int64
    :rtype: int64
    """
def llrint(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llrint.html

    :param x: Argument.
    :type x: float64
    :rtype: int64
    """


def llrintf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llrintf.html

    :param x: Argument.
    :type x: float32
    :rtype: int64
    """


def llround(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llround.html

    :param x: Argument.
    :type x: float64
    :rtype: int64
    """


def llroundf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llroundf.html

    :param x: Argument.
    :type x: float32
    :rtype: int64
    """
def log(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def log10(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log10.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def log10f(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log10f.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def log1p(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log1p.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def log1pf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log1pf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def log2(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log2.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def log2f(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log2f.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def logb(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_logb.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
    """


def logbf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_logbf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """


def logf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_logf.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
    """
def longlong_as_double(x):
|
| 2420 |
+
"""
|
| 2421 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_longlong_as_double.html
|
| 2422 |
+
|
| 2423 |
+
:param x: Argument.
|
| 2424 |
+
:type x: int64
|
| 2425 |
+
:rtype: float64
|
| 2426 |
+
"""
|
| 2427 |
+
|
| 2428 |
+
|
| 2429 |
+
def max(x, y):
|
| 2430 |
+
"""
|
| 2431 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_max.html
|
| 2432 |
+
|
| 2433 |
+
:param x: Argument.
|
| 2434 |
+
:type x: int32
|
| 2435 |
+
:param y: Argument.
|
| 2436 |
+
:type y: int32
|
| 2437 |
+
:rtype: int32
|
| 2438 |
+
"""
|
| 2439 |
+
|
| 2440 |
+
|
| 2441 |
+
def min(x, y):
|
| 2442 |
+
"""
|
| 2443 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_min.html
|
| 2444 |
+
|
| 2445 |
+
:param x: Argument.
|
| 2446 |
+
:type x: int32
|
| 2447 |
+
:param y: Argument.
|
| 2448 |
+
:type y: int32
|
| 2449 |
+
:rtype: int32
|
| 2450 |
+
"""
|
| 2451 |
+
|
| 2452 |
+
|
| 2453 |
+
def modf(x):
|
| 2454 |
+
"""
|
| 2455 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_modf.html
|
| 2456 |
+
|
| 2457 |
+
:param x: Argument.
|
| 2458 |
+
:type x: float64
|
| 2459 |
+
:rtype: UniTuple(float64 x 2)
|
| 2460 |
+
"""
|
| 2461 |
+
|
| 2462 |
+
|
| 2463 |
+
def modff(x):
|
| 2464 |
+
"""
|
| 2465 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_modff.html
|
| 2466 |
+
|
| 2467 |
+
:param x: Argument.
|
| 2468 |
+
:type x: float32
|
| 2469 |
+
:rtype: UniTuple(float32 x 2)
|
| 2470 |
+
"""
|
| 2471 |
+
|
| 2472 |
+
|
| 2473 |
+
def mul24(x, y):
|
| 2474 |
+
"""
|
| 2475 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_mul24.html
|
| 2476 |
+
|
| 2477 |
+
:param x: Argument.
|
| 2478 |
+
:type x: int32
|
| 2479 |
+
:param y: Argument.
|
| 2480 |
+
:type y: int32
|
| 2481 |
+
:rtype: int32
|
| 2482 |
+
"""
|
| 2483 |
+
|
| 2484 |
+
|
| 2485 |
+
def mul64hi(x, y):
|
| 2486 |
+
"""
|
| 2487 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_mul64hi.html
|
| 2488 |
+
|
| 2489 |
+
:param x: Argument.
|
| 2490 |
+
:type x: int64
|
| 2491 |
+
:param y: Argument.
|
| 2492 |
+
:type y: int64
|
| 2493 |
+
:rtype: int64
|
| 2494 |
+
"""
|
| 2495 |
+
|
| 2496 |
+
|
| 2497 |
+
def mulhi(x, y):
|
| 2498 |
+
"""
|
| 2499 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_mulhi.html
|
| 2500 |
+
|
| 2501 |
+
:param x: Argument.
|
| 2502 |
+
:type x: int32
|
| 2503 |
+
:param y: Argument.
|
| 2504 |
+
:type y: int32
|
| 2505 |
+
:rtype: int32
|
| 2506 |
+
"""
|
| 2507 |
+
|
| 2508 |
+
|
| 2509 |
+
def nearbyint(x):
|
| 2510 |
+
"""
|
| 2511 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nearbyint.html
|
| 2512 |
+
|
| 2513 |
+
:param x: Argument.
|
| 2514 |
+
:type x: float64
|
| 2515 |
+
:rtype: float64
|
| 2516 |
+
"""
|
| 2517 |
+
|
| 2518 |
+
|
| 2519 |
+
def nearbyintf(x):
|
| 2520 |
+
"""
|
| 2521 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nearbyintf.html
|
| 2522 |
+
|
| 2523 |
+
:param x: Argument.
|
| 2524 |
+
:type x: float32
|
| 2525 |
+
:rtype: float32
|
| 2526 |
+
"""
|
| 2527 |
+
|
| 2528 |
+
|
| 2529 |
+
def nextafter(x, y):
|
| 2530 |
+
"""
|
| 2531 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nextafter.html
|
| 2532 |
+
|
| 2533 |
+
:param x: Argument.
|
| 2534 |
+
:type x: float64
|
| 2535 |
+
:param y: Argument.
|
| 2536 |
+
:type y: float64
|
| 2537 |
+
:rtype: float64
|
| 2538 |
+
"""
|
| 2539 |
+
|
| 2540 |
+
|
| 2541 |
+
def nextafterf(x, y):
|
| 2542 |
+
"""
|
| 2543 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nextafterf.html
|
| 2544 |
+
|
| 2545 |
+
:param x: Argument.
|
| 2546 |
+
:type x: float32
|
| 2547 |
+
:param y: Argument.
|
| 2548 |
+
:type y: float32
|
| 2549 |
+
:rtype: float32
|
| 2550 |
+
"""
|
| 2551 |
+
|
| 2552 |
+
|
| 2553 |
+
def normcdf(x):
|
| 2554 |
+
"""
|
| 2555 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdf.html
|
| 2556 |
+
|
| 2557 |
+
:param x: Argument.
|
| 2558 |
+
:type x: float64
|
| 2559 |
+
:rtype: float64
|
| 2560 |
+
"""
|
| 2561 |
+
|
| 2562 |
+
|
| 2563 |
+
def normcdff(x):
|
| 2564 |
+
"""
|
| 2565 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdff.html
|
| 2566 |
+
|
| 2567 |
+
:param x: Argument.
|
| 2568 |
+
:type x: float32
|
| 2569 |
+
:rtype: float32
|
| 2570 |
+
"""
|
| 2571 |
+
|
| 2572 |
+
|
| 2573 |
+
def normcdfinv(x):
|
| 2574 |
+
"""
|
| 2575 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdfinv.html
|
| 2576 |
+
|
| 2577 |
+
:param x: Argument.
|
| 2578 |
+
:type x: float64
|
| 2579 |
+
:rtype: float64
|
| 2580 |
+
"""
|
| 2581 |
+
|
| 2582 |
+
|
| 2583 |
+
def normcdfinvf(x):
|
| 2584 |
+
"""
|
| 2585 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdfinvf.html
|
| 2586 |
+
|
| 2587 |
+
:param x: Argument.
|
| 2588 |
+
:type x: float32
|
| 2589 |
+
:rtype: float32
|
| 2590 |
+
"""
|
| 2591 |
+
|
| 2592 |
+
|
| 2593 |
+
def popc(x):
|
| 2594 |
+
"""
|
| 2595 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_popc.html
|
| 2596 |
+
|
| 2597 |
+
:param x: Argument.
|
| 2598 |
+
:type x: int32
|
| 2599 |
+
:rtype: int32
|
| 2600 |
+
"""
|
| 2601 |
+
|
| 2602 |
+
|
| 2603 |
+
def popcll(x):
|
| 2604 |
+
"""
|
| 2605 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_popcll.html
|
| 2606 |
+
|
| 2607 |
+
:param x: Argument.
|
| 2608 |
+
:type x: int64
|
| 2609 |
+
:rtype: int32
|
| 2610 |
+
"""
|
| 2611 |
+
|
| 2612 |
+
|
| 2613 |
+
def pow(x, y):
|
| 2614 |
+
"""
|
| 2615 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_pow.html
|
| 2616 |
+
|
| 2617 |
+
:param x: Argument.
|
| 2618 |
+
:type x: float64
|
| 2619 |
+
:param y: Argument.
|
| 2620 |
+
:type y: float64
|
| 2621 |
+
:rtype: float64
|
| 2622 |
+
"""
|
| 2623 |
+
|
| 2624 |
+
|
| 2625 |
+
def powf(x, y):
|
| 2626 |
+
"""
|
| 2627 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_powf.html
|
| 2628 |
+
|
| 2629 |
+
:param x: Argument.
|
| 2630 |
+
:type x: float32
|
| 2631 |
+
:param y: Argument.
|
| 2632 |
+
:type y: float32
|
| 2633 |
+
:rtype: float32
|
| 2634 |
+
"""
|
| 2635 |
+
|
| 2636 |
+
|
| 2637 |
+
def powi(x, y):
|
| 2638 |
+
"""
|
| 2639 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_powi.html
|
| 2640 |
+
|
| 2641 |
+
:param x: Argument.
|
| 2642 |
+
:type x: float64
|
| 2643 |
+
:param y: Argument.
|
| 2644 |
+
:type y: int32
|
| 2645 |
+
:rtype: float64
|
| 2646 |
+
"""
|
| 2647 |
+
|
| 2648 |
+
|
| 2649 |
+
def powif(x, y):
|
| 2650 |
+
"""
|
| 2651 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_powif.html
|
| 2652 |
+
|
| 2653 |
+
:param x: Argument.
|
| 2654 |
+
:type x: float32
|
| 2655 |
+
:param y: Argument.
|
| 2656 |
+
:type y: int32
|
| 2657 |
+
:rtype: float32
|
| 2658 |
+
"""
|
| 2659 |
+
|
| 2660 |
+
|
| 2661 |
+
def rcbrt(x):
|
| 2662 |
+
"""
|
| 2663 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rcbrt.html
|
| 2664 |
+
|
| 2665 |
+
:param x: Argument.
|
| 2666 |
+
:type x: float64
|
| 2667 |
+
:rtype: float64
|
| 2668 |
+
"""
|
| 2669 |
+
|
| 2670 |
+
|
| 2671 |
+
def rcbrtf(x):
|
| 2672 |
+
"""
|
| 2673 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rcbrtf.html
|
| 2674 |
+
|
| 2675 |
+
:param x: Argument.
|
| 2676 |
+
:type x: float32
|
| 2677 |
+
:rtype: float32
|
| 2678 |
+
"""
|
| 2679 |
+
|
| 2680 |
+
|
| 2681 |
+
def remainder(x, y):
|
| 2682 |
+
"""
|
| 2683 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remainder.html
|
| 2684 |
+
|
| 2685 |
+
:param x: Argument.
|
| 2686 |
+
:type x: float64
|
| 2687 |
+
:param y: Argument.
|
| 2688 |
+
:type y: float64
|
| 2689 |
+
:rtype: float64
|
| 2690 |
+
"""
|
| 2691 |
+
|
| 2692 |
+
|
| 2693 |
+
def remainderf(x, y):
|
| 2694 |
+
"""
|
| 2695 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remainderf.html
|
| 2696 |
+
|
| 2697 |
+
:param x: Argument.
|
| 2698 |
+
:type x: float32
|
| 2699 |
+
:param y: Argument.
|
| 2700 |
+
:type y: float32
|
| 2701 |
+
:rtype: float32
|
| 2702 |
+
"""
|
| 2703 |
+
|
| 2704 |
+
|
| 2705 |
+
def remquo(x, y):
|
| 2706 |
+
"""
|
| 2707 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remquo.html
|
| 2708 |
+
|
| 2709 |
+
:param x: Argument.
|
| 2710 |
+
:type x: float64
|
| 2711 |
+
:param y: Argument.
|
| 2712 |
+
:type y: float64
|
| 2713 |
+
:rtype: Tuple(float64, int32)
|
| 2714 |
+
"""
|
| 2715 |
+
|
| 2716 |
+
|
| 2717 |
+
def remquof(x, y):
|
| 2718 |
+
"""
|
| 2719 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remquof.html
|
| 2720 |
+
|
| 2721 |
+
:param x: Argument.
|
| 2722 |
+
:type x: float32
|
| 2723 |
+
:param y: Argument.
|
| 2724 |
+
:type y: float32
|
| 2725 |
+
:rtype: Tuple(float32, int32)
|
| 2726 |
+
"""
|
| 2727 |
+
|
| 2728 |
+
|
| 2729 |
+
def rhadd(x, y):
|
| 2730 |
+
"""
|
| 2731 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rhadd.html
|
| 2732 |
+
|
| 2733 |
+
:param x: Argument.
|
| 2734 |
+
:type x: int32
|
| 2735 |
+
:param y: Argument.
|
| 2736 |
+
:type y: int32
|
| 2737 |
+
:rtype: int32
|
| 2738 |
+
"""
|
| 2739 |
+
|
| 2740 |
+
|
| 2741 |
+
def rint(x):
|
| 2742 |
+
"""
|
| 2743 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rint.html
|
| 2744 |
+
|
| 2745 |
+
:param x: Argument.
|
| 2746 |
+
:type x: float64
|
| 2747 |
+
:rtype: float64
|
| 2748 |
+
"""
|
| 2749 |
+
|
| 2750 |
+
|
| 2751 |
+
def rintf(x):
|
| 2752 |
+
"""
|
| 2753 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rintf.html
|
| 2754 |
+
|
| 2755 |
+
:param x: Argument.
|
| 2756 |
+
:type x: float32
|
| 2757 |
+
:rtype: float32
|
| 2758 |
+
"""
|
| 2759 |
+
|
| 2760 |
+
|
| 2761 |
+
def round(x):
|
| 2762 |
+
"""
|
| 2763 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_round.html
|
| 2764 |
+
|
| 2765 |
+
:param x: Argument.
|
| 2766 |
+
:type x: float64
|
| 2767 |
+
:rtype: float64
|
| 2768 |
+
"""
|
| 2769 |
+
|
| 2770 |
+
|
| 2771 |
+
def roundf(x):
|
| 2772 |
+
"""
|
| 2773 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_roundf.html
|
| 2774 |
+
|
| 2775 |
+
:param x: Argument.
|
| 2776 |
+
:type x: float32
|
| 2777 |
+
:rtype: float32
|
| 2778 |
+
"""
|
| 2779 |
+
|
| 2780 |
+
|
| 2781 |
+
def rsqrt(x):
|
| 2782 |
+
"""
|
| 2783 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rsqrt.html
|
| 2784 |
+
|
| 2785 |
+
:param x: Argument.
|
| 2786 |
+
:type x: float64
|
| 2787 |
+
:rtype: float64
|
| 2788 |
+
"""
|
| 2789 |
+
|
| 2790 |
+
|
| 2791 |
+
def rsqrtf(x):
|
| 2792 |
+
"""
|
| 2793 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rsqrtf.html
|
| 2794 |
+
|
| 2795 |
+
:param x: Argument.
|
| 2796 |
+
:type x: float32
|
| 2797 |
+
:rtype: float32
|
| 2798 |
+
"""
|
| 2799 |
+
|
| 2800 |
+
|
| 2801 |
+
def sad(x, y, z):
|
| 2802 |
+
"""
|
| 2803 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sad.html
|
| 2804 |
+
|
| 2805 |
+
:param x: Argument.
|
| 2806 |
+
:type x: int32
|
| 2807 |
+
:param y: Argument.
|
| 2808 |
+
:type y: int32
|
| 2809 |
+
:param z: Argument.
|
| 2810 |
+
:type z: int32
|
| 2811 |
+
:rtype: int32
|
| 2812 |
+
"""
|
| 2813 |
+
|
| 2814 |
+
|
| 2815 |
+
def saturatef(x):
|
| 2816 |
+
"""
|
| 2817 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_saturatef.html
|
| 2818 |
+
|
| 2819 |
+
:param x: Argument.
|
| 2820 |
+
:type x: float32
|
| 2821 |
+
:rtype: float32
|
| 2822 |
+
"""
|
| 2823 |
+
|
| 2824 |
+
|
| 2825 |
+
def scalbn(x, y):
|
| 2826 |
+
"""
|
| 2827 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_scalbn.html
|
| 2828 |
+
|
| 2829 |
+
:param x: Argument.
|
| 2830 |
+
:type x: float64
|
| 2831 |
+
:param y: Argument.
|
| 2832 |
+
:type y: int32
|
| 2833 |
+
:rtype: float64
|
| 2834 |
+
"""
|
| 2835 |
+
|
| 2836 |
+
|
| 2837 |
+
def scalbnf(x, y):
|
| 2838 |
+
"""
|
| 2839 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_scalbnf.html
|
| 2840 |
+
|
| 2841 |
+
:param x: Argument.
|
| 2842 |
+
:type x: float32
|
| 2843 |
+
:param y: Argument.
|
| 2844 |
+
:type y: int32
|
| 2845 |
+
:rtype: float32
|
| 2846 |
+
"""
|
| 2847 |
+
|
| 2848 |
+
|
| 2849 |
+
def signbitd(x):
|
| 2850 |
+
"""
|
| 2851 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_signbitd.html
|
| 2852 |
+
|
| 2853 |
+
:param x: Argument.
|
| 2854 |
+
:type x: float64
|
| 2855 |
+
:rtype: int32
|
| 2856 |
+
"""
|
| 2857 |
+
|
| 2858 |
+
|
| 2859 |
+
def signbitf(x):
|
| 2860 |
+
"""
|
| 2861 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_signbitf.html
|
| 2862 |
+
|
| 2863 |
+
:param x: Argument.
|
| 2864 |
+
:type x: float32
|
| 2865 |
+
:rtype: int32
|
| 2866 |
+
"""
|
| 2867 |
+
|
| 2868 |
+
|
| 2869 |
+
def sin(x):
|
| 2870 |
+
"""
|
| 2871 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sin.html
|
| 2872 |
+
|
| 2873 |
+
:param x: Argument.
|
| 2874 |
+
:type x: float64
|
| 2875 |
+
:rtype: float64
|
| 2876 |
+
"""
|
| 2877 |
+
|
| 2878 |
+
|
| 2879 |
+
def sincos(x):
|
| 2880 |
+
"""
|
| 2881 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincos.html
|
| 2882 |
+
|
| 2883 |
+
:param x: Argument.
|
| 2884 |
+
:type x: float64
|
| 2885 |
+
:rtype: UniTuple(float64 x 2)
|
| 2886 |
+
"""
|
| 2887 |
+
|
| 2888 |
+
|
| 2889 |
+
def sincosf(x):
|
| 2890 |
+
"""
|
| 2891 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincosf.html
|
| 2892 |
+
|
| 2893 |
+
:param x: Argument.
|
| 2894 |
+
:type x: float32
|
| 2895 |
+
:rtype: UniTuple(float32 x 2)
|
| 2896 |
+
"""
|
| 2897 |
+
|
| 2898 |
+
|
| 2899 |
+
def sincospi(x):
|
| 2900 |
+
"""
|
| 2901 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincospi.html
|
| 2902 |
+
|
| 2903 |
+
:param x: Argument.
|
| 2904 |
+
:type x: float64
|
| 2905 |
+
:rtype: UniTuple(float64 x 2)
|
| 2906 |
+
"""
|
| 2907 |
+
|
| 2908 |
+
|
| 2909 |
+
def sincospif(x):
|
| 2910 |
+
"""
|
| 2911 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincospif.html
|
| 2912 |
+
|
| 2913 |
+
:param x: Argument.
|
| 2914 |
+
:type x: float32
|
| 2915 |
+
:rtype: UniTuple(float32 x 2)
|
| 2916 |
+
"""
|
| 2917 |
+
|
| 2918 |
+
|
| 2919 |
+
def sinf(x):
|
| 2920 |
+
"""
|
| 2921 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinf.html
|
| 2922 |
+
|
| 2923 |
+
:param x: Argument.
|
| 2924 |
+
:type x: float32
|
| 2925 |
+
:rtype: float32
|
| 2926 |
+
"""
|
| 2927 |
+
|
| 2928 |
+
|
| 2929 |
+
def sinh(x):
|
| 2930 |
+
"""
|
| 2931 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinh.html
|
| 2932 |
+
|
| 2933 |
+
:param x: Argument.
|
| 2934 |
+
:type x: float64
|
| 2935 |
+
:rtype: float64
|
| 2936 |
+
"""
|
| 2937 |
+
|
| 2938 |
+
|
| 2939 |
+
def sinhf(x):
|
| 2940 |
+
"""
|
| 2941 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinhf.html
|
| 2942 |
+
|
| 2943 |
+
:param x: Argument.
|
| 2944 |
+
:type x: float32
|
| 2945 |
+
:rtype: float32
|
| 2946 |
+
"""
|
| 2947 |
+
|
| 2948 |
+
|
| 2949 |
+
def sinpi(x):
|
| 2950 |
+
"""
|
| 2951 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinpi.html
|
| 2952 |
+
|
| 2953 |
+
:param x: Argument.
|
| 2954 |
+
:type x: float64
|
| 2955 |
+
:rtype: float64
|
| 2956 |
+
"""
|
| 2957 |
+
|
| 2958 |
+
|
| 2959 |
+
def sinpif(x):
|
| 2960 |
+
"""
|
| 2961 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinpif.html
|
| 2962 |
+
|
| 2963 |
+
:param x: Argument.
|
| 2964 |
+
:type x: float32
|
| 2965 |
+
:rtype: float32
|
| 2966 |
+
"""
|
| 2967 |
+
|
| 2968 |
+
|
| 2969 |
+
def sqrt(x):
|
| 2970 |
+
"""
|
| 2971 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sqrt.html
|
| 2972 |
+
|
| 2973 |
+
:param x: Argument.
|
| 2974 |
+
:type x: float64
|
| 2975 |
+
:rtype: float64
|
| 2976 |
+
"""
|
| 2977 |
+
|
| 2978 |
+
|
| 2979 |
+
def sqrtf(x):
|
| 2980 |
+
"""
|
| 2981 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sqrtf.html
|
| 2982 |
+
|
| 2983 |
+
:param x: Argument.
|
| 2984 |
+
:type x: float32
|
| 2985 |
+
:rtype: float32
|
| 2986 |
+
"""
|
| 2987 |
+
|
| 2988 |
+
|
| 2989 |
+
def tan(x):
|
| 2990 |
+
"""
|
| 2991 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tan.html
|
| 2992 |
+
|
| 2993 |
+
:param x: Argument.
|
| 2994 |
+
:type x: float64
|
| 2995 |
+
:rtype: float64
|
| 2996 |
+
"""
|
| 2997 |
+
|
| 2998 |
+
|
| 2999 |
+
def tanf(x):
|
| 3000 |
+
"""
|
| 3001 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tanf.html
|
| 3002 |
+
|
| 3003 |
+
:param x: Argument.
|
| 3004 |
+
:type x: float32
|
| 3005 |
+
:rtype: float32
|
| 3006 |
+
"""
|
| 3007 |
+
|
| 3008 |
+
|
| 3009 |
+
def tanh(x):
|
| 3010 |
+
"""
|
| 3011 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tanh.html
|
| 3012 |
+
|
| 3013 |
+
:param x: Argument.
|
| 3014 |
+
:type x: float64
|
| 3015 |
+
:rtype: float64
|
| 3016 |
+
"""
|
| 3017 |
+
|
| 3018 |
+
|
| 3019 |
+
def tanhf(x):
|
| 3020 |
+
"""
|
| 3021 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tanhf.html
|
| 3022 |
+
|
| 3023 |
+
:param x: Argument.
|
| 3024 |
+
:type x: float32
|
| 3025 |
+
:rtype: float32
|
| 3026 |
+
"""
|
| 3027 |
+
|
| 3028 |
+
|
| 3029 |
+
def tgamma(x):
|
| 3030 |
+
"""
|
| 3031 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tgamma.html
|
| 3032 |
+
|
| 3033 |
+
:param x: Argument.
|
| 3034 |
+
:type x: float64
|
| 3035 |
+
:rtype: float64
|
| 3036 |
+
"""
|
| 3037 |
+
|
| 3038 |
+
|
| 3039 |
+
def tgammaf(x):
|
| 3040 |
+
"""
|
| 3041 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tgammaf.html
|
| 3042 |
+
|
| 3043 |
+
:param x: Argument.
|
| 3044 |
+
:type x: float32
|
| 3045 |
+
:rtype: float32
|
| 3046 |
+
"""
|
| 3047 |
+
|
| 3048 |
+
|
| 3049 |
+
def trunc(x):
|
| 3050 |
+
"""
|
| 3051 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_trunc.html
|
| 3052 |
+
|
| 3053 |
+
:param x: Argument.
|
| 3054 |
+
:type x: float64
|
| 3055 |
+
:rtype: float64
|
| 3056 |
+
"""
|
| 3057 |
+
|
| 3058 |
+
|
| 3059 |
+
def truncf(x):
|
| 3060 |
+
"""
|
| 3061 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_truncf.html
|
| 3062 |
+
|
| 3063 |
+
:param x: Argument.
|
| 3064 |
+
:type x: float32
|
| 3065 |
+
:rtype: float32
|
| 3066 |
+
"""
|
| 3067 |
+
|
| 3068 |
+
|
| 3069 |
+
def uhadd(x, y):
|
| 3070 |
+
"""
|
| 3071 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uhadd.html
|
| 3072 |
+
|
| 3073 |
+
:param x: Argument.
|
| 3074 |
+
:type x: int32
|
| 3075 |
+
:param y: Argument.
|
| 3076 |
+
:type y: int32
|
| 3077 |
+
:rtype: int32
|
| 3078 |
+
"""
|
| 3079 |
+
|
| 3080 |
+
|
| 3081 |
+
def uint2double_rn(i):
|
| 3082 |
+
"""
|
| 3083 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2double_rn.html
|
| 3084 |
+
|
| 3085 |
+
:param i: Argument.
|
| 3086 |
+
:type i: int32
|
| 3087 |
+
:rtype: float64
|
| 3088 |
+
"""
|
| 3089 |
+
|
| 3090 |
+
|
| 3091 |
+
def uint2float_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_rd.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """
|
| 3099 |
+
|
| 3100 |
+
|
| 3101 |
+
def uint2float_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_rn.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """
|
| 3109 |
+
|
| 3110 |
+
|
| 3111 |
+
def uint2float_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_ru.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """
|
| 3119 |
+
|
| 3120 |
+
|
| 3121 |
+
def uint2float_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_rz.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
    """
|
| 3129 |
+
|
| 3130 |
+
|
| 3131 |
+
def ull2double_rd(l):
|
| 3132 |
+
"""
|
| 3133 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_rd.html
|
| 3134 |
+
|
| 3135 |
+
:param l: Argument.
|
| 3136 |
+
:type l: int64
|
| 3137 |
+
:rtype: float64
|
| 3138 |
+
"""
|
| 3139 |
+
|
| 3140 |
+
|
| 3141 |
+
def ull2double_rn(l):
|
| 3142 |
+
"""
|
| 3143 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_rn.html
|
| 3144 |
+
|
| 3145 |
+
:param l: Argument.
|
| 3146 |
+
:type l: int64
|
| 3147 |
+
:rtype: float64
|
| 3148 |
+
"""
|
| 3149 |
+
|
| 3150 |
+
|
| 3151 |
+
def ull2double_ru(l):
|
| 3152 |
+
"""
|
| 3153 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_ru.html
|
| 3154 |
+
|
| 3155 |
+
:param l: Argument.
|
| 3156 |
+
:type l: int64
|
| 3157 |
+
:rtype: float64
|
| 3158 |
+
"""
|
| 3159 |
+
|
| 3160 |
+
|
| 3161 |
+
def ull2double_rz(l):
|
| 3162 |
+
"""
|
| 3163 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_rz.html
|
| 3164 |
+
|
| 3165 |
+
:param l: Argument.
|
| 3166 |
+
:type l: int64
|
| 3167 |
+
:rtype: float64
|
| 3168 |
+
"""
|
| 3169 |
+
|
| 3170 |
+
|
| 3171 |
+
def ull2float_rd(l):
|
| 3172 |
+
"""
|
| 3173 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_rd.html
|
| 3174 |
+
|
| 3175 |
+
:param l: Argument.
|
| 3176 |
+
:type l: int64
|
| 3177 |
+
:rtype: float32
|
| 3178 |
+
"""
|
| 3179 |
+
|
| 3180 |
+
|
| 3181 |
+
def ull2float_rn(l):
|
| 3182 |
+
"""
|
| 3183 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_rn.html
|
| 3184 |
+
|
| 3185 |
+
:param l: Argument.
|
| 3186 |
+
:type l: int64
|
| 3187 |
+
:rtype: float32
|
| 3188 |
+
"""
|
| 3189 |
+
|
| 3190 |
+
|
| 3191 |
+
def ull2float_ru(l):
|
| 3192 |
+
"""
|
| 3193 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_ru.html
|
| 3194 |
+
|
| 3195 |
+
:param l: Argument.
|
| 3196 |
+
:type l: int64
|
| 3197 |
+
:rtype: float32
|
| 3198 |
+
"""
|
| 3199 |
+
|
| 3200 |
+
|
| 3201 |
+
def ull2float_rz(l):
|
| 3202 |
+
"""
|
| 3203 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_rz.html
|
| 3204 |
+
|
| 3205 |
+
:param l: Argument.
|
| 3206 |
+
:type l: int64
|
| 3207 |
+
:rtype: float32
|
| 3208 |
+
"""
|
| 3209 |
+
|
| 3210 |
+
|
| 3211 |
+
def ullmax(x, y):
|
| 3212 |
+
"""
|
| 3213 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ullmax.html
|
| 3214 |
+
|
| 3215 |
+
:param x: Argument.
|
| 3216 |
+
:type x: int64
|
| 3217 |
+
:param y: Argument.
|
| 3218 |
+
:type y: int64
|
| 3219 |
+
:rtype: int64
|
| 3220 |
+
"""
|
| 3221 |
+
|
| 3222 |
+
|
| 3223 |
+
def ullmin(x, y):
|
| 3224 |
+
"""
|
| 3225 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ullmin.html
|
| 3226 |
+
|
| 3227 |
+
:param x: Argument.
|
| 3228 |
+
:type x: int64
|
| 3229 |
+
:param y: Argument.
|
| 3230 |
+
:type y: int64
|
| 3231 |
+
:rtype: int64
|
| 3232 |
+
"""
|
| 3233 |
+
|
| 3234 |
+
|
| 3235 |
+
def umax(x, y):
|
| 3236 |
+
"""
|
| 3237 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umax.html
|
| 3238 |
+
|
| 3239 |
+
:param x: Argument.
|
| 3240 |
+
:type x: int32
|
| 3241 |
+
:param y: Argument.
|
| 3242 |
+
:type y: int32
|
| 3243 |
+
:rtype: int32
|
| 3244 |
+
"""
|
| 3245 |
+
|
| 3246 |
+
|
| 3247 |
+
def umin(x, y):
|
| 3248 |
+
"""
|
| 3249 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umin.html
|
| 3250 |
+
|
| 3251 |
+
:param x: Argument.
|
| 3252 |
+
:type x: int32
|
| 3253 |
+
:param y: Argument.
|
| 3254 |
+
:type y: int32
|
| 3255 |
+
:rtype: int32
|
| 3256 |
+
"""
|
| 3257 |
+
|
| 3258 |
+
|
| 3259 |
+
def umul24(x, y):
|
| 3260 |
+
"""
|
| 3261 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umul24.html
|
| 3262 |
+
|
| 3263 |
+
:param x: Argument.
|
| 3264 |
+
:type x: int32
|
| 3265 |
+
:param y: Argument.
|
| 3266 |
+
:type y: int32
|
| 3267 |
+
:rtype: int32
|
| 3268 |
+
"""
|
| 3269 |
+
|
| 3270 |
+
|
| 3271 |
+
def umul64hi(x, y):
|
| 3272 |
+
"""
|
| 3273 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umul64hi.html
|
| 3274 |
+
|
| 3275 |
+
:param x: Argument.
|
| 3276 |
+
:type x: int64
|
| 3277 |
+
:param y: Argument.
|
| 3278 |
+
:type y: int64
|
| 3279 |
+
:rtype: int64
|
| 3280 |
+
"""
|
| 3281 |
+
|
| 3282 |
+
|
| 3283 |
+
def umulhi(x, y):
|
| 3284 |
+
"""
|
| 3285 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umulhi.html
|
| 3286 |
+
|
| 3287 |
+
:param x: Argument.
|
| 3288 |
+
:type x: int32
|
| 3289 |
+
:param y: Argument.
|
| 3290 |
+
:type y: int32
|
| 3291 |
+
:rtype: int32
|
| 3292 |
+
"""
|
| 3293 |
+
|
| 3294 |
+
|
| 3295 |
+
def urhadd(x, y):
|
| 3296 |
+
"""
|
| 3297 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_urhadd.html
|
| 3298 |
+
|
| 3299 |
+
:param x: Argument.
|
| 3300 |
+
:type x: int32
|
| 3301 |
+
:param y: Argument.
|
| 3302 |
+
:type y: int32
|
| 3303 |
+
:rtype: int32
|
| 3304 |
+
"""
|
| 3305 |
+
|
| 3306 |
+
|
| 3307 |
+
def usad(x, y, z):
|
| 3308 |
+
"""
|
| 3309 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_usad.html
|
| 3310 |
+
|
| 3311 |
+
:param x: Argument.
|
| 3312 |
+
:type x: int32
|
| 3313 |
+
:param y: Argument.
|
| 3314 |
+
:type y: int32
|
| 3315 |
+
:param z: Argument.
|
| 3316 |
+
:type z: int32
|
| 3317 |
+
:rtype: int32
|
| 3318 |
+
"""
|
| 3319 |
+
|
| 3320 |
+
|
| 3321 |
+
def y0(x):
|
| 3322 |
+
"""
|
| 3323 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y0.html
|
| 3324 |
+
|
| 3325 |
+
:param x: Argument.
|
| 3326 |
+
:type x: float64
|
| 3327 |
+
:rtype: float64
|
| 3328 |
+
"""
|
| 3329 |
+
|
| 3330 |
+
|
| 3331 |
+
def y0f(x):
|
| 3332 |
+
"""
|
| 3333 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y0f.html
|
| 3334 |
+
|
| 3335 |
+
:param x: Argument.
|
| 3336 |
+
:type x: float32
|
| 3337 |
+
:rtype: float32
|
| 3338 |
+
"""
|
| 3339 |
+
|
| 3340 |
+
|
| 3341 |
+
def y1(x):
|
| 3342 |
+
"""
|
| 3343 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y1.html
|
| 3344 |
+
|
| 3345 |
+
:param x: Argument.
|
| 3346 |
+
:type x: float64
|
| 3347 |
+
:rtype: float64
|
| 3348 |
+
"""
|
| 3349 |
+
|
| 3350 |
+
|
| 3351 |
+
def y1f(x):
|
| 3352 |
+
"""
|
| 3353 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y1f.html
|
| 3354 |
+
|
| 3355 |
+
:param x: Argument.
|
| 3356 |
+
:type x: float32
|
| 3357 |
+
:rtype: float32
|
| 3358 |
+
"""
|
| 3359 |
+
|
| 3360 |
+
|
| 3361 |
+
def yn(n, x):
|
| 3362 |
+
"""
|
| 3363 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_yn.html
|
| 3364 |
+
|
| 3365 |
+
:param n: Argument.
|
| 3366 |
+
:type n: int32
|
| 3367 |
+
:param x: Argument.
|
| 3368 |
+
:type x: float64
|
| 3369 |
+
:rtype: float64
|
| 3370 |
+
"""
|
| 3371 |
+
|
| 3372 |
+
|
| 3373 |
+
def ynf(n, x):
|
| 3374 |
+
"""
|
| 3375 |
+
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ynf.html
|
| 3376 |
+
|
| 3377 |
+
:param n: Argument.
|
| 3378 |
+
:type n: int32
|
| 3379 |
+
:param x: Argument.
|
| 3380 |
+
:type x: float32
|
| 3381 |
+
:rtype: float32
|
| 3382 |
+
"""
|
lib/python3.10/site-packages/numba/cuda/libdevicedecl.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda import libdevice, libdevicefuncs
|
| 2 |
+
from numba.core.typing.templates import ConcreteTemplate, Registry
|
| 3 |
+
|
| 4 |
+
# Typing registry for CUDA libdevice declarations; register_global is the
# decorator-factory used below to attach templates to the Python stubs.
registry = Registry()
register_global = registry.register_global
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def libdevice_declare(func, retty, args):
    """
    Register a typing declaration for a single libdevice function.

    Builds a ConcreteTemplate holding the function's one signature and
    registers it against the matching Python stub in numba.cuda.libdevice.
    """
    signature = libdevicefuncs.create_signature(retty, args)

    class Libdevice_function(ConcreteTemplate):
        cases = [signature]

    # Stub names in numba.cuda.libdevice drop the leading '__nv_' prefix
    # (5 characters) used by the libdevice entry point names.
    stub = getattr(libdevice, func[5:])
    register_global(stub)(Libdevice_function)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Register a typing declaration for every libdevice function known to
# numba.cuda.libdevicefuncs at import time.
for func, (retty, args) in libdevicefuncs.functions.items():
    libdevice_declare(func, retty, args)
|
lib/python3.10/site-packages/numba/cuda/libdevicefuncs.py
ADDED
|
@@ -0,0 +1,1057 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import namedtuple
|
| 2 |
+
from textwrap import indent
|
| 3 |
+
|
| 4 |
+
from numba.types import float32, float64, int16, int32, int64, void, Tuple
|
| 5 |
+
from numba.core.typing.templates import signature
|
| 6 |
+
|
| 7 |
+
# Descriptor for one libdevice function parameter: its name, Numba type,
# and whether it is passed by pointer (i.e. an output parameter).
arg = namedtuple("arg", "name ty is_ptr")
|
| 8 |
+
|
| 9 |
+
functions = {
|
| 10 |
+
"__nv_abs": (int32, [arg(name="x", ty=int32, is_ptr=False)]),
|
| 11 |
+
"__nv_acos": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 12 |
+
"__nv_acosf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 13 |
+
"__nv_acosh": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 14 |
+
"__nv_acoshf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 15 |
+
"__nv_asin": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 16 |
+
"__nv_asinf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 17 |
+
"__nv_asinh": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 18 |
+
"__nv_asinhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 19 |
+
"__nv_atan": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 20 |
+
"__nv_atan2": (
|
| 21 |
+
float64,
|
| 22 |
+
[
|
| 23 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 24 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 25 |
+
],
|
| 26 |
+
),
|
| 27 |
+
"__nv_atan2f": (
|
| 28 |
+
float32,
|
| 29 |
+
[
|
| 30 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 31 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 32 |
+
],
|
| 33 |
+
),
|
| 34 |
+
"__nv_atanf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 35 |
+
"__nv_atanh": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 36 |
+
"__nv_atanhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 37 |
+
"__nv_brev": (int32, [arg(name="x", ty=int32, is_ptr=False)]),
|
| 38 |
+
"__nv_brevll": (int64, [arg(name="x", ty=int64, is_ptr=False)]),
|
| 39 |
+
"__nv_byte_perm": (
|
| 40 |
+
int32,
|
| 41 |
+
[
|
| 42 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 43 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 44 |
+
arg(name="z", ty=int32, is_ptr=False),
|
| 45 |
+
],
|
| 46 |
+
),
|
| 47 |
+
"__nv_cbrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 48 |
+
"__nv_cbrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 49 |
+
"__nv_ceil": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 50 |
+
"__nv_ceilf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 51 |
+
"__nv_clz": (int32, [arg(name="x", ty=int32, is_ptr=False)]),
|
| 52 |
+
"__nv_clzll": (int32, [arg(name="x", ty=int64, is_ptr=False)]),
|
| 53 |
+
"__nv_copysign": (
|
| 54 |
+
float64,
|
| 55 |
+
[
|
| 56 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 57 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 58 |
+
],
|
| 59 |
+
),
|
| 60 |
+
"__nv_copysignf": (
|
| 61 |
+
float32,
|
| 62 |
+
[
|
| 63 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 64 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 65 |
+
],
|
| 66 |
+
),
|
| 67 |
+
"__nv_cos": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 68 |
+
"__nv_cosf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 69 |
+
"__nv_cosh": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 70 |
+
"__nv_coshf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 71 |
+
"__nv_cospi": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 72 |
+
"__nv_cospif": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 73 |
+
"__nv_dadd_rd": (
|
| 74 |
+
float64,
|
| 75 |
+
[
|
| 76 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 77 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 78 |
+
],
|
| 79 |
+
),
|
| 80 |
+
"__nv_dadd_rn": (
|
| 81 |
+
float64,
|
| 82 |
+
[
|
| 83 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 84 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 85 |
+
],
|
| 86 |
+
),
|
| 87 |
+
"__nv_dadd_ru": (
|
| 88 |
+
float64,
|
| 89 |
+
[
|
| 90 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 91 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 92 |
+
],
|
| 93 |
+
),
|
| 94 |
+
"__nv_dadd_rz": (
|
| 95 |
+
float64,
|
| 96 |
+
[
|
| 97 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 98 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 99 |
+
],
|
| 100 |
+
),
|
| 101 |
+
"__nv_ddiv_rd": (
|
| 102 |
+
float64,
|
| 103 |
+
[
|
| 104 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 105 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 106 |
+
],
|
| 107 |
+
),
|
| 108 |
+
"__nv_ddiv_rn": (
|
| 109 |
+
float64,
|
| 110 |
+
[
|
| 111 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 112 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 113 |
+
],
|
| 114 |
+
),
|
| 115 |
+
"__nv_ddiv_ru": (
|
| 116 |
+
float64,
|
| 117 |
+
[
|
| 118 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 119 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 120 |
+
],
|
| 121 |
+
),
|
| 122 |
+
"__nv_ddiv_rz": (
|
| 123 |
+
float64,
|
| 124 |
+
[
|
| 125 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 126 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 127 |
+
],
|
| 128 |
+
),
|
| 129 |
+
"__nv_dmul_rd": (
|
| 130 |
+
float64,
|
| 131 |
+
[
|
| 132 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 133 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 134 |
+
],
|
| 135 |
+
),
|
| 136 |
+
"__nv_dmul_rn": (
|
| 137 |
+
float64,
|
| 138 |
+
[
|
| 139 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 140 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 141 |
+
],
|
| 142 |
+
),
|
| 143 |
+
"__nv_dmul_ru": (
|
| 144 |
+
float64,
|
| 145 |
+
[
|
| 146 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 147 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 148 |
+
],
|
| 149 |
+
),
|
| 150 |
+
"__nv_dmul_rz": (
|
| 151 |
+
float64,
|
| 152 |
+
[
|
| 153 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 154 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 155 |
+
],
|
| 156 |
+
),
|
| 157 |
+
"__nv_double2float_rd": (
|
| 158 |
+
float32,
|
| 159 |
+
[arg(name="d", ty=float64, is_ptr=False)],
|
| 160 |
+
),
|
| 161 |
+
"__nv_double2float_rn": (
|
| 162 |
+
float32,
|
| 163 |
+
[arg(name="d", ty=float64, is_ptr=False)],
|
| 164 |
+
),
|
| 165 |
+
"__nv_double2float_ru": (
|
| 166 |
+
float32,
|
| 167 |
+
[arg(name="d", ty=float64, is_ptr=False)],
|
| 168 |
+
),
|
| 169 |
+
"__nv_double2float_rz": (
|
| 170 |
+
float32,
|
| 171 |
+
[arg(name="d", ty=float64, is_ptr=False)],
|
| 172 |
+
),
|
| 173 |
+
"__nv_double2hiint": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 174 |
+
"__nv_double2int_rd": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 175 |
+
"__nv_double2int_rn": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 176 |
+
"__nv_double2int_ru": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 177 |
+
"__nv_double2int_rz": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 178 |
+
"__nv_double2ll_rd": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 179 |
+
"__nv_double2ll_rn": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 180 |
+
"__nv_double2ll_ru": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 181 |
+
"__nv_double2ll_rz": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 182 |
+
"__nv_double2loint": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 183 |
+
"__nv_double2uint_rd": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 184 |
+
"__nv_double2uint_rn": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 185 |
+
"__nv_double2uint_ru": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 186 |
+
"__nv_double2uint_rz": (int32, [arg(name="d", ty=float64, is_ptr=False)]),
|
| 187 |
+
"__nv_double2ull_rd": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 188 |
+
"__nv_double2ull_rn": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 189 |
+
"__nv_double2ull_ru": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 190 |
+
"__nv_double2ull_rz": (int64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 191 |
+
"__nv_double_as_longlong": (
|
| 192 |
+
int64,
|
| 193 |
+
[arg(name="x", ty=float64, is_ptr=False)],
|
| 194 |
+
),
|
| 195 |
+
"__nv_drcp_rd": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 196 |
+
"__nv_drcp_rn": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 197 |
+
"__nv_drcp_ru": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 198 |
+
"__nv_drcp_rz": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 199 |
+
"__nv_dsqrt_rd": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 200 |
+
"__nv_dsqrt_rn": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 201 |
+
"__nv_dsqrt_ru": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 202 |
+
"__nv_dsqrt_rz": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 203 |
+
"__nv_erf": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 204 |
+
"__nv_erfc": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 205 |
+
"__nv_erfcf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 206 |
+
"__nv_erfcinv": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 207 |
+
"__nv_erfcinvf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 208 |
+
"__nv_erfcx": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 209 |
+
"__nv_erfcxf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 210 |
+
"__nv_erff": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 211 |
+
"__nv_erfinv": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 212 |
+
"__nv_erfinvf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 213 |
+
"__nv_exp": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 214 |
+
"__nv_exp10": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 215 |
+
"__nv_exp10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 216 |
+
"__nv_exp2": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 217 |
+
"__nv_exp2f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 218 |
+
"__nv_expf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 219 |
+
"__nv_expm1": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 220 |
+
"__nv_expm1f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 221 |
+
"__nv_fabs": (float64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 222 |
+
"__nv_fabsf": (float32, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 223 |
+
"__nv_fadd_rd": (
|
| 224 |
+
float32,
|
| 225 |
+
[
|
| 226 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 227 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 228 |
+
],
|
| 229 |
+
),
|
| 230 |
+
"__nv_fadd_rn": (
|
| 231 |
+
float32,
|
| 232 |
+
[
|
| 233 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 234 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 235 |
+
],
|
| 236 |
+
),
|
| 237 |
+
"__nv_fadd_ru": (
|
| 238 |
+
float32,
|
| 239 |
+
[
|
| 240 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 241 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 242 |
+
],
|
| 243 |
+
),
|
| 244 |
+
"__nv_fadd_rz": (
|
| 245 |
+
float32,
|
| 246 |
+
[
|
| 247 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 248 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 249 |
+
],
|
| 250 |
+
),
|
| 251 |
+
"__nv_fast_cosf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 252 |
+
"__nv_fast_exp10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 253 |
+
"__nv_fast_expf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 254 |
+
"__nv_fast_fdividef": (
|
| 255 |
+
float32,
|
| 256 |
+
[
|
| 257 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 258 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 259 |
+
],
|
| 260 |
+
),
|
| 261 |
+
"__nv_fast_log10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 262 |
+
"__nv_fast_log2f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 263 |
+
"__nv_fast_logf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 264 |
+
"__nv_fast_powf": (
|
| 265 |
+
float32,
|
| 266 |
+
[
|
| 267 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 268 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 269 |
+
],
|
| 270 |
+
),
|
| 271 |
+
"__nv_fast_sincosf": (
|
| 272 |
+
void,
|
| 273 |
+
[
|
| 274 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 275 |
+
arg(name="sptr", ty=float32, is_ptr=True),
|
| 276 |
+
arg(name="cptr", ty=float32, is_ptr=True),
|
| 277 |
+
],
|
| 278 |
+
),
|
| 279 |
+
"__nv_fast_sinf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 280 |
+
"__nv_fast_tanf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 281 |
+
"__nv_fdim": (
|
| 282 |
+
float64,
|
| 283 |
+
[
|
| 284 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 285 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 286 |
+
],
|
| 287 |
+
),
|
| 288 |
+
"__nv_fdimf": (
|
| 289 |
+
float32,
|
| 290 |
+
[
|
| 291 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 292 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 293 |
+
],
|
| 294 |
+
),
|
| 295 |
+
"__nv_fdiv_rd": (
|
| 296 |
+
float32,
|
| 297 |
+
[
|
| 298 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 299 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 300 |
+
],
|
| 301 |
+
),
|
| 302 |
+
"__nv_fdiv_rn": (
|
| 303 |
+
float32,
|
| 304 |
+
[
|
| 305 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 306 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 307 |
+
],
|
| 308 |
+
),
|
| 309 |
+
"__nv_fdiv_ru": (
|
| 310 |
+
float32,
|
| 311 |
+
[
|
| 312 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 313 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 314 |
+
],
|
| 315 |
+
),
|
| 316 |
+
"__nv_fdiv_rz": (
|
| 317 |
+
float32,
|
| 318 |
+
[
|
| 319 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 320 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 321 |
+
],
|
| 322 |
+
),
|
| 323 |
+
"__nv_ffs": (int32, [arg(name="x", ty=int32, is_ptr=False)]),
|
| 324 |
+
"__nv_ffsll": (int32, [arg(name="x", ty=int64, is_ptr=False)]),
|
| 325 |
+
"__nv_finitef": (int32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 326 |
+
"__nv_float2half_rn": (int16, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 327 |
+
"__nv_float2int_rd": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 328 |
+
"__nv_float2int_rn": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 329 |
+
"__nv_float2int_ru": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 330 |
+
"__nv_float2int_rz": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 331 |
+
"__nv_float2ll_rd": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 332 |
+
"__nv_float2ll_rn": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 333 |
+
"__nv_float2ll_ru": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 334 |
+
"__nv_float2ll_rz": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 335 |
+
"__nv_float2uint_rd": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 336 |
+
"__nv_float2uint_rn": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 337 |
+
"__nv_float2uint_ru": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 338 |
+
"__nv_float2uint_rz": (int32, [arg(name="in", ty=float32, is_ptr=False)]),
|
| 339 |
+
"__nv_float2ull_rd": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 340 |
+
"__nv_float2ull_rn": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 341 |
+
"__nv_float2ull_ru": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 342 |
+
"__nv_float2ull_rz": (int64, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 343 |
+
"__nv_float_as_int": (int32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 344 |
+
"__nv_floor": (float64, [arg(name="f", ty=float64, is_ptr=False)]),
|
| 345 |
+
"__nv_floorf": (float32, [arg(name="f", ty=float32, is_ptr=False)]),
|
| 346 |
+
"__nv_fma": (
|
| 347 |
+
float64,
|
| 348 |
+
[
|
| 349 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 350 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 351 |
+
arg(name="z", ty=float64, is_ptr=False),
|
| 352 |
+
],
|
| 353 |
+
),
|
| 354 |
+
"__nv_fma_rd": (
|
| 355 |
+
float64,
|
| 356 |
+
[
|
| 357 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 358 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 359 |
+
arg(name="z", ty=float64, is_ptr=False),
|
| 360 |
+
],
|
| 361 |
+
),
|
| 362 |
+
"__nv_fma_rn": (
|
| 363 |
+
float64,
|
| 364 |
+
[
|
| 365 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 366 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 367 |
+
arg(name="z", ty=float64, is_ptr=False),
|
| 368 |
+
],
|
| 369 |
+
),
|
| 370 |
+
"__nv_fma_ru": (
|
| 371 |
+
float64,
|
| 372 |
+
[
|
| 373 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 374 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 375 |
+
arg(name="z", ty=float64, is_ptr=False),
|
| 376 |
+
],
|
| 377 |
+
),
|
| 378 |
+
"__nv_fma_rz": (
|
| 379 |
+
float64,
|
| 380 |
+
[
|
| 381 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 382 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 383 |
+
arg(name="z", ty=float64, is_ptr=False),
|
| 384 |
+
],
|
| 385 |
+
),
|
| 386 |
+
"__nv_fmaf": (
|
| 387 |
+
float32,
|
| 388 |
+
[
|
| 389 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 390 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 391 |
+
arg(name="z", ty=float32, is_ptr=False),
|
| 392 |
+
],
|
| 393 |
+
),
|
| 394 |
+
"__nv_fmaf_rd": (
|
| 395 |
+
float32,
|
| 396 |
+
[
|
| 397 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 398 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 399 |
+
arg(name="z", ty=float32, is_ptr=False),
|
| 400 |
+
],
|
| 401 |
+
),
|
| 402 |
+
"__nv_fmaf_rn": (
|
| 403 |
+
float32,
|
| 404 |
+
[
|
| 405 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 406 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 407 |
+
arg(name="z", ty=float32, is_ptr=False),
|
| 408 |
+
],
|
| 409 |
+
),
|
| 410 |
+
"__nv_fmaf_ru": (
|
| 411 |
+
float32,
|
| 412 |
+
[
|
| 413 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 414 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 415 |
+
arg(name="z", ty=float32, is_ptr=False),
|
| 416 |
+
],
|
| 417 |
+
),
|
| 418 |
+
"__nv_fmaf_rz": (
|
| 419 |
+
float32,
|
| 420 |
+
[
|
| 421 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 422 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 423 |
+
arg(name="z", ty=float32, is_ptr=False),
|
| 424 |
+
],
|
| 425 |
+
),
|
| 426 |
+
"__nv_fmax": (
|
| 427 |
+
float64,
|
| 428 |
+
[
|
| 429 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 430 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 431 |
+
],
|
| 432 |
+
),
|
| 433 |
+
"__nv_fmaxf": (
|
| 434 |
+
float32,
|
| 435 |
+
[
|
| 436 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 437 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 438 |
+
],
|
| 439 |
+
),
|
| 440 |
+
"__nv_fmin": (
|
| 441 |
+
float64,
|
| 442 |
+
[
|
| 443 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 444 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 445 |
+
],
|
| 446 |
+
),
|
| 447 |
+
"__nv_fminf": (
|
| 448 |
+
float32,
|
| 449 |
+
[
|
| 450 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 451 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 452 |
+
],
|
| 453 |
+
),
|
| 454 |
+
"__nv_fmod": (
|
| 455 |
+
float64,
|
| 456 |
+
[
|
| 457 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 458 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 459 |
+
],
|
| 460 |
+
),
|
| 461 |
+
"__nv_fmodf": (
|
| 462 |
+
float32,
|
| 463 |
+
[
|
| 464 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 465 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 466 |
+
],
|
| 467 |
+
),
|
| 468 |
+
"__nv_fmul_rd": (
|
| 469 |
+
float32,
|
| 470 |
+
[
|
| 471 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 472 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 473 |
+
],
|
| 474 |
+
),
|
| 475 |
+
"__nv_fmul_rn": (
|
| 476 |
+
float32,
|
| 477 |
+
[
|
| 478 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 479 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 480 |
+
],
|
| 481 |
+
),
|
| 482 |
+
"__nv_fmul_ru": (
|
| 483 |
+
float32,
|
| 484 |
+
[
|
| 485 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 486 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 487 |
+
],
|
| 488 |
+
),
|
| 489 |
+
"__nv_fmul_rz": (
|
| 490 |
+
float32,
|
| 491 |
+
[
|
| 492 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 493 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 494 |
+
],
|
| 495 |
+
),
|
| 496 |
+
"__nv_frcp_rd": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 497 |
+
"__nv_frcp_rn": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 498 |
+
"__nv_frcp_ru": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 499 |
+
"__nv_frcp_rz": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 500 |
+
"__nv_frexp": (
|
| 501 |
+
float64,
|
| 502 |
+
[
|
| 503 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 504 |
+
arg(name="b", ty=int32, is_ptr=True),
|
| 505 |
+
],
|
| 506 |
+
),
|
| 507 |
+
"__nv_frexpf": (
|
| 508 |
+
float32,
|
| 509 |
+
[
|
| 510 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 511 |
+
arg(name="b", ty=int32, is_ptr=True),
|
| 512 |
+
],
|
| 513 |
+
),
|
| 514 |
+
"__nv_frsqrt_rn": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 515 |
+
"__nv_fsqrt_rd": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 516 |
+
"__nv_fsqrt_rn": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 517 |
+
"__nv_fsqrt_ru": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 518 |
+
"__nv_fsqrt_rz": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 519 |
+
"__nv_fsub_rd": (
|
| 520 |
+
float32,
|
| 521 |
+
[
|
| 522 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 523 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 524 |
+
],
|
| 525 |
+
),
|
| 526 |
+
"__nv_fsub_rn": (
|
| 527 |
+
float32,
|
| 528 |
+
[
|
| 529 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 530 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 531 |
+
],
|
| 532 |
+
),
|
| 533 |
+
"__nv_fsub_ru": (
|
| 534 |
+
float32,
|
| 535 |
+
[
|
| 536 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 537 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 538 |
+
],
|
| 539 |
+
),
|
| 540 |
+
"__nv_fsub_rz": (
|
| 541 |
+
float32,
|
| 542 |
+
[
|
| 543 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 544 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 545 |
+
],
|
| 546 |
+
),
|
| 547 |
+
"__nv_hadd": (
|
| 548 |
+
int32,
|
| 549 |
+
[
|
| 550 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 551 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 552 |
+
],
|
| 553 |
+
),
|
| 554 |
+
"__nv_half2float": (float32, [arg(name="h", ty=int16, is_ptr=False)]),
|
| 555 |
+
"__nv_hiloint2double": (
|
| 556 |
+
float64,
|
| 557 |
+
[
|
| 558 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 559 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 560 |
+
],
|
| 561 |
+
),
|
| 562 |
+
"__nv_hypot": (
|
| 563 |
+
float64,
|
| 564 |
+
[
|
| 565 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 566 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 567 |
+
],
|
| 568 |
+
),
|
| 569 |
+
"__nv_hypotf": (
|
| 570 |
+
float32,
|
| 571 |
+
[
|
| 572 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 573 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 574 |
+
],
|
| 575 |
+
),
|
| 576 |
+
"__nv_ilogb": (int32, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 577 |
+
"__nv_ilogbf": (int32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 578 |
+
"__nv_int2double_rn": (float64, [arg(name="i", ty=int32, is_ptr=False)]),
|
| 579 |
+
"__nv_int2float_rd": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 580 |
+
"__nv_int2float_rn": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 581 |
+
"__nv_int2float_ru": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 582 |
+
"__nv_int2float_rz": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 583 |
+
"__nv_int_as_float": (float32, [arg(name="x", ty=int32, is_ptr=False)]),
|
| 584 |
+
"__nv_isfinited": (int32, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 585 |
+
"__nv_isinfd": (int32, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 586 |
+
"__nv_isinff": (int32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 587 |
+
"__nv_isnand": (int32, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 588 |
+
"__nv_isnanf": (int32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 589 |
+
"__nv_j0": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 590 |
+
"__nv_j0f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 591 |
+
"__nv_j1": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 592 |
+
"__nv_j1f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 593 |
+
"__nv_jn": (
|
| 594 |
+
float64,
|
| 595 |
+
[
|
| 596 |
+
arg(name="n", ty=int32, is_ptr=False),
|
| 597 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 598 |
+
],
|
| 599 |
+
),
|
| 600 |
+
"__nv_jnf": (
|
| 601 |
+
float32,
|
| 602 |
+
[
|
| 603 |
+
arg(name="n", ty=int32, is_ptr=False),
|
| 604 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 605 |
+
],
|
| 606 |
+
),
|
| 607 |
+
"__nv_ldexp": (
|
| 608 |
+
float64,
|
| 609 |
+
[
|
| 610 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 611 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 612 |
+
],
|
| 613 |
+
),
|
| 614 |
+
"__nv_ldexpf": (
|
| 615 |
+
float32,
|
| 616 |
+
[
|
| 617 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 618 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 619 |
+
],
|
| 620 |
+
),
|
| 621 |
+
"__nv_lgamma": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 622 |
+
"__nv_lgammaf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 623 |
+
"__nv_ll2double_rd": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 624 |
+
"__nv_ll2double_rn": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 625 |
+
"__nv_ll2double_ru": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 626 |
+
"__nv_ll2double_rz": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 627 |
+
"__nv_ll2float_rd": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 628 |
+
"__nv_ll2float_rn": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 629 |
+
"__nv_ll2float_ru": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 630 |
+
"__nv_ll2float_rz": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 631 |
+
"__nv_llabs": (int64, [arg(name="x", ty=int64, is_ptr=False)]),
|
| 632 |
+
"__nv_llmax": (
|
| 633 |
+
int64,
|
| 634 |
+
[
|
| 635 |
+
arg(name="x", ty=int64, is_ptr=False),
|
| 636 |
+
arg(name="y", ty=int64, is_ptr=False),
|
| 637 |
+
],
|
| 638 |
+
),
|
| 639 |
+
"__nv_llmin": (
|
| 640 |
+
int64,
|
| 641 |
+
[
|
| 642 |
+
arg(name="x", ty=int64, is_ptr=False),
|
| 643 |
+
arg(name="y", ty=int64, is_ptr=False),
|
| 644 |
+
],
|
| 645 |
+
),
|
| 646 |
+
"__nv_llrint": (int64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 647 |
+
"__nv_llrintf": (int64, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 648 |
+
"__nv_llround": (int64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 649 |
+
"__nv_llroundf": (int64, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 650 |
+
"__nv_log": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 651 |
+
"__nv_log10": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 652 |
+
"__nv_log10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 653 |
+
"__nv_log1p": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 654 |
+
"__nv_log1pf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 655 |
+
"__nv_log2": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 656 |
+
"__nv_log2f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 657 |
+
"__nv_logb": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 658 |
+
"__nv_logbf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 659 |
+
"__nv_logf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 660 |
+
"__nv_longlong_as_double": (
|
| 661 |
+
float64,
|
| 662 |
+
[arg(name="x", ty=int64, is_ptr=False)],
|
| 663 |
+
),
|
| 664 |
+
"__nv_max": (
|
| 665 |
+
int32,
|
| 666 |
+
[
|
| 667 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 668 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 669 |
+
],
|
| 670 |
+
),
|
| 671 |
+
"__nv_min": (
|
| 672 |
+
int32,
|
| 673 |
+
[
|
| 674 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 675 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 676 |
+
],
|
| 677 |
+
),
|
| 678 |
+
"__nv_modf": (
|
| 679 |
+
float64,
|
| 680 |
+
[
|
| 681 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 682 |
+
arg(name="b", ty=float64, is_ptr=True),
|
| 683 |
+
],
|
| 684 |
+
),
|
| 685 |
+
"__nv_modff": (
|
| 686 |
+
float32,
|
| 687 |
+
[
|
| 688 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 689 |
+
arg(name="b", ty=float32, is_ptr=True),
|
| 690 |
+
],
|
| 691 |
+
),
|
| 692 |
+
"__nv_mul24": (
|
| 693 |
+
int32,
|
| 694 |
+
[
|
| 695 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 696 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 697 |
+
],
|
| 698 |
+
),
|
| 699 |
+
"__nv_mul64hi": (
|
| 700 |
+
int64,
|
| 701 |
+
[
|
| 702 |
+
arg(name="x", ty=int64, is_ptr=False),
|
| 703 |
+
arg(name="y", ty=int64, is_ptr=False),
|
| 704 |
+
],
|
| 705 |
+
),
|
| 706 |
+
"__nv_mulhi": (
|
| 707 |
+
int32,
|
| 708 |
+
[
|
| 709 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 710 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 711 |
+
],
|
| 712 |
+
),
|
| 713 |
+
# __nv_nan and __nv_nanf are excluded - they return a representation of a
|
| 714 |
+
# quiet NaN, but the argument they take seems to be undocumented, and
|
| 715 |
+
# follows a strange form - it is not an output like every other pointer
|
| 716 |
+
# argument. If a NaN is required, one can be obtained in CUDA Python by
|
| 717 |
+
# other means, e.g. `math.nan`. They are left in this list for completeness
|
| 718 |
+
# / reference.
|
| 719 |
+
# "__nv_nan": (float64, [arg(name="tagp", ty=int8, is_ptr=True)]),
|
| 720 |
+
# "__nv_nanf": (float32, [arg(name="tagp", ty=int8, is_ptr=True)]),
|
| 721 |
+
"__nv_nearbyint": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 722 |
+
"__nv_nearbyintf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 723 |
+
"__nv_nextafter": (
|
| 724 |
+
float64,
|
| 725 |
+
[
|
| 726 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 727 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 728 |
+
],
|
| 729 |
+
),
|
| 730 |
+
"__nv_nextafterf": (
|
| 731 |
+
float32,
|
| 732 |
+
[
|
| 733 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 734 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 735 |
+
],
|
| 736 |
+
),
|
| 737 |
+
"__nv_normcdf": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 738 |
+
"__nv_normcdff": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 739 |
+
"__nv_normcdfinv": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 740 |
+
"__nv_normcdfinvf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 741 |
+
"__nv_popc": (int32, [arg(name="x", ty=int32, is_ptr=False)]),
|
| 742 |
+
"__nv_popcll": (int32, [arg(name="x", ty=int64, is_ptr=False)]),
|
| 743 |
+
"__nv_pow": (
|
| 744 |
+
float64,
|
| 745 |
+
[
|
| 746 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 747 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 748 |
+
],
|
| 749 |
+
),
|
| 750 |
+
"__nv_powf": (
|
| 751 |
+
float32,
|
| 752 |
+
[
|
| 753 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 754 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 755 |
+
],
|
| 756 |
+
),
|
| 757 |
+
"__nv_powi": (
|
| 758 |
+
float64,
|
| 759 |
+
[
|
| 760 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 761 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 762 |
+
],
|
| 763 |
+
),
|
| 764 |
+
"__nv_powif": (
|
| 765 |
+
float32,
|
| 766 |
+
[
|
| 767 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 768 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 769 |
+
],
|
| 770 |
+
),
|
| 771 |
+
"__nv_rcbrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 772 |
+
"__nv_rcbrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 773 |
+
"__nv_remainder": (
|
| 774 |
+
float64,
|
| 775 |
+
[
|
| 776 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 777 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 778 |
+
],
|
| 779 |
+
),
|
| 780 |
+
"__nv_remainderf": (
|
| 781 |
+
float32,
|
| 782 |
+
[
|
| 783 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 784 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 785 |
+
],
|
| 786 |
+
),
|
| 787 |
+
"__nv_remquo": (
|
| 788 |
+
float64,
|
| 789 |
+
[
|
| 790 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 791 |
+
arg(name="y", ty=float64, is_ptr=False),
|
| 792 |
+
arg(name="c", ty=int32, is_ptr=True),
|
| 793 |
+
],
|
| 794 |
+
),
|
| 795 |
+
"__nv_remquof": (
|
| 796 |
+
float32,
|
| 797 |
+
[
|
| 798 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 799 |
+
arg(name="y", ty=float32, is_ptr=False),
|
| 800 |
+
arg(name="quo", ty=int32, is_ptr=True),
|
| 801 |
+
],
|
| 802 |
+
),
|
| 803 |
+
"__nv_rhadd": (
|
| 804 |
+
int32,
|
| 805 |
+
[
|
| 806 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 807 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 808 |
+
],
|
| 809 |
+
),
|
| 810 |
+
"__nv_rint": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 811 |
+
"__nv_rintf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 812 |
+
"__nv_round": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 813 |
+
"__nv_roundf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 814 |
+
"__nv_rsqrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 815 |
+
"__nv_rsqrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 816 |
+
"__nv_sad": (
|
| 817 |
+
int32,
|
| 818 |
+
[
|
| 819 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 820 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 821 |
+
arg(name="z", ty=int32, is_ptr=False),
|
| 822 |
+
],
|
| 823 |
+
),
|
| 824 |
+
"__nv_saturatef": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 825 |
+
"__nv_scalbn": (
|
| 826 |
+
float64,
|
| 827 |
+
[
|
| 828 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 829 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 830 |
+
],
|
| 831 |
+
),
|
| 832 |
+
"__nv_scalbnf": (
|
| 833 |
+
float32,
|
| 834 |
+
[
|
| 835 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 836 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 837 |
+
],
|
| 838 |
+
),
|
| 839 |
+
"__nv_signbitd": (int32, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 840 |
+
"__nv_signbitf": (int32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 841 |
+
"__nv_sin": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 842 |
+
"__nv_sincos": (
|
| 843 |
+
void,
|
| 844 |
+
[
|
| 845 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 846 |
+
arg(name="sptr", ty=float64, is_ptr=True),
|
| 847 |
+
arg(name="cptr", ty=float64, is_ptr=True),
|
| 848 |
+
],
|
| 849 |
+
),
|
| 850 |
+
"__nv_sincosf": (
|
| 851 |
+
void,
|
| 852 |
+
[
|
| 853 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 854 |
+
arg(name="sptr", ty=float32, is_ptr=True),
|
| 855 |
+
arg(name="cptr", ty=float32, is_ptr=True),
|
| 856 |
+
],
|
| 857 |
+
),
|
| 858 |
+
"__nv_sincospi": (
|
| 859 |
+
void,
|
| 860 |
+
[
|
| 861 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 862 |
+
arg(name="sptr", ty=float64, is_ptr=True),
|
| 863 |
+
arg(name="cptr", ty=float64, is_ptr=True),
|
| 864 |
+
],
|
| 865 |
+
),
|
| 866 |
+
"__nv_sincospif": (
|
| 867 |
+
void,
|
| 868 |
+
[
|
| 869 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 870 |
+
arg(name="sptr", ty=float32, is_ptr=True),
|
| 871 |
+
arg(name="cptr", ty=float32, is_ptr=True),
|
| 872 |
+
],
|
| 873 |
+
),
|
| 874 |
+
"__nv_sinf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 875 |
+
"__nv_sinh": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 876 |
+
"__nv_sinhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 877 |
+
"__nv_sinpi": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 878 |
+
"__nv_sinpif": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 879 |
+
"__nv_sqrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 880 |
+
"__nv_sqrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 881 |
+
"__nv_tan": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 882 |
+
"__nv_tanf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 883 |
+
"__nv_tanh": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 884 |
+
"__nv_tanhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 885 |
+
"__nv_tgamma": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 886 |
+
"__nv_tgammaf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 887 |
+
"__nv_trunc": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 888 |
+
"__nv_truncf": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 889 |
+
"__nv_uhadd": (
|
| 890 |
+
int32,
|
| 891 |
+
[
|
| 892 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 893 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 894 |
+
],
|
| 895 |
+
),
|
| 896 |
+
"__nv_uint2double_rn": (float64, [arg(name="i", ty=int32, is_ptr=False)]),
|
| 897 |
+
"__nv_uint2float_rd": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 898 |
+
"__nv_uint2float_rn": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 899 |
+
"__nv_uint2float_ru": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 900 |
+
"__nv_uint2float_rz": (float32, [arg(name="in", ty=int32, is_ptr=False)]),
|
| 901 |
+
"__nv_ull2double_rd": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 902 |
+
"__nv_ull2double_rn": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 903 |
+
"__nv_ull2double_ru": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 904 |
+
"__nv_ull2double_rz": (float64, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 905 |
+
"__nv_ull2float_rd": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 906 |
+
"__nv_ull2float_rn": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 907 |
+
"__nv_ull2float_ru": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 908 |
+
"__nv_ull2float_rz": (float32, [arg(name="l", ty=int64, is_ptr=False)]),
|
| 909 |
+
"__nv_ullmax": (
|
| 910 |
+
int64,
|
| 911 |
+
[
|
| 912 |
+
arg(name="x", ty=int64, is_ptr=False),
|
| 913 |
+
arg(name="y", ty=int64, is_ptr=False),
|
| 914 |
+
],
|
| 915 |
+
),
|
| 916 |
+
"__nv_ullmin": (
|
| 917 |
+
int64,
|
| 918 |
+
[
|
| 919 |
+
arg(name="x", ty=int64, is_ptr=False),
|
| 920 |
+
arg(name="y", ty=int64, is_ptr=False),
|
| 921 |
+
],
|
| 922 |
+
),
|
| 923 |
+
"__nv_umax": (
|
| 924 |
+
int32,
|
| 925 |
+
[
|
| 926 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 927 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 928 |
+
],
|
| 929 |
+
),
|
| 930 |
+
"__nv_umin": (
|
| 931 |
+
int32,
|
| 932 |
+
[
|
| 933 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 934 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 935 |
+
],
|
| 936 |
+
),
|
| 937 |
+
"__nv_umul24": (
|
| 938 |
+
int32,
|
| 939 |
+
[
|
| 940 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 941 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 942 |
+
],
|
| 943 |
+
),
|
| 944 |
+
"__nv_umul64hi": (
|
| 945 |
+
int64,
|
| 946 |
+
[
|
| 947 |
+
arg(name="x", ty=int64, is_ptr=False),
|
| 948 |
+
arg(name="y", ty=int64, is_ptr=False),
|
| 949 |
+
],
|
| 950 |
+
),
|
| 951 |
+
"__nv_umulhi": (
|
| 952 |
+
int32,
|
| 953 |
+
[
|
| 954 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 955 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 956 |
+
],
|
| 957 |
+
),
|
| 958 |
+
"__nv_urhadd": (
|
| 959 |
+
int32,
|
| 960 |
+
[
|
| 961 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 962 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 963 |
+
],
|
| 964 |
+
),
|
| 965 |
+
"__nv_usad": (
|
| 966 |
+
int32,
|
| 967 |
+
[
|
| 968 |
+
arg(name="x", ty=int32, is_ptr=False),
|
| 969 |
+
arg(name="y", ty=int32, is_ptr=False),
|
| 970 |
+
arg(name="z", ty=int32, is_ptr=False),
|
| 971 |
+
],
|
| 972 |
+
),
|
| 973 |
+
"__nv_y0": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 974 |
+
"__nv_y0f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 975 |
+
"__nv_y1": (float64, [arg(name="x", ty=float64, is_ptr=False)]),
|
| 976 |
+
"__nv_y1f": (float32, [arg(name="x", ty=float32, is_ptr=False)]),
|
| 977 |
+
"__nv_yn": (
|
| 978 |
+
float64,
|
| 979 |
+
[
|
| 980 |
+
arg(name="n", ty=int32, is_ptr=False),
|
| 981 |
+
arg(name="x", ty=float64, is_ptr=False),
|
| 982 |
+
],
|
| 983 |
+
),
|
| 984 |
+
"__nv_ynf": (
|
| 985 |
+
float32,
|
| 986 |
+
[
|
| 987 |
+
arg(name="n", ty=int32, is_ptr=False),
|
| 988 |
+
arg(name="x", ty=float32, is_ptr=False),
|
| 989 |
+
],
|
| 990 |
+
),
|
| 991 |
+
}
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
def create_signature(retty, args):
    """
    Given the return type and arguments for a libdevice function, return the
    signature of the stub function used to call it from CUDA Python.
    """
    # Pointer arguments are out-parameters of the libdevice function, so in
    # the stub they appear among the returned values instead.
    out_types = [a.ty for a in args if a.is_ptr]

    # A void return contributes nothing to the returned values.
    if retty != void:
        out_types.insert(0, retty)

    # Multiple returned values are packaged as a tuple; a single one is
    # returned directly.
    if len(out_types) > 1:
        stub_retty = Tuple(out_types)
    else:
        stub_retty = out_types[0]

    # Only non-pointer arguments remain as stub arguments.
    in_types = [a.ty for a in args if not a.is_ptr]

    return signature(stub_retty, *in_types)
|
| 1015 |
+
|
| 1016 |
+
|
| 1017 |
+
# The following code generates the stubs for libdevice functions.
|
| 1018 |
+
#
|
| 1019 |
+
# Stubs can be regenerated (e.g. if the functions dict above is modified) with:
|
| 1020 |
+
#
|
| 1021 |
+
# python -c "from numba.cuda.libdevicefuncs import generate_stubs; \
|
| 1022 |
+
# generate_stubs()" > numba/cuda/libdevice.py
|
| 1023 |
+
|
| 1024 |
+
# Template for the docstring of each generated libdevice stub: a link to the
# official documentation for the wrapped function, followed by one
# reST-style param/type entry per argument and the return type.
docstring_template = """
See https://docs.nvidia.com/cuda/libdevice-users-guide/{func}.html

{param_types}
:rtype: {retty}
"""

# Template for a single parameter's documentation within the stub docstring;
# formatted with ``a`` bound to an arg descriptor (name, ty, is_ptr).
param_template = """\
:param {a.name}: Argument.
:type {a.name}: {a.ty}"""
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
def generate_stubs():
    """Print a stub definition for every entry in ``functions`` to stdout.

    The output is the body of ``numba/cuda/libdevice.py`` — one documented
    Python function per libdevice entry, named without the ``__nv_`` prefix.
    """
    for name, (retty, args) in functions.items():
        # Some libdevice functions have arguments called `in`, which causes a
        # syntax error in Python, so we rename these to `x`.
        def argname(arg):
            return "x" if arg.name == "in" else arg.name

        # Pointer args are outputs, not inputs, so they do not appear in the
        # stub's parameter list.
        value_args = [a for a in args if not a.is_ptr]
        argstr = ", ".join(argname(a) for a in value_args)
        stub_sig = create_signature(retty, args)

        param_types = "\n".join(
            param_template.format(a=a) for a in value_args
        )
        docstring = docstring_template.format(
            param_types=param_types, retty=stub_sig.return_type, func=name
        )
        docstring = indent(docstring, "    ")
        print(f'def {name[5:]}({argstr}):\n    """{docstring}"""\n\n')
|
lib/python3.10/site-packages/numba/cuda/libdeviceimpl.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llvmlite import ir
|
| 2 |
+
from numba.core import cgutils, types
|
| 3 |
+
from numba.core.imputils import Registry
|
| 4 |
+
from numba.cuda import libdevice, libdevicefuncs
|
| 5 |
+
|
| 6 |
+
registry = Registry()
|
| 7 |
+
lower = registry.lower
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def libdevice_implement(func, retty, nbargs):
    """Register a lowering implementation for a libdevice function that has
    no pointer (output) arguments.

    :param func: Name of the libdevice function, e.g. ``"__nv_sinf"``.
    :param retty: Numba type returned by the function.
    :param nbargs: Sequence of arg descriptors (``name``, ``ty``, ``is_ptr``).
    """
    def core(context, builder, sig, args):
        # Declare (or reuse) the libdevice function in the current module,
        # then emit a direct call to it.
        lmod = builder.module
        fretty = context.get_value_type(retty)
        fargtys = [context.get_value_type(arg.ty) for arg in nbargs]
        fnty = ir.FunctionType(fretty, fargtys)
        fn = cgutils.get_or_insert_function(lmod, fnty, func)
        return builder.call(fn, args)

    # The typing key is the stub in numba.cuda.libdevice, named without the
    # "__nv_" prefix.
    key = getattr(libdevice, func[5:])

    # Fix: iterate the nbargs parameter rather than relying on a
    # module-level `args` global that happened to hold the same value when
    # this function is called from the registration loop below.
    argtys = [arg.ty for arg in nbargs if not arg.is_ptr]
    lower(key, *argtys)(core)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def libdevice_implement_multiple_returns(func, retty, prototype_args):
    """Register a lowering implementation for a libdevice function that has
    one or more pointer (output) arguments.

    The generated call allocates stack slots for the pointer arguments,
    invokes the libdevice function, then packs the direct return value (if
    non-void) together with the values loaded from the stack slots into a
    tuple, matching the stub signature from ``create_signature``.

    :param func: Name of the libdevice function, e.g. ``"__nv_sincosf"``.
    :param retty: Numba type directly returned by the libdevice function.
    :param prototype_args: Sequence of arg descriptors (name, ty, is_ptr).
    """
    # The Numba-level signature of the stub (outputs folded into the return).
    sig = libdevicefuncs.create_signature(retty, prototype_args)
    nb_retty = sig.return_type

    def core(context, builder, sig, args):
        lmod = builder.module

        # Build the LLVM argument types; pointer args become pointers to the
        # value type.
        fargtys = []
        for arg in prototype_args:
            ty = context.get_value_type(arg.ty)
            if arg.is_ptr:
                ty = ty.as_pointer()
            fargtys.append(ty)

        fretty = context.get_value_type(retty)

        fnty = ir.FunctionType(fretty, fargtys)
        fn = cgutils.get_or_insert_function(lmod, fnty, func)

        # For returned values that are returned through a pointer, we need to
        # allocate variables on the stack and pass a pointer to them.
        actual_args = []
        virtual_args = []
        arg_idx = 0
        for arg in prototype_args:
            if arg.is_ptr:
                # Allocate space for return value and add to args
                tmp_arg = cgutils.alloca_once(builder,
                                              context.get_value_type(arg.ty))
                actual_args.append(tmp_arg)
                virtual_args.append(tmp_arg)
            else:
                # Non-pointer args come from the caller, in order.
                actual_args.append(args[arg_idx])
                arg_idx += 1

        ret = builder.call(fn, actual_args)

        # Following the call, we need to assemble the returned values into a
        # tuple for returning back to the caller.
        tuple_args = []
        if retty != types.void:
            tuple_args.append(ret)
        for arg in virtual_args:
            tuple_args.append(builder.load(arg))

        # UniTuple (homogeneous) maps to an LLVM array; a heterogeneous Tuple
        # maps to an LLVM struct.
        if isinstance(nb_retty, types.UniTuple):
            return cgutils.pack_array(builder, tuple_args)
        else:
            return cgutils.pack_struct(builder, tuple_args)

    key = getattr(libdevice, func[5:])
    lower(key, *sig.args)(core)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Register a lowering for every known libdevice function. Functions with
# pointer (output) arguments need the multiple-return treatment; the rest
# lower to a plain call. Use a generator with any() rather than
# materializing a throwaway list.
for func, (retty, args) in libdevicefuncs.functions.items():
    if any(arg.is_ptr for arg in args):
        libdevice_implement_multiple_returns(func, retty, args)
    else:
        libdevice_implement(func, retty, args)
|
lib/python3.10/site-packages/numba/cuda/mathimpl.py
ADDED
|
@@ -0,0 +1,448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import operator
|
| 3 |
+
from llvmlite import ir
|
| 4 |
+
from numba.core import types, typing, cgutils, targetconfig
|
| 5 |
+
from numba.core.imputils import Registry
|
| 6 |
+
from numba.types import float32, float64, int64, uint64
|
| 7 |
+
from numba.cuda import libdevice
|
| 8 |
+
from numba import cuda
|
| 9 |
+
|
| 10 |
+
# Registry collecting every math lowering implementation in this module;
# ``lower`` is the decorator used to register each one.
registry = Registry()
lower = registry.lower
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Predicate functions returning a boolean, as triples of
# (float64 libdevice name, float32 libdevice name, Python math function).
booleans = [
    ('isnand', 'isnanf', math.isnan),
    ('isinfd', 'isinff', math.isinf),
    ('isfinited', 'finitef', math.isfinite),
]

# Unary math functions, same (float64 name, float32 name, key) layout.
unarys = [
    ('ceil', 'ceilf', math.ceil),
    ('floor', 'floorf', math.floor),
    ('fabs', 'fabsf', math.fabs),
    ('exp', 'expf', math.exp),
    ('expm1', 'expm1f', math.expm1),
    ('erf', 'erff', math.erf),
    ('erfc', 'erfcf', math.erfc),
    ('tgamma', 'tgammaf', math.gamma),
    ('lgamma', 'lgammaf', math.lgamma),
    ('sqrt', 'sqrtf', math.sqrt),
    ('log', 'logf', math.log),
    ('log2', 'log2f', math.log2),
    ('log10', 'log10f', math.log10),
    ('log1p', 'log1pf', math.log1p),
    ('acosh', 'acoshf', math.acosh),
    ('acos', 'acosf', math.acos),
    ('cos', 'cosf', math.cos),
    ('cosh', 'coshf', math.cosh),
    ('asinh', 'asinhf', math.asinh),
    ('asin', 'asinf', math.asin),
    ('sin', 'sinf', math.sin),
    ('sinh', 'sinhf', math.sinh),
    ('atan', 'atanf', math.atan),
    ('atanh', 'atanhf', math.atanh),
    ('tan', 'tanf', math.tan),
    ('trunc', 'truncf', math.trunc),
]

# float32 libdevice name -> fast variant substituted under fastmath.
unarys_fastmath = {
    'cosf': 'fast_cosf',
    'sinf': 'fast_sinf',
    'tanf': 'fast_tanf',
    'expf': 'fast_expf',
    'log2f': 'fast_log2f',
    'log10f': 'fast_log10f',
    'logf': 'fast_logf',
}

# Binary math functions, same layout as ``unarys``.
binarys = [
    ('copysign', 'copysignf', math.copysign),
    ('atan2', 'atan2f', math.atan2),
    ('pow', 'powf', math.pow),
    ('fmod', 'fmodf', math.fmod),
    ('hypot', 'hypotf', math.hypot),
    ('remainder', 'remainderf', math.remainder),
]

# float32 libdevice name -> fast variant substituted under fastmath.
binarys_fastmath = {'powf': 'fast_powf'}
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@lower(math.isinf, types.Integer)
@lower(math.isnan, types.Integer)
def math_isinf_isnan_int(context, builder, sig, args):
    """Integers are never NaN or infinite, so lower to a constant False."""
    return context.get_constant(types.boolean, 0)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@lower(operator.truediv, types.float32, types.float32)
def maybe_fast_truediv(context, builder, sig, args):
    """Lower float32 true division.

    Under fastmath, libdevice's fast approximate division is used; otherwise
    an IEEE fdiv is emitted, preceded by a zero-denominator check that defers
    to the context's error model.
    """
    if context.fastmath:
        fast_sig = typing.signature(float32, float32, float32)
        fast_div = context.get_function(libdevice.fast_fdividef, fast_sig)
        return fast_div(builder, args)

    numerator, denominator = args
    with cgutils.if_zero(builder, denominator):
        context.error_model.fp_zero_division(builder, ("division by zero",))
    return builder.fdiv(numerator, denominator)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@lower(math.isfinite, types.Integer)
def math_isfinite_int(context, builder, sig, args):
    """Integers are always finite, so lower to a constant True."""
    return context.get_constant(types.boolean, 1)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# float16 lowerings for the math-module functions that have a native half-
# precision intrinsic. Each one compiles a tiny kernel-side function that
# forwards to the corresponding cuda.fp16 intrinsic.

@lower(math.sin, types.float16)
def fp16_sin_impl(context, builder, sig, args):
    """Lower math.sin on float16 via the hsin intrinsic."""
    def impl(x):
        return cuda.fp16.hsin(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.cos, types.float16)
def fp16_cos_impl(context, builder, sig, args):
    """Lower math.cos on float16 via the hcos intrinsic."""
    def impl(x):
        return cuda.fp16.hcos(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.log, types.float16)
def fp16_log_impl(context, builder, sig, args):
    """Lower math.log on float16 via the hlog intrinsic."""
    def impl(x):
        return cuda.fp16.hlog(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.log10, types.float16)
def fp16_log10_impl(context, builder, sig, args):
    """Lower math.log10 on float16 via the hlog10 intrinsic."""
    def impl(x):
        return cuda.fp16.hlog10(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.log2, types.float16)
def fp16_log2_impl(context, builder, sig, args):
    """Lower math.log2 on float16 via the hlog2 intrinsic."""
    def impl(x):
        return cuda.fp16.hlog2(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.exp, types.float16)
def fp16_exp_impl(context, builder, sig, args):
    """Lower math.exp on float16 via the hexp intrinsic."""
    def impl(x):
        return cuda.fp16.hexp(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.floor, types.float16)
def fp16_floor_impl(context, builder, sig, args):
    """Lower math.floor on float16 via the hfloor intrinsic."""
    def impl(x):
        return cuda.fp16.hfloor(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.ceil, types.float16)
def fp16_ceil_impl(context, builder, sig, args):
    """Lower math.ceil on float16 via the hceil intrinsic."""
    def impl(x):
        return cuda.fp16.hceil(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.sqrt, types.float16)
def fp16_sqrt_impl(context, builder, sig, args):
    """Lower math.sqrt on float16 via the hsqrt intrinsic."""
    def impl(x):
        return cuda.fp16.hsqrt(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.fabs, types.float16)
def fp16_fabs_impl(context, builder, sig, args):
    """Lower math.fabs on float16 via the habs intrinsic."""
    def impl(x):
        return cuda.fp16.habs(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.trunc, types.float16)
def fp16_trunc_impl(context, builder, sig, args):
    """Lower math.trunc on float16 via the htrunc intrinsic."""
    def impl(x):
        return cuda.fp16.htrunc(x)

    return context.compile_internal(builder, impl, sig, args)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def impl_boolean(key, ty, libfunc):
    """Register a lowering for a boolean-valued math predicate.

    The libdevice predicate returns an int32, which is cast to a Numba
    boolean before being returned to the caller.
    """
    def lower_boolean_impl(context, builder, sig, args):
        pred_sig = typing.signature(types.int32, ty)
        pred = context.get_function(libfunc, pred_sig)
        raw = pred(builder, args)
        return context.cast(builder, raw, types.int32, types.boolean)

    lower(key, ty)(lower_boolean_impl)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def get_lower_unary_impl(key, ty, libfunc):
    """Return a lowering function for a unary math operation.

    When the context has fastmath enabled and the float32 function has a fast
    libdevice variant, that variant is substituted at lowering time.
    """
    def lower_unary_impl(context, builder, sig, args):
        chosen = libfunc
        if ty == float32 and context.fastmath:
            fast_name = unarys_fastmath.get(libfunc.__name__)
            if fast_name is not None:
                chosen = getattr(libdevice, fast_name)

        impl = context.get_function(chosen, typing.signature(ty, ty))
        return impl(builder, args)

    return lower_unary_impl
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def get_unary_impl_for_fn_and_ty(fn, ty):
    """Find the lowering implementation of unary math function *fn* for
    floating-point type *ty* (float32 or float64).

    :raises RuntimeError: if no implementation exists for (fn, ty).
    """
    # tanh is a special case - because it is not registered like the other
    # unary implementations, it does not appear in the unarys list. However,
    # its implementation can be looked up by key like the other
    # implementations, so we add it to the list we search here.
    tanh_impls = ('tanh', 'tanhf', math.tanh)
    for fname64, fname32, key in unarys + [tanh_impls]:
        if fn == key:
            if ty == float32:
                impl = getattr(libdevice, fname32)
            elif ty == float64:
                impl = getattr(libdevice, fname64)
            else:
                # Fix: previously an unsupported type fell through with
                # `impl` unbound, raising an opaque NameError instead of the
                # intended RuntimeError below.
                break

            return get_lower_unary_impl(key, ty, impl)

    raise RuntimeError(f"Implementation of {fn} for {ty} not found")
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def impl_unary(key, ty, libfunc):
    # Build the lowering for (key, ty) and register it with the registry.
    lower_unary_impl = get_lower_unary_impl(key, ty, libfunc)
    lower(key, ty)(lower_unary_impl)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def impl_unary_int(key, ty, libfunc):
    """Register a unary math lowering for 64-bit integer arguments.

    The integer argument is converted to float64 and the double-precision
    libdevice implementation is called on the converted value.
    """
    def lower_unary_int_impl(context, builder, sig, args):
        argty = sig.args[0]
        if argty == int64:
            convert = builder.sitofp
        elif argty == uint64:
            convert = builder.uitofp
        else:
            raise TypeError(
                'Only 64-bit integers are supported for generic unary int ops'
            )

        fp_arg = convert(args[0], ir.DoubleType())
        fp_sig = typing.signature(float64, float64)
        impl = context.get_function(libfunc, fp_sig)
        return impl(builder, [fp_arg])

    lower(key, ty)(lower_unary_int_impl)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def get_lower_binary_impl(key, ty, libfunc):
    """Return a lowering function for a binary math operation.

    When the context has fastmath enabled and the float32 function has a fast
    libdevice variant, that variant is substituted at lowering time.
    """
    def lower_binary_impl(context, builder, sig, args):
        chosen = libfunc
        if ty == float32 and context.fastmath:
            fast_name = binarys_fastmath.get(libfunc.__name__)
            if fast_name is not None:
                chosen = getattr(libdevice, fast_name)

        impl = context.get_function(chosen, typing.signature(ty, ty, ty))
        return impl(builder, args)

    return lower_binary_impl
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def get_binary_impl_for_fn_and_ty(fn, ty):
    """Find the lowering implementation of binary math function *fn* for
    floating-point type *ty* (float32 or float64).

    :raises RuntimeError: if no implementation exists for (fn, ty).
    """
    for fname64, fname32, key in binarys:
        if fn == key:
            if ty == float32:
                impl = getattr(libdevice, fname32)
            elif ty == float64:
                impl = getattr(libdevice, fname64)
            else:
                # Fix: previously an unsupported type fell through with
                # `impl` unbound, raising an opaque NameError instead of the
                # intended RuntimeError below.
                break

            return get_lower_binary_impl(key, ty, impl)

    raise RuntimeError(f"Implementation of {fn} for {ty} not found")
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def impl_binary(key, ty, libfunc):
    # Build the lowering for (key, ty, ty) and register it with the registry.
    lower_binary_impl = get_lower_binary_impl(key, ty, libfunc)
    lower(key, ty, ty)(lower_binary_impl)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def impl_binary_int(key, ty, libfunc):
    """Register a binary math lowering for 64-bit integer arguments.

    Both integer arguments are converted to float64 and the double-precision
    libdevice implementation is called on the converted values.
    """
    def lower_binary_int_impl(context, builder, sig, args):
        argty = sig.args[0]
        if argty == int64:
            convert = builder.sitofp
        elif argty == uint64:
            convert = builder.uitofp
        else:
            raise TypeError(
                'Only 64-bit integers are supported for generic binary int ops'
            )

        fp_args = [convert(a, ir.DoubleType()) for a in args]
        fp_sig = typing.signature(float64, float64, float64)
        impl = context.get_function(libfunc, fp_sig)
        return impl(builder, fp_args)

    lower(key, ty, ty)(lower_binary_int_impl)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
# Register lowering implementations for every boolean, unary and binary
# math operation listed in the tables defined earlier in this module.
for fname64, fname32, key in booleans:
    impl32 = getattr(libdevice, fname32)
    impl64 = getattr(libdevice, fname64)
    impl_boolean(key, float32, impl32)
    impl_boolean(key, float64, impl64)


for fname64, fname32, key in unarys:
    impl32 = getattr(libdevice, fname32)
    impl64 = getattr(libdevice, fname64)
    impl_unary(key, float32, impl32)
    impl_unary(key, float64, impl64)
    # Integer arguments are converted to double and use the float64 impl.
    impl_unary_int(key, int64, impl64)
    impl_unary_int(key, uint64, impl64)


for fname64, fname32, key in binarys:
    impl32 = getattr(libdevice, fname32)
    impl64 = getattr(libdevice, fname64)
    impl_binary(key, float32, impl32)
    impl_binary(key, float64, impl64)
    impl_binary_int(key, int64, impl64)
    impl_binary_int(key, uint64, impl64)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def impl_pow_int(ty, libfunc):
    # math.pow(float, int32) lowers to libdevice powi/powif, which accept
    # an integer exponent directly.
    def lower_pow_impl_int(context, builder, sig, args):
        powi_sig = typing.signature(ty, ty, types.int32)
        libfunc_impl = context.get_function(libfunc, powi_sig)
        return libfunc_impl(builder, args)

    lower(math.pow, ty, types.int32)(lower_pow_impl_int)


impl_pow_int(types.float32, libdevice.powif)
impl_pow_int(types.float64, libdevice.powi)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def impl_modf(ty, libfunc):
    # math.modf returns a pair (fractional part, integral part), both of
    # the same floating-point type.
    retty = types.UniTuple(ty, 2)

    def lower_modf_impl(context, builder, sig, args):
        modf_sig = typing.signature(retty, ty)
        libfunc_impl = context.get_function(libfunc, modf_sig)
        return libfunc_impl(builder, args)

    lower(math.modf, ty)(lower_modf_impl)


impl_modf(types.float32, libdevice.modff)
impl_modf(types.float64, libdevice.modf)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def impl_frexp(ty, libfunc):
    # math.frexp returns (mantissa: float, exponent: int32).
    retty = types.Tuple((ty, types.int32))

    def lower_frexp_impl(context, builder, sig, args):
        frexp_sig = typing.signature(retty, ty)
        libfunc_impl = context.get_function(libfunc, frexp_sig)
        return libfunc_impl(builder, args)

    lower(math.frexp, ty)(lower_frexp_impl)


impl_frexp(types.float32, libdevice.frexpf)
impl_frexp(types.float64, libdevice.frexp)
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def impl_ldexp(ty, libfunc):
    # math.ldexp(float, int32) -> float, backed by libdevice ldexp/ldexpf.
    def lower_ldexp_impl(context, builder, sig, args):
        ldexp_sig = typing.signature(ty, ty, types.int32)
        libfunc_impl = context.get_function(libfunc, ldexp_sig)
        return libfunc_impl(builder, args)

    lower(math.ldexp, ty, types.int32)(lower_ldexp_impl)


impl_ldexp(types.float32, libdevice.ldexpf)
impl_ldexp(types.float64, libdevice.ldexp)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def impl_tanh(ty, libfunc):
    """Register lowering for math.tanh.

    For float32 with fastmath enabled on devices of compute capability
    >= 7.5, the hardware ``tanh.approx.f32`` PTX instruction is emitted
    instead of the libdevice implementation.
    """
    def lower_tanh_impl(context, builder, sig, args):
        def get_compute_capability():
            # The target's compute capability travels with the compiler
            # flags on the target config stack.
            flags = targetconfig.ConfigStack().top()
            return flags.compute_capability

        def tanh_impl_libdevice():
            tanh_sig = typing.signature(ty, ty)
            libfunc_impl = context.get_function(libfunc, tanh_sig)
            return libfunc_impl(builder, args)

        def tanhf_impl_fastmath():
            # Inline PTX for the approximate hardware tanh instruction.
            fnty = ir.FunctionType(ir.FloatType(), [ir.FloatType()])
            asm = ir.InlineAsm(fnty, 'tanh.approx.f32 $0, $1;', '=f,f')
            return builder.call(asm, args)

        if ty == float32 and context.fastmath:
            cc = get_compute_capability()
            # tanh.approx.f32 requires SM 7.5 or later.
            if cc >= (7,5):
                return tanhf_impl_fastmath()

        return tanh_impl_libdevice()

    lower(math.tanh, ty)(lower_tanh_impl)


impl_tanh(types.float32, libdevice.tanhf)
impl_tanh(types.float64, libdevice.tanh)

# Integer arguments are converted to double and use libdevice tanh.
impl_unary_int(math.tanh, int64, libdevice.tanh)
impl_unary_int(math.tanh, uint64, libdevice.tanh)
|
| 411 |
+
|
| 412 |
+
# Complex power implementations - translations of _Py_c_pow from CPython
|
| 413 |
+
# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151
|
| 414 |
+
#
|
| 415 |
+
# The complex64 variant casts all constants and some variables to ensure that
|
| 416 |
+
# as much computation is done in single precision as possible. A small number
|
| 417 |
+
# of operations are still done in 64-bit, but these come from libdevice code.
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def cpow_implement(fty, cty):
    # Lower complex ** complex via a device-compiled translation of
    # CPython's _Py_c_pow (see the comment block above for the link).
    def core(context, builder, sig, args):
        def cpow_internal(a, b):

            # Special cases mirror CPython's _Py_c_pow.
            if b.real == fty(0.0) and b.imag == fty(0.0):
                return cty(1.0) + cty(0.0j)
            elif a.real == fty(0.0) and b.real == fty(0.0):
                return cty(0.0) + cty(0.0j)

            # Polar decomposition: a = vabs * exp(i * at).
            vabs = math.hypot(a.real, a.imag)
            len = math.pow(vabs, b.real)
            at = math.atan2(a.imag, a.real)
            phase = at * b.real
            if b.imag != fty(0.0):
                len /= math.exp(at * b.imag)
                phase += b.imag * math.log(vabs)

            return len * (cty(math.cos(phase)) +
                          cty(math.sin(phase) * cty(1.0j)))

        return context.compile_internal(builder, cpow_internal, sig, args)

    # Register for the ** operator, in-place **=, and the pow() builtin.
    lower(operator.pow, cty, cty)(core)
    lower(operator.ipow, cty, cty)(core)
    lower(pow, cty, cty)(core)


cpow_implement(types.float32, types.complex64)
cpow_implement(types.float64, types.complex128)
|
lib/python3.10/site-packages/numba/cuda/models.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
from llvmlite import ir
|
| 4 |
+
|
| 5 |
+
from numba.core.datamodel.registry import DataModelManager, register
|
| 6 |
+
from numba.core.extending import models
|
| 7 |
+
from numba.core import types
|
| 8 |
+
from numba.cuda.types import Dim3, GridGroup, CUDADispatcher
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
cuda_data_manager = DataModelManager()
|
| 12 |
+
|
| 13 |
+
register_model = functools.partial(register, cuda_data_manager)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@register_model(Dim3)
class Dim3Model(models.StructModel):
    """Data model for the Dim3 type: a struct of three int32 components."""
    def __init__(self, dmm, fe_type):
        members = [
            ('x', types.int32),
            ('y', types.int32),
            ('z', types.int32)
        ]
        super().__init__(dmm, fe_type, members)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@register_model(GridGroup)
class GridGroupModel(models.PrimitiveModel):
    """Data model for GridGroup: represented as a 64-bit integer handle."""
    def __init__(self, dmm, fe_type):
        be_type = ir.IntType(64)
        super().__init__(dmm, fe_type, be_type)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@register_model(types.Float)
class FloatModel(models.PrimitiveModel):
    """Data model mapping Numba float types to LLVM IR types.

    float16 is represented here as a 16-bit integer rather than an LLVM
    half type — presumably for NVVM compatibility; confirm before
    changing.
    """
    def __init__(self, dmm, fe_type):
        if fe_type == types.float16:
            be_type = ir.IntType(16)
        elif fe_type == types.float32:
            be_type = ir.FloatType()
        elif fe_type == types.float64:
            be_type = ir.DoubleType()
        else:
            raise NotImplementedError(fe_type)
        # Zero-argument super() for consistency with the other models in
        # this module (was the legacy super(FloatModel, self) form).
        super().__init__(dmm, fe_type, be_type)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
register_model(CUDADispatcher)(models.OpaqueModel)
|
lib/python3.10/site-packages/numba/cuda/nvvmutils.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
from llvmlite import ir
|
| 3 |
+
from numba.core import cgutils, targetconfig
|
| 4 |
+
from .cudadrv import nvvm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def declare_atomic_cas_int(lmod, isize):
    """Declare (or fetch) the helper implementing an integer
    compare-and-swap for bit width *isize* in module *lmod*."""
    fname = '___numba_atomic_i' + str(isize) + '_cas_hack'
    fnty = ir.FunctionType(ir.IntType(isize),
                           (ir.PointerType(ir.IntType(isize)),
                            ir.IntType(isize),
                            ir.IntType(isize)))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def atomic_cmpxchg(builder, lmod, isize, ptr, cmp, val):
    """Emit a monotonic cmpxchg on *ptr* and return the loaded old value.

    NOTE(review): `lmod` and `isize` are unused; they appear to be kept
    for interface compatibility with callers — confirm before removing.
    """
    out = builder.cmpxchg(ptr, cmp, val, 'monotonic', 'monotonic')
    # cmpxchg yields a {value, success-flag} pair; return the value.
    return builder.extract_value(out, 0)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def declare_atomic_add_float32(lmod):
    # Native NVVM intrinsic for single-precision atomic add.
    fname = 'llvm.nvvm.atomic.load.add.f32.p0f32'
    fnty = ir.FunctionType(ir.FloatType(),
                           (ir.PointerType(ir.FloatType(), 0), ir.FloatType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_add_float64(lmod):
    # Use the native intrinsic on compute capability >= 6.0; otherwise
    # fall back to the CAS-based helper function.
    flags = targetconfig.ConfigStack().top()
    if flags.compute_capability >= (6, 0):
        fname = 'llvm.nvvm.atomic.load.add.f64.p0f64'
    else:
        fname = '___numba_atomic_double_add'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_sub_float32(lmod):
    # No native subtract intrinsic; implemented by a helper function.
    fname = '___numba_atomic_float_sub'
    fnty = ir.FunctionType(ir.FloatType(),
                           (ir.PointerType(ir.FloatType()), ir.FloatType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_sub_float64(lmod):
    fname = '___numba_atomic_double_sub'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def declare_atomic_inc_int32(lmod):
    # Native NVVM intrinsic for 32-bit atomic increment-with-wrap.
    fname = 'llvm.nvvm.atomic.load.inc.32.p0i32'
    fnty = ir.FunctionType(ir.IntType(32),
                           (ir.PointerType(ir.IntType(32)), ir.IntType(32)))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_inc_int64(lmod):
    # No 64-bit intrinsic; implemented by a helper function.
    fname = '___numba_atomic_u64_inc'
    fnty = ir.FunctionType(ir.IntType(64),
                           (ir.PointerType(ir.IntType(64)), ir.IntType(64)))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_dec_int32(lmod):
    fname = 'llvm.nvvm.atomic.load.dec.32.p0i32'
    fnty = ir.FunctionType(ir.IntType(32),
                           (ir.PointerType(ir.IntType(32)), ir.IntType(32)))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_dec_int64(lmod):
    fname = '___numba_atomic_u64_dec'
    fnty = ir.FunctionType(ir.IntType(64),
                           (ir.PointerType(ir.IntType(64)), ir.IntType(64)))
    return cgutils.get_or_insert_function(lmod, fnty, fname)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Floating-point atomic max/min (and NaN-propagating variants) have no
# NVVM intrinsics; each declaration refers to a CAS-based helper function
# linked in elsewhere.

def declare_atomic_max_float32(lmod):
    fname = '___numba_atomic_float_max'
    fnty = ir.FunctionType(ir.FloatType(),
                           (ir.PointerType(ir.FloatType()), ir.FloatType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_max_float64(lmod):
    fname = '___numba_atomic_double_max'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_min_float32(lmod):
    fname = '___numba_atomic_float_min'
    fnty = ir.FunctionType(ir.FloatType(),
                           (ir.PointerType(ir.FloatType()), ir.FloatType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_min_float64(lmod):
    fname = '___numba_atomic_double_min'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_nanmax_float32(lmod):
    fname = '___numba_atomic_float_nanmax'
    fnty = ir.FunctionType(ir.FloatType(),
                           (ir.PointerType(ir.FloatType()), ir.FloatType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_nanmax_float64(lmod):
    fname = '___numba_atomic_double_nanmax'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_nanmin_float32(lmod):
    fname = '___numba_atomic_float_nanmin'
    fnty = ir.FunctionType(ir.FloatType(),
                           (ir.PointerType(ir.FloatType()), ir.FloatType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_nanmin_float64(lmod):
    fname = '___numba_atomic_double_nanmin'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def declare_cudaCGGetIntrinsicHandle(lmod):
    # Cooperative groups: i64 cudaCGGetIntrinsicHandle(i32 scope)
    fname = 'cudaCGGetIntrinsicHandle'
    fnty = ir.FunctionType(ir.IntType(64),
                           (ir.IntType(32),))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_cudaCGSynchronize(lmod):
    # Cooperative groups: i32 cudaCGSynchronize(i64 handle, i32 flags)
    fname = 'cudaCGSynchronize'
    fnty = ir.FunctionType(ir.IntType(32),
                           (ir.IntType(64), ir.IntType(32)))
    return cgutils.get_or_insert_function(lmod, fnty, fname)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def declare_string(builder, value):
    """Embed *value* as a NUL-terminated UTF-8 constant in constant
    address space and return a generic i8* pointer to it."""
    lmod = builder.basic_block.function.module
    cval = cgutils.make_bytearray(value.encode("utf-8") + b"\x00")
    # LLVM uniquifies the "_str" name if it already exists in the module.
    gl = cgutils.add_global_variable(lmod, cval.type, name="_str",
                                     addrspace=nvvm.ADDRSPACE_CONSTANT)
    gl.linkage = 'internal'
    gl.global_constant = True
    gl.initializer = cval

    # Cast from the constant address space to a generic pointer for use
    # by ordinary code.
    return builder.addrspacecast(gl, ir.PointerType(ir.IntType(8)), 'generic')
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def declare_vprint(lmod):
    """Declare (or fetch) the device-side vprintf function."""
    voidptrty = ir.PointerType(ir.IntType(8))
    # NOTE: the second argument to vprintf() points to the variable-length
    # array of arguments (after the format)
    vprintfty = ir.FunctionType(ir.IntType(32), [voidptrty, voidptrty])
    vprintf = cgutils.get_or_insert_function(lmod, vprintfty, "vprintf")
    return vprintf
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
# -----------------------------------------------------------------------------
|
| 173 |
+
|
| 174 |
+
# Mapping of PTX special-register names to the NVVM intrinsics that read
# them. All of these return i32.
SREG_MAPPING = {
    'tid.x': 'llvm.nvvm.read.ptx.sreg.tid.x',
    'tid.y': 'llvm.nvvm.read.ptx.sreg.tid.y',
    'tid.z': 'llvm.nvvm.read.ptx.sreg.tid.z',

    'ntid.x': 'llvm.nvvm.read.ptx.sreg.ntid.x',
    'ntid.y': 'llvm.nvvm.read.ptx.sreg.ntid.y',
    'ntid.z': 'llvm.nvvm.read.ptx.sreg.ntid.z',

    'ctaid.x': 'llvm.nvvm.read.ptx.sreg.ctaid.x',
    'ctaid.y': 'llvm.nvvm.read.ptx.sreg.ctaid.y',
    'ctaid.z': 'llvm.nvvm.read.ptx.sreg.ctaid.z',

    'nctaid.x': 'llvm.nvvm.read.ptx.sreg.nctaid.x',
    'nctaid.y': 'llvm.nvvm.read.ptx.sreg.nctaid.y',
    'nctaid.z': 'llvm.nvvm.read.ptx.sreg.nctaid.z',

    'warpsize': 'llvm.nvvm.read.ptx.sreg.warpsize',
    'laneid': 'llvm.nvvm.read.ptx.sreg.laneid',
}
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def call_sreg(builder, name):
    """Emit a call to the special-register intrinsic for *name*
    (e.g. 'tid.x') and return its i32 result."""
    module = builder.module
    fnty = ir.FunctionType(ir.IntType(32), ())
    fn = cgutils.get_or_insert_function(module, fnty, SREG_MAPPING[name])
    return builder.call(fn, ())
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class SRegBuilder(object):
    """Convenience wrapper for reading CUDA special registers."""

    def __init__(self, builder):
        self.builder = builder

    def tid(self, xyz):
        # Thread index within the block.
        return call_sreg(self.builder, 'tid.%s' % xyz)

    def ctaid(self, xyz):
        # Block (CTA) index within the grid.
        return call_sreg(self.builder, 'ctaid.%s' % xyz)

    def ntid(self, xyz):
        # Number of threads per block.
        return call_sreg(self.builder, 'ntid.%s' % xyz)

    def nctaid(self, xyz):
        # Number of blocks in the grid.
        return call_sreg(self.builder, 'nctaid.%s' % xyz)

    def getdim(self, xyz):
        """Return the global thread index along *xyz* as an i64:
        ctaid * ntid + tid (widened to avoid 32-bit overflow)."""
        i64 = ir.IntType(64)
        tid = self.builder.sext(self.tid(xyz), i64)
        ntid = self.builder.sext(self.ntid(xyz), i64)
        # This value was previously bound to a local misleadingly named
        # `nctaid`; it is the block index (ctaid), which is what the
        # global-index formula requires.
        ctaid = self.builder.sext(self.ctaid(xyz), i64)
        res = self.builder.add(self.builder.mul(ntid, ctaid), tid)
        return res
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def get_global_id(builder, dim):
    """Compute the global thread id for the first *dim* dimensions.

    Returns a single value when ``dim == 1``, otherwise a list with one
    entry per dimension.
    """
    sreg = SRegBuilder(builder)
    dims = [sreg.getdim(axis) for axis in 'xyz'[:dim]]
    return dims[0] if dim == 1 else dims
|
lib/python3.10/site-packages/numba/cuda/printimpl.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import singledispatch
|
| 2 |
+
from llvmlite import ir
|
| 3 |
+
from numba.core import types, cgutils
|
| 4 |
+
from numba.core.errors import NumbaWarning
|
| 5 |
+
from numba.core.imputils import Registry
|
| 6 |
+
from numba.cuda import nvvmutils
|
| 7 |
+
from warnings import warn
|
| 8 |
+
|
| 9 |
+
registry = Registry()
|
| 10 |
+
lower = registry.lower
|
| 11 |
+
|
| 12 |
+
voidptr = ir.PointerType(ir.IntType(8))
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# NOTE: we don't use @lower here since print_item() doesn't return a LLVM value
|
| 16 |
+
|
| 17 |
+
@singledispatch
def print_item(ty, context, builder, val):
    """
    Handle printing of a single value of the given Numba type.
    A (format string, [list of arguments]) is returned that will allow
    forming the final printf()-like call.
    """
    # Base case: no registered implementation for this type.
    raise NotImplementedError("printing unimplemented for values of type %s"
                              % (ty,))
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@print_item.register(types.Integer)
@print_item.register(types.IntegerLiteral)
def int_print_impl(ty, context, builder, val):
    # All integers are widened to 64 bits and printed with the matching
    # long-long format specifier.
    if ty in types.unsigned_domain:
        rawfmt = "%llu"
        dsttype = types.uint64
    else:
        rawfmt = "%lld"
        dsttype = types.int64
    lld = context.cast(builder, val, ty, dsttype)
    return rawfmt, [lld]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@print_item.register(types.Float)
def real_print_impl(ty, context, builder, val):
    # Floats are widened to double for printing.
    lld = context.cast(builder, val, ty, types.float64)
    return "%f", [lld]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@print_item.register(types.StringLiteral)
def const_print_impl(ty, context, builder, sigval):
    # String literals are embedded as constants and printed with %s.
    pyval = ty.literal_value
    assert isinstance(pyval, str)  # Ensured by lowering
    rawfmt = "%s"
    val = context.insert_string_const_addrspace(builder, pyval)
    return rawfmt, [val]
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@lower(print, types.VarArg(types.Any))
def print_varargs(context, builder, sig, args):
    """This function is a generic 'print' wrapper for arbitrary types.
    It dispatches to the appropriate 'print' implementations above
    depending on the detected real types in the signature."""

    formats = []
    values = []

    # Previously used enumerate() with an unused index.
    for argtype, argval in zip(sig.args, args):
        argfmt, argvals = print_item(argtype, context, builder, argval)
        formats.append(argfmt)
        values.extend(argvals)

    rawfmt = " ".join(formats) + "\n"
    if len(args) > 32:
        msg = ('CUDA print() cannot print more than 32 items. '
               'The raw format string will be emitted by the kernel instead.')
        warn(msg, NumbaWarning)

        # Escape the specifiers so the raw format string itself is
        # printed verbatim rather than interpreted by vprintf.
        rawfmt = rawfmt.replace('%', '%%')
    fmt = context.insert_string_const_addrspace(builder, rawfmt)
    array = cgutils.make_anonymous_struct(builder, values)
    arrayptr = cgutils.alloca_once_value(builder, array)

    # declare_vprint() is get-or-insert, so declaring it once here is
    # sufficient (it was previously declared redundantly twice).
    vprint = nvvmutils.declare_vprint(builder.module)
    builder.call(vprint, (fmt, builder.bitcast(arrayptr, voidptr)))

    return context.get_dummy_value()
|
lib/python3.10/site-packages/numba/cuda/random.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from numba import (config, cuda, float32, float64, uint32, int64, uint64,
|
| 4 |
+
from_dtype, jit)
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
# This implementation is based upon the xoroshiro128+ and splitmix64 algorithms
|
| 9 |
+
# described at:
|
| 10 |
+
#
|
| 11 |
+
# http://xoroshiro.di.unimi.it/
|
| 12 |
+
#
|
| 13 |
+
# and originally implemented by David Blackman and Sebastiano Vigna.
|
| 14 |
+
#
|
| 15 |
+
# The implementations below are based on the C source code:
|
| 16 |
+
#
|
| 17 |
+
# * http://xoroshiro.di.unimi.it/xoroshiro128plus.c
|
| 18 |
+
# * http://xoroshiro.di.unimi.it/splitmix64.c
|
| 19 |
+
#
|
| 20 |
+
# Splitmix64 is used to generate the initial state of the xoroshiro128+
|
| 21 |
+
# generator to ensure that small seeds don't result in predictable output.
|
| 22 |
+
|
| 23 |
+
# **WARNING**: There is a lot of verbose casting in this file to ensure that
|
| 24 |
+
# NumPy casting conventions (which cast uint64 [op] int32 to float64) don't
|
| 25 |
+
# turn integers into floats when using these functions in the CUDA simulator.
|
| 26 |
+
#
|
| 27 |
+
# There are also no function type signatures to ensure that compilation is
|
| 28 |
+
# deferred so that import is quick, and Sphinx autodoc works. We are also
|
| 29 |
+
# using the CPU @jit decorator everywhere to create functions that work as
|
| 30 |
+
# both CPU and CUDA device functions.
|
| 31 |
+
|
| 32 |
+
xoroshiro128p_dtype = np.dtype([('s0', np.uint64), ('s1', np.uint64)],
|
| 33 |
+
align=True)
|
| 34 |
+
xoroshiro128p_type = from_dtype(xoroshiro128p_dtype)
|
| 35 |
+
|
| 36 |
+
# When cudasim is enabled, Fake CUDA arrays are passed to some of the
|
| 37 |
+
# @jit-decorated functions. This required fallback to object mode. With
|
| 38 |
+
# Numba 0.59.0 object mode must be explicitly enabled.
|
| 39 |
+
# https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit
|
| 40 |
+
# In order to avoid the warning / future error, we explicitly specify that
|
| 41 |
+
# object mode with loop lifting is acceptable when using the simulator.
|
| 42 |
+
_forceobj = _looplift = config.ENABLE_CUDASIM
|
| 43 |
+
_nopython = not config.ENABLE_CUDASIM
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def init_xoroshiro128p_state(states, index, seed):
    '''Use SplitMix64 to generate an xoroshiro128p state from 64-bit seed.

    This ensures that manually set small seeds don't result in a predictable
    initial sequence from the random number generator.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :type seed: uint64
    :param seed: seed value to use when initializing state
    '''
    index = int64(index)
    seed = uint64(seed)

    # SplitMix64 finalizer: one round of mixing over the seed.
    z = seed + uint64(0x9E3779B97F4A7C15)
    z = (z ^ (z >> uint32(30))) * uint64(0xBF58476D1CE4E5B9)
    z = (z ^ (z >> uint32(27))) * uint64(0x94D049BB133111EB)
    z = z ^ (z >> uint32(31))

    # NOTE(review): both state words are seeded with the same mixed value;
    # presumably intentional, but differs from seeding with two SplitMix64
    # outputs — confirm against upstream before changing.
    states[index]['s0'] = z
    states[index]['s1'] = z
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def rotl(x, k):
    '''Left rotate x by k bits.'''
    x = uint64(x)
    k = uint32(k)
    # 64-bit rotate: bits shifted off the top re-enter at the bottom.
    return (x << k) | (x >> uint32(64 - k))
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_next(states, index):
    '''Return the next random uint64 and advance the RNG in states[index].

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: uint64
    '''
    index = int64(index)
    s0 = states[index]['s0']
    s1 = states[index]['s1']
    # The output is derived from the pre-update state.
    result = s0 + s1

    # xoroshiro128+ state transition (shift/rotate constants 55, 14, 36).
    s1 ^= s0
    states[index]['s0'] = uint64(rotl(s0, uint32(55))) ^ s1 ^ (s1 << uint32(14))
    states[index]['s1'] = uint64(rotl(s1, uint32(36)))

    return result
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_jump(states, index):
    '''Advance the RNG in ``states[index]`` by 2**64 steps.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    '''
    index = int64(index)

    # Jump polynomial constants from the reference xoroshiro128+ code.
    jump = (uint64(0xbeac0467eba5facb), uint64(0xd86b048b86aa9922))

    s0 = uint64(0)
    s1 = uint64(0)

    # Accumulate the states selected by the jump polynomial's set bits
    # while stepping the generator 128 times.
    for i in range(2):
        for b in range(64):
            if jump[i] & (uint64(1) << uint32(b)):
                s0 ^= states[index]['s0']
                s1 ^= states[index]['s1']
            xoroshiro128p_next(states, index)

    states[index]['s0'] = s0
    states[index]['s1'] = s1
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def uint64_to_unit_float64(x):
    '''Convert uint64 to float64 value in the range [0.0, 1.0)'''
    x = uint64(x)
    # Use the top 53 bits (a double's full mantissa precision) and scale
    # by 2**-53.
    return (x >> uint32(11)) * (float64(1) / (uint64(1) << uint32(53)))


@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def uint64_to_unit_float32(x):
    '''Convert uint64 to float32 value in the range [0.0, 1.0)'''
    x = uint64(x)
    # Derived by narrowing the float64 conversion.
    return float32(uint64_to_unit_float64(x))
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_uniform_float32(states, index):
    '''Return a float32 in range [0.0, 1.0) and advance ``states[index]``.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: float32
    '''
    # Draw one raw 64-bit value and map it onto the unit interval.
    raw = xoroshiro128p_next(states, int64(index))
    return uint64_to_unit_float32(raw)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_uniform_float64(states, index):
    '''Return a float64 in range [0.0, 1.0) and advance ``states[index]``.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: float64
    '''
    # Draw one raw 64-bit value and map it onto the unit interval.
    raw = xoroshiro128p_next(states, int64(index))
    return uint64_to_unit_float64(raw)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# Full circle (2*pi) pre-cast to each float width; used by the Box-Muller
# transforms below so the multiplication stays in the intended precision.
TWO_PI_FLOAT32 = np.float32(2 * math.pi)
TWO_PI_FLOAT64 = np.float64(2 * math.pi)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_normal_float32(states, index):
    '''Return a normally distributed float32 and advance ``states[index]``.

    The return value is drawn from a Gaussian of mean=0 and sigma=1 using the
    Box-Muller transform. This advances the RNG sequence by two steps.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: float32
    '''
    index = int64(index)

    u1 = xoroshiro128p_uniform_float32(states, index)
    u2 = xoroshiro128p_uniform_float32(states, index)

    # Box-Muller: keep only the cosine branch; the paired second normal
    # (radius * sin(TWO_PI_FLOAT32 * u2)) is discarded.
    radius = math.sqrt(-float32(2.0) * math.log(u1))
    return radius * math.cos(TWO_PI_FLOAT32 * u2)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_normal_float64(states, index):
    '''Return a normally distributed float64 and advance ``states[index]``.

    The return value is drawn from a Gaussian of mean=0 and sigma=1 using the
    Box-Muller transform. This advances the RNG sequence by two steps.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: float64
    '''
    index = int64(index)

    # NOTE(review): the uniform draws here are float32, not float64, so the
    # result only carries float32 input precision — confirm this matches the
    # intended upstream behaviour.
    u1 = xoroshiro128p_uniform_float32(states, index)
    u2 = xoroshiro128p_uniform_float32(states, index)

    z0 = math.sqrt(-float64(2.0) * math.log(u1)) * math.cos(TWO_PI_FLOAT64 * u2)
    # discarding second normal value
    # z1 = math.sqrt(-float64(2.0) * math.log(u1))
    # * math.sin(TWO_PI_FLOAT64 * u2)
    return z0
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def init_xoroshiro128p_states_cpu(states, seed, subsequence_start):
    '''Initialize an array of RNG states on the host.

    Seeds ``states[0]`` from ``seed``, advances it by ``subsequence_start``
    jumps of 2**64 steps, then derives each subsequent state from the
    previous one by a further 2**64-step jump.
    '''
    n = states.shape[0]
    seed = uint64(seed)
    subsequence_start = uint64(subsequence_start)

    if n >= 1:
        init_xoroshiro128p_state(states, 0, seed)

        # advance to starting subsequence number
        for _ in range(subsequence_start):
            xoroshiro128p_jump(states, 0)

        # populate the rest of the array
        for i in range(1, n):
            states[i] = states[i - 1]  # take state of previous generator
            xoroshiro128p_jump(states, i)  # and jump forward 2**64 steps
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def init_xoroshiro128p_states(states, seed, subsequence_start=0, stream=0):
    '''Initialize RNG states on the GPU for parallel generators.

    Each state in the array corresponds to a subsequence of the main sequence
    separated by 2**64 steps from its neighbours, so as long as no CUDA
    thread requests more than 2**64 random numbers, all of the RNG states
    produced by this function are guaranteed to be independent.

    The subsequence_start parameter can be used to advance the first RNG
    state by a multiple of 2**64 steps.

    :type states: 1D DeviceNDArray, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type seed: uint64
    :param seed: starting seed for list of generators
    '''
    # Seeding and jumping are serial scalar work, which is much faster on the
    # CPU; build the whole array on the host and ship it over in one copy.
    host_states = np.empty(shape=states.shape, dtype=xoroshiro128p_dtype)
    init_xoroshiro128p_states_cpu(host_states, seed, subsequence_start)
    states.copy_to_device(host_states, stream=stream)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def create_xoroshiro128p_states(n, seed, subsequence_start=0, stream=0):
    '''Returns a new device array initialized for n random number generators.

    Each state in the returned array corresponds to a subsequence of the main
    sequence separated by 2**64 steps from its neighbours, so as long as no
    CUDA thread requests more than 2**64 random numbers, all of the RNG
    states produced by this function are guaranteed to be independent.

    The subsequence_start parameter can be used to advance the first RNG
    state by a multiple of 2**64 steps.

    :type n: int
    :param n: number of RNG states to create
    :type seed: uint64
    :param seed: starting seed for list of generators
    :type subsequence_start: uint64
    :param subsequence_start:
    :type stream: CUDA stream
    :param stream: stream to run initialization kernel on
    '''
    # Allocate on the device, then delegate initialization (host-side seed
    # plus one device copy) to init_xoroshiro128p_states.
    rng_states = cuda.device_array(n, dtype=xoroshiro128p_dtype, stream=stream)
    init_xoroshiro128p_states(rng_states, seed, subsequence_start, stream)
    return rng_states
|
lib/python3.10/site-packages/numba/cuda/simulator_init.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# We import * from simulator here because * is imported from simulator_init by
|
| 2 |
+
# numba.cuda.__init__.
|
| 3 |
+
from .simulator import * # noqa: F403, F401
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def is_available():
    """Returns a boolean to indicate the availability of a CUDA GPU.
    """
    # The simulator needs no hardware, so it is unconditionally available.
    return True
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def cuda_error():
    """Returns None or an exception if the CUDA driver fails to initialize.
    """
    # There is no real driver to initialize, so no error can ever occur.
    return None
|
lib/python3.10/site-packages/numba/cuda/stubs.py
ADDED
|
@@ -0,0 +1,902 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This scripts specifies all PTX special objects.
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
import functools
|
| 7 |
+
import itertools
|
| 8 |
+
from inspect import Signature, Parameter
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Stub(object):
    '''
    Placeholder for a special object that only has meaning inside a CUDA
    kernel; it can never be constructed or used from host code.
    '''
    _description_ = '<ptx special value>'
    # No per-instance state is ever needed, so suppress __dict__ allocation.
    __slots__ = ()

    def __repr__(self):
        return self._description_

    def __new__(cls):
        # Instantiation is forbidden on the host.
        raise NotImplementedError("%s is not instantiable" % cls)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def stub_function(fn):
    '''
    Wrap *fn* as a stub: a special function that is meaningless outside the
    context of a CUDA kernel and therefore must never run on the host.
    '''
    @functools.wraps(fn)
    def host_guard(*args, **kwargs):
        raise NotImplementedError("%s cannot be called from host code" % fn)
    return host_guard
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
#-------------------------------------------------------------------------------
|
| 38 |
+
# Thread and grid indices and dimensions
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Dim3(Stub):
    '''A triple, (x, y, z)'''
    _description_ = '<Dim3>'

    # The properties below are placeholders only: they are never executed on
    # the host, and device code resolves these attributes during lowering.
    @property
    def x(self):
        pass

    @property
    def y(self):
        pass

    @property
    def z(self):
        pass
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class threadIdx(Dim3):
    '''
    The thread indices in the current thread block. Each index is an integer
    spanning the range from 0 inclusive to the corresponding value of the
    attribute in :attr:`numba.cuda.blockDim` exclusive.
    '''
    _description_ = '<threadIdx.{x,y,z}>'
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class blockIdx(Dim3):
    '''
    The block indices in the grid of thread blocks. Each index is an integer
    spanning the range from 0 inclusive to the corresponding value of the
    attribute in :attr:`numba.cuda.gridDim` exclusive.
    '''
    _description_ = '<blockIdx.{x,y,z}>'
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class blockDim(Dim3):
    '''
    The shape of a block of threads, as declared when instantiating the
    kernel. This value is the same for all threads in a given kernel launch,
    even if they belong to different blocks (i.e. each block is "full").
    '''
    _description_ = '<blockDim.{x,y,z}>'
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class gridDim(Dim3):
    '''
    The shape of the grid of blocks. This value is the same for all threads
    in a given kernel launch.
    '''
    _description_ = '<gridDim.{x,y,z}>'
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class warpsize(Stub):
    '''
    The size of a warp. All architectures implemented to date have a warp
    size of 32.
    '''
    _description_ = '<warpsize>'
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class laneid(Stub):
    '''
    This thread's lane within a warp. Ranges from 0 to
    :attr:`numba.cuda.warpsize` - 1.
    '''
    _description_ = '<laneid>'
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
#-------------------------------------------------------------------------------
|
| 110 |
+
# Array creation
|
| 111 |
+
|
| 112 |
+
class shared(Stub):
    '''
    Shared memory namespace
    '''
    _description_ = '<shared>'

    @stub_function
    def array(shape, dtype):
        '''
        Allocate a shared array of the given *shape* and *type*. *shape* is
        either an integer or a tuple of integers representing the array's
        dimensions. *type* is a :ref:`Numba type <numba-types>` of the
        elements needing to be stored in the array.

        The returned array-like object can be read and written to like any
        normal device array (e.g. through indexing).
        '''
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class local(Stub):
    '''
    Local memory namespace
    '''
    _description_ = '<local>'

    @stub_function
    def array(shape, dtype):
        '''
        Allocate a local array of the given *shape* and *type*. The array is
        private to the current thread, and resides in global memory. An
        array-like object is returned which can be read and written to like
        any standard array (e.g. through indexing).
        '''
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class const(Stub):
    '''
    Constant memory namespace
    '''

    @stub_function
    def array_like(ndarray):
        '''
        Create a const array from *ndarray*. The resulting const array will
        have the same shape, type, and values as *ndarray*.
        '''
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# -------------------------------------------------------------------------------
|
| 161 |
+
# warp level operations
|
| 162 |
+
|
| 163 |
+
class syncwarp(Stub):
    '''
    syncwarp(mask=0xFFFFFFFF)

    Synchronizes a masked subset of threads in a warp.
    '''
    _description_ = '<warp_sync()>'
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class shfl_sync_intrinsic(Stub):
    '''
    shfl_sync_intrinsic(mask, mode, value, mode_offset, clamp)

    Nvvm intrinsic for shuffling data across a warp
    docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-datamove
    '''
    _description_ = '<shfl_sync()>'
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class vote_sync_intrinsic(Stub):
    '''
    vote_sync_intrinsic(mask, mode, predicate)

    Nvvm intrinsic for performing a reduce and broadcast across a warp
    docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-vote
    '''
    _description_ = '<vote_sync()>'
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class match_any_sync(Stub):
    '''
    match_any_sync(mask, value)

    Nvvm intrinsic for performing a compare and broadcast across a warp.
    Returns a mask of threads that have same value as the given value from
    within the masked warp.
    '''
    _description_ = '<match_any_sync()>'
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class match_all_sync(Stub):
    '''
    match_all_sync(mask, value)

    Nvvm intrinsic for performing a compare and broadcast across a warp.
    Returns a tuple of (mask, pred), where mask is a mask of threads that
    have the same value as the given value from within the masked warp, if
    they all have the same value, otherwise it is 0. Pred is a boolean of
    whether or not all threads in the masked warp have the same value.
    '''
    _description_ = '<match_all_sync()>'
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
class activemask(Stub):
    '''
    activemask()

    Returns a 32-bit integer mask of all currently active threads in the
    calling warp. The Nth bit is set if the Nth lane in the warp is active
    when activemask() is called. Inactive threads are represented by 0 bits
    in the returned mask. Threads which have exited the kernel are always
    marked as inactive.
    '''
    _description_ = '<activemask()>'
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
class lanemask_lt(Stub):
    '''
    lanemask_lt()

    Returns a 32-bit integer mask of all lanes (including inactive ones) with
    ID less than the current lane.
    '''
    _description_ = '<lanemask_lt()>'
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# -------------------------------------------------------------------------------
|
| 240 |
+
# memory fences
|
| 241 |
+
|
| 242 |
+
class threadfence_block(Stub):
    '''
    A memory fence at thread block level
    '''
    _description_ = '<threadfence_block()>'
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class threadfence_system(Stub):
    '''
    A memory fence at system level: across devices
    '''
    _description_ = '<threadfence_system()>'
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
class threadfence(Stub):
    '''
    A memory fence at device level
    '''
    _description_ = '<threadfence()>'
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
#-------------------------------------------------------------------------------
|
| 264 |
+
# bit manipulation
|
| 265 |
+
|
| 266 |
+
class popc(Stub):
    """
    popc(x)

    Returns the number of set bits in x.
    """
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
class brev(Stub):
    """
    brev(x)

    Returns the reverse of the bit pattern of x. For example, 0b10110110
    becomes 0b01101101.
    """
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class clz(Stub):
    """
    clz(x)

    Returns the number of leading zeros in x.
    """
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class ffs(Stub):
    """
    ffs(x)

    Returns the position of the first (least significant) bit set to 1 in x,
    where the least significant bit position is 1. ffs(0) returns 0.
    """
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
#-------------------------------------------------------------------------------
|
| 301 |
+
# comparison and selection instructions
|
| 302 |
+
|
| 303 |
+
class selp(Stub):
    """
    selp(a, b, c)

    Select between source operands, based on the value of the predicate
    source operand.
    """
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
#-------------------------------------------------------------------------------
|
| 313 |
+
# single / double precision arithmetic
|
| 314 |
+
|
| 315 |
+
class fma(Stub):
    """
    fma(a, b, c)

    Perform the fused multiply-add operation.
    """
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
class cbrt(Stub):
    """
    cbrt(a)

    Perform the cube root operation.
    """
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
#-------------------------------------------------------------------------------
|
| 332 |
+
# atomic
|
| 333 |
+
|
| 334 |
+
class atomic(Stub):
    """Namespace for atomic operations
    """
    _description_ = '<atomic>'

    class add(Stub):
        """add(ary, idx, val)

        Perform atomic ``ary[idx] += val``. Supported on int32, float32, and
        float64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class sub(Stub):
        """sub(ary, idx, val)

        Perform atomic ``ary[idx] -= val``. Supported on int32, float32, and
        float64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class and_(Stub):
        """and_(ary, idx, val)

        Perform atomic ``ary[idx] &= val``. Supported on int32, int64, uint32
        and uint64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class or_(Stub):
        """or_(ary, idx, val)

        Perform atomic ``ary[idx] |= val``. Supported on int32, int64, uint32
        and uint64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class xor(Stub):
        """xor(ary, idx, val)

        Perform atomic ``ary[idx] ^= val``. Supported on int32, int64, uint32
        and uint64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class inc(Stub):
        """inc(ary, idx, val)

        Perform atomic ``ary[idx] += 1`` up to val, then reset to 0.
        Supported on uint32, and uint64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class dec(Stub):
        """dec(ary, idx, val)

        Performs::

           ary[idx] = (val if (ary[idx] == 0) or
                       (ary[idx] > val) else ary[idx] - 1)

        Supported on uint32, and uint64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class exch(Stub):
        """exch(ary, idx, val)

        Perform atomic ``ary[idx] = val``. Supported on int32, int64, uint32
        and uint64 operands only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class max(Stub):
        """max(ary, idx, val)

        Perform atomic ``ary[idx] = max(ary[idx], val)``.

        Supported on int32, int64, uint32, uint64, float32, float64 operands
        only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class min(Stub):
        """min(ary, idx, val)

        Perform atomic ``ary[idx] = min(ary[idx], val)``.

        Supported on int32, int64, uint32, uint64, float32, float64 operands
        only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class nanmax(Stub):
        """nanmax(ary, idx, val)

        Perform atomic ``ary[idx] = max(ary[idx], val)``.

        NOTE: NaN is treated as a missing value such that:
        nanmax(NaN, n) == n, nanmax(n, NaN) == n

        Supported on int32, int64, uint32, uint64, float32, float64 operands
        only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class nanmin(Stub):
        """nanmin(ary, idx, val)

        Perform atomic ``ary[idx] = min(ary[idx], val)``.

        NOTE: NaN is treated as a missing value, such that:
        nanmin(NaN, n) == n, nanmin(n, NaN) == n

        Supported on int32, int64, uint32, uint64, float32, float64 operands
        only.

        Returns the old value at the index location as if it is loaded
        atomically.
        """

    class compare_and_swap(Stub):
        """compare_and_swap(ary, old, val)

        Conditionally assign ``val`` to the first element of an 1D array
        ``ary`` if the current value matches ``old``.

        Supported on int32, int64, uint32, uint64 operands only.

        Returns the old value as if it is loaded atomically.
        """

    class cas(Stub):
        """cas(ary, idx, old, val)

        Conditionally assign ``val`` to the element ``idx`` of an array
        ``ary`` if the current value of ``ary[idx]`` matches ``old``.

        Supported on int32, int64, uint32, uint64 operands only.

        Returns the old value as if it is loaded atomically.
        """
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
#-------------------------------------------------------------------------------
|
| 501 |
+
# timers
|
| 502 |
+
|
| 503 |
+
class nanosleep(Stub):
    '''
    nanosleep(ns)

    Suspends the thread for a sleep duration approximately close to the delay
    `ns`, specified in nanoseconds.
    '''
    # Bug fix: the repr text was misspelled '<nansleep()>'; it should match
    # the stub's actual name.
    _description_ = '<nanosleep()>'
|
| 511 |
+
|
| 512 |
+
#-------------------------------------------------------------------------------
|
| 513 |
+
# Floating point 16
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
class fp16(Stub):
|
| 517 |
+
"""Namespace for fp16 operations
|
| 518 |
+
"""
|
| 519 |
+
_description_ = '<fp16>'
|
| 520 |
+
|
| 521 |
+
class hadd(Stub):
|
| 522 |
+
"""hadd(a, b)
|
| 523 |
+
|
| 524 |
+
Perform fp16 addition, (a + b) in round to nearest mode. Supported
|
| 525 |
+
on fp16 operands only.
|
| 526 |
+
|
| 527 |
+
Returns the fp16 result of the addition.
|
| 528 |
+
|
| 529 |
+
"""
|
| 530 |
+
|
| 531 |
+
class hsub(Stub):
|
| 532 |
+
"""hsub(a, b)
|
| 533 |
+
|
| 534 |
+
Perform fp16 subtraction, (a - b) in round to nearest mode. Supported
|
| 535 |
+
on fp16 operands only.
|
| 536 |
+
|
| 537 |
+
Returns the fp16 result of the subtraction.
|
| 538 |
+
|
| 539 |
+
"""
|
| 540 |
+
|
| 541 |
+
class hmul(Stub):
|
| 542 |
+
"""hmul(a, b)
|
| 543 |
+
|
| 544 |
+
Perform fp16 multiplication, (a * b) in round to nearest mode. Supported
|
| 545 |
+
on fp16 operands only.
|
| 546 |
+
|
| 547 |
+
Returns the fp16 result of the multiplication.
|
| 548 |
+
|
| 549 |
+
"""
|
| 550 |
+
|
| 551 |
+
class hdiv(Stub):
|
| 552 |
+
"""hdiv(a, b)
|
| 553 |
+
|
| 554 |
+
Perform fp16 division, (a / b) in round to nearest mode. Supported
|
| 555 |
+
on fp16 operands only.
|
| 556 |
+
|
| 557 |
+
Returns the fp16 result of the division
|
| 558 |
+
|
| 559 |
+
"""
|
| 560 |
+
|
| 561 |
+
class hfma(Stub):
|
| 562 |
+
"""hfma(a, b, c)
|
| 563 |
+
|
| 564 |
+
Perform fp16 multiply and accumulate, (a * b) + c in round to nearest
|
| 565 |
+
mode. Supported on fp16 operands only.
|
| 566 |
+
|
| 567 |
+
Returns the fp16 result of the multiplication.
|
| 568 |
+
|
| 569 |
+
"""
|
| 570 |
+
|
| 571 |
+
class hneg(Stub):
|
| 572 |
+
"""hneg(a)
|
| 573 |
+
|
| 574 |
+
Perform fp16 negation, -(a). Supported on fp16 operands only.
|
| 575 |
+
|
| 576 |
+
Returns the fp16 result of the negation.
|
| 577 |
+
|
| 578 |
+
"""
|
| 579 |
+
|
| 580 |
+
class habs(Stub):
|
| 581 |
+
"""habs(a)
|
| 582 |
+
|
| 583 |
+
Perform fp16 absolute value, |a|. Supported on fp16 operands only.
|
| 584 |
+
|
| 585 |
+
Returns the fp16 result of the absolute value.
|
| 586 |
+
|
| 587 |
+
"""
|
| 588 |
+
|
| 589 |
+
class hsin(Stub):
|
| 590 |
+
"""hsin(a)
|
| 591 |
+
|
| 592 |
+
Calculate sine in round to nearest even mode. Supported on fp16
|
| 593 |
+
operands only.
|
| 594 |
+
|
| 595 |
+
Returns the sine result.
|
| 596 |
+
|
| 597 |
+
"""
|
| 598 |
+
|
| 599 |
+
class hcos(Stub):
|
| 600 |
+
"""hsin(a)
|
| 601 |
+
|
| 602 |
+
Calculate cosine in round to nearest even mode. Supported on fp16
|
| 603 |
+
operands only.
|
| 604 |
+
|
| 605 |
+
Returns the cosine result.
|
| 606 |
+
|
| 607 |
+
"""
|
| 608 |
+
|
| 609 |
+
class hlog(Stub):
|
| 610 |
+
"""hlog(a)
|
| 611 |
+
|
| 612 |
+
Calculate natural logarithm in round to nearest even mode. Supported
|
| 613 |
+
on fp16 operands only.
|
| 614 |
+
|
| 615 |
+
Returns the natural logarithm result.
|
| 616 |
+
|
| 617 |
+
"""
|
| 618 |
+
|
| 619 |
+
class hlog10(Stub):
|
| 620 |
+
"""hlog10(a)
|
| 621 |
+
|
| 622 |
+
Calculate logarithm base 10 in round to nearest even mode. Supported
|
| 623 |
+
on fp16 operands only.
|
| 624 |
+
|
| 625 |
+
Returns the logarithm base 10 result.
|
| 626 |
+
|
| 627 |
+
"""
|
| 628 |
+
|
| 629 |
+
class hlog2(Stub):
|
| 630 |
+
"""hlog2(a)
|
| 631 |
+
|
| 632 |
+
Calculate logarithm base 2 in round to nearest even mode. Supported
|
| 633 |
+
on fp16 operands only.
|
| 634 |
+
|
| 635 |
+
Returns the logarithm base 2 result.
|
| 636 |
+
|
| 637 |
+
"""
|
| 638 |
+
|
| 639 |
+
class hexp(Stub):
|
| 640 |
+
"""hexp(a)
|
| 641 |
+
|
| 642 |
+
Calculate natural exponential, exp(a), in round to nearest mode.
|
| 643 |
+
Supported on fp16 operands only.
|
| 644 |
+
|
| 645 |
+
Returns the natural exponential result.
|
| 646 |
+
|
| 647 |
+
"""
|
| 648 |
+
|
| 649 |
+
class hexp10(Stub):
|
| 650 |
+
"""hexp10(a)
|
| 651 |
+
|
| 652 |
+
Calculate exponential base 10 (10 ** a) in round to nearest mode.
|
| 653 |
+
Supported on fp16 operands only.
|
| 654 |
+
|
| 655 |
+
Returns the exponential base 10 result.
|
| 656 |
+
|
| 657 |
+
"""
|
| 658 |
+
|
| 659 |
+
class hexp2(Stub):
|
| 660 |
+
"""hexp2(a)
|
| 661 |
+
|
| 662 |
+
Calculate exponential base 2 (2 ** a) in round to nearest mode.
|
| 663 |
+
Supported on fp16 operands only.
|
| 664 |
+
|
| 665 |
+
Returns the exponential base 2 result.
|
| 666 |
+
|
| 667 |
+
"""
|
| 668 |
+
|
| 669 |
+
class hfloor(Stub):
|
| 670 |
+
"""hfloor(a)
|
| 671 |
+
|
| 672 |
+
Calculate the floor, the largest integer less than or equal to 'a'.
|
| 673 |
+
Supported on fp16 operands only.
|
| 674 |
+
|
| 675 |
+
Returns the floor result.
|
| 676 |
+
|
| 677 |
+
"""
|
| 678 |
+
|
| 679 |
+
class hceil(Stub):
|
| 680 |
+
"""hceil(a)
|
| 681 |
+
|
| 682 |
+
Calculate the ceil, the smallest integer greater than or equal to 'a'.
|
| 683 |
+
Supported on fp16 operands only.
|
| 684 |
+
|
| 685 |
+
Returns the ceil result.
|
| 686 |
+
|
| 687 |
+
"""
|
| 688 |
+
|
| 689 |
+
class hsqrt(Stub):
|
| 690 |
+
"""hsqrt(a)
|
| 691 |
+
|
| 692 |
+
Calculate the square root of the input argument in round to nearest
|
| 693 |
+
mode. Supported on fp16 operands only.
|
| 694 |
+
|
| 695 |
+
Returns the square root result.
|
| 696 |
+
|
| 697 |
+
"""
|
| 698 |
+
|
| 699 |
+
class hrsqrt(Stub):
|
| 700 |
+
"""hrsqrt(a)
|
| 701 |
+
|
| 702 |
+
Calculate the reciprocal square root of the input argument in round
|
| 703 |
+
to nearest even mode. Supported on fp16 operands only.
|
| 704 |
+
|
| 705 |
+
Returns the reciprocal square root result.
|
| 706 |
+
|
| 707 |
+
"""
|
| 708 |
+
|
| 709 |
+
class hrcp(Stub):
|
| 710 |
+
"""hrcp(a)
|
| 711 |
+
|
| 712 |
+
Calculate the reciprocal of the input argument in round to nearest
|
| 713 |
+
even mode. Supported on fp16 operands only.
|
| 714 |
+
|
| 715 |
+
Returns the reciprocal result.
|
| 716 |
+
|
| 717 |
+
"""
|
| 718 |
+
|
| 719 |
+
class hrint(Stub):
|
| 720 |
+
"""hrint(a)
|
| 721 |
+
|
| 722 |
+
Round the input argument to nearest integer value. Supported on fp16
|
| 723 |
+
operands only.
|
| 724 |
+
|
| 725 |
+
Returns the rounded result.
|
| 726 |
+
|
| 727 |
+
"""
|
| 728 |
+
|
| 729 |
+
class htrunc(Stub):
|
| 730 |
+
"""htrunc(a)
|
| 731 |
+
|
| 732 |
+
Truncate the input argument to its integer portion. Supported
|
| 733 |
+
on fp16 operands only.
|
| 734 |
+
|
| 735 |
+
Returns the truncated result.
|
| 736 |
+
|
| 737 |
+
"""
|
| 738 |
+
|
| 739 |
+
class heq(Stub):
|
| 740 |
+
"""heq(a, b)
|
| 741 |
+
|
| 742 |
+
Perform fp16 comparison, (a == b). Supported
|
| 743 |
+
on fp16 operands only.
|
| 744 |
+
|
| 745 |
+
Returns True if a and b are equal and False otherwise.
|
| 746 |
+
|
| 747 |
+
"""
|
| 748 |
+
|
| 749 |
+
class hne(Stub):
|
| 750 |
+
"""hne(a, b)
|
| 751 |
+
|
| 752 |
+
Perform fp16 comparison, (a != b). Supported
|
| 753 |
+
on fp16 operands only.
|
| 754 |
+
|
| 755 |
+
Returns True if a and b are not equal and False otherwise.
|
| 756 |
+
|
| 757 |
+
"""
|
| 758 |
+
|
| 759 |
+
class hge(Stub):
|
| 760 |
+
"""hge(a, b)
|
| 761 |
+
|
| 762 |
+
Perform fp16 comparison, (a >= b). Supported
|
| 763 |
+
on fp16 operands only.
|
| 764 |
+
|
| 765 |
+
Returns True if a is >= b and False otherwise.
|
| 766 |
+
|
| 767 |
+
"""
|
| 768 |
+
|
| 769 |
+
class hgt(Stub):
|
| 770 |
+
"""hgt(a, b)
|
| 771 |
+
|
| 772 |
+
Perform fp16 comparison, (a > b). Supported
|
| 773 |
+
on fp16 operands only.
|
| 774 |
+
|
| 775 |
+
Returns True if a is > b and False otherwise.
|
| 776 |
+
|
| 777 |
+
"""
|
| 778 |
+
|
| 779 |
+
class hle(Stub):
|
| 780 |
+
"""hle(a, b)
|
| 781 |
+
|
| 782 |
+
Perform fp16 comparison, (a <= b). Supported
|
| 783 |
+
on fp16 operands only.
|
| 784 |
+
|
| 785 |
+
Returns True if a is <= b and False otherwise.
|
| 786 |
+
|
| 787 |
+
"""
|
| 788 |
+
|
| 789 |
+
class hlt(Stub):
|
| 790 |
+
"""hlt(a, b)
|
| 791 |
+
|
| 792 |
+
Perform fp16 comparison, (a < b). Supported
|
| 793 |
+
on fp16 operands only.
|
| 794 |
+
|
| 795 |
+
Returns True if a is < b and False otherwise.
|
| 796 |
+
|
| 797 |
+
"""
|
| 798 |
+
|
| 799 |
+
class hmax(Stub):
|
| 800 |
+
"""hmax(a, b)
|
| 801 |
+
|
| 802 |
+
Perform fp16 maximum operation, max(a,b) Supported
|
| 803 |
+
on fp16 operands only.
|
| 804 |
+
|
| 805 |
+
Returns a if a is greater than b, returns b otherwise.
|
| 806 |
+
|
| 807 |
+
"""
|
| 808 |
+
|
| 809 |
+
class hmin(Stub):
|
| 810 |
+
"""hmin(a, b)
|
| 811 |
+
|
| 812 |
+
Perform fp16 minimum operation, min(a,b). Supported
|
| 813 |
+
on fp16 operands only.
|
| 814 |
+
|
| 815 |
+
Returns a if a is less than b, returns b otherwise.
|
| 816 |
+
|
| 817 |
+
"""
|
| 818 |
+
|
| 819 |
+
|
| 820 |
+
#-------------------------------------------------------------------------------
|
| 821 |
+
# vector types
|
| 822 |
+
|
| 823 |
+
def make_vector_type_stubs():
|
| 824 |
+
"""Make user facing objects for vector types"""
|
| 825 |
+
vector_type_stubs = []
|
| 826 |
+
vector_type_prefix = (
|
| 827 |
+
"int8",
|
| 828 |
+
"int16",
|
| 829 |
+
"int32",
|
| 830 |
+
"int64",
|
| 831 |
+
"uint8",
|
| 832 |
+
"uint16",
|
| 833 |
+
"uint32",
|
| 834 |
+
"uint64",
|
| 835 |
+
"float32",
|
| 836 |
+
"float64"
|
| 837 |
+
)
|
| 838 |
+
vector_type_element_counts = (1, 2, 3, 4)
|
| 839 |
+
vector_type_attribute_names = ("x", "y", "z", "w")
|
| 840 |
+
|
| 841 |
+
for prefix, nelem in itertools.product(
|
| 842 |
+
vector_type_prefix, vector_type_element_counts
|
| 843 |
+
):
|
| 844 |
+
type_name = f"{prefix}x{nelem}"
|
| 845 |
+
attr_names = vector_type_attribute_names[:nelem]
|
| 846 |
+
|
| 847 |
+
vector_type_stub = type(
|
| 848 |
+
type_name, (Stub,),
|
| 849 |
+
{
|
| 850 |
+
**{attr: lambda self: None for attr in attr_names},
|
| 851 |
+
**{
|
| 852 |
+
"_description_": f"<{type_name}>",
|
| 853 |
+
"__signature__": Signature(parameters=[
|
| 854 |
+
Parameter(
|
| 855 |
+
name=attr_name, kind=Parameter.POSITIONAL_ONLY
|
| 856 |
+
) for attr_name in attr_names[:nelem]
|
| 857 |
+
]),
|
| 858 |
+
"__doc__": f"A stub for {type_name} to be used in "
|
| 859 |
+
"CUDA kernels."
|
| 860 |
+
},
|
| 861 |
+
**{"aliases": []}
|
| 862 |
+
}
|
| 863 |
+
)
|
| 864 |
+
vector_type_stubs.append(vector_type_stub)
|
| 865 |
+
return vector_type_stubs
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
def map_vector_type_stubs_to_alias(vector_type_stubs):
|
| 869 |
+
"""For each of the stubs, create its aliases.
|
| 870 |
+
|
| 871 |
+
For example: float64x3 -> double3
|
| 872 |
+
"""
|
| 873 |
+
# C-compatible type mapping, see:
|
| 874 |
+
# https://numpy.org/devdocs/reference/arrays.scalars.html#integer-types
|
| 875 |
+
base_type_to_alias = {
|
| 876 |
+
"char": f"int{np.dtype(np.byte).itemsize * 8}",
|
| 877 |
+
"short": f"int{np.dtype(np.short).itemsize * 8}",
|
| 878 |
+
"int": f"int{np.dtype(np.intc).itemsize * 8}",
|
| 879 |
+
"long": f"int{np.dtype(np.int_).itemsize * 8}",
|
| 880 |
+
"longlong": f"int{np.dtype(np.longlong).itemsize * 8}",
|
| 881 |
+
"uchar": f"uint{np.dtype(np.ubyte).itemsize * 8}",
|
| 882 |
+
"ushort": f"uint{np.dtype(np.ushort).itemsize * 8}",
|
| 883 |
+
"uint": f"uint{np.dtype(np.uintc).itemsize * 8}",
|
| 884 |
+
"ulong": f"uint{np.dtype(np.uint).itemsize * 8}",
|
| 885 |
+
"ulonglong": f"uint{np.dtype(np.ulonglong).itemsize * 8}",
|
| 886 |
+
"float": f"float{np.dtype(np.single).itemsize * 8}",
|
| 887 |
+
"double": f"float{np.dtype(np.double).itemsize * 8}"
|
| 888 |
+
}
|
| 889 |
+
|
| 890 |
+
base_type_to_vector_type = defaultdict(list)
|
| 891 |
+
for stub in vector_type_stubs:
|
| 892 |
+
base_type_to_vector_type[stub.__name__[:-2]].append(stub)
|
| 893 |
+
|
| 894 |
+
for alias, base_type in base_type_to_alias.items():
|
| 895 |
+
vector_type_stubs = base_type_to_vector_type[base_type]
|
| 896 |
+
for stub in vector_type_stubs:
|
| 897 |
+
nelem = stub.__name__[-1]
|
| 898 |
+
stub.aliases.append(f"{alias}{nelem}")
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
_vector_type_stubs = make_vector_type_stubs()
|
| 902 |
+
map_vector_type_stubs_to_alias(_vector_type_stubs)
|
lib/python3.10/site-packages/numba/cuda/target.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from functools import cached_property
|
| 3 |
+
import llvmlite.binding as ll
|
| 4 |
+
from llvmlite import ir
|
| 5 |
+
|
| 6 |
+
from numba.core import (cgutils, config, debuginfo, itanium_mangler, types,
|
| 7 |
+
typing, utils)
|
| 8 |
+
from numba.core.dispatcher import Dispatcher
|
| 9 |
+
from numba.core.base import BaseContext
|
| 10 |
+
from numba.core.callconv import BaseCallConv, MinimalCallConv
|
| 11 |
+
from numba.core.typing import cmathdecl
|
| 12 |
+
from numba.core import datamodel
|
| 13 |
+
|
| 14 |
+
from .cudadrv import nvvm
|
| 15 |
+
from numba.cuda import codegen, nvvmutils, ufuncs
|
| 16 |
+
from numba.cuda.models import cuda_data_manager
|
| 17 |
+
|
| 18 |
+
# -----------------------------------------------------------------------------
|
| 19 |
+
# Typing
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class CUDATypingContext(typing.BaseContext):
|
| 23 |
+
def load_additional_registries(self):
|
| 24 |
+
from . import cudadecl, cudamath, libdevicedecl, vector_types
|
| 25 |
+
from numba.core.typing import enumdecl, cffi_utils
|
| 26 |
+
|
| 27 |
+
self.install_registry(cudadecl.registry)
|
| 28 |
+
self.install_registry(cffi_utils.registry)
|
| 29 |
+
self.install_registry(cudamath.registry)
|
| 30 |
+
self.install_registry(cmathdecl.registry)
|
| 31 |
+
self.install_registry(libdevicedecl.registry)
|
| 32 |
+
self.install_registry(enumdecl.registry)
|
| 33 |
+
self.install_registry(vector_types.typing_registry)
|
| 34 |
+
|
| 35 |
+
def resolve_value_type(self, val):
|
| 36 |
+
# treat other dispatcher object as another device function
|
| 37 |
+
from numba.cuda.dispatcher import CUDADispatcher
|
| 38 |
+
if (isinstance(val, Dispatcher) and not
|
| 39 |
+
isinstance(val, CUDADispatcher)):
|
| 40 |
+
try:
|
| 41 |
+
# use cached device function
|
| 42 |
+
val = val.__dispatcher
|
| 43 |
+
except AttributeError:
|
| 44 |
+
if not val._can_compile:
|
| 45 |
+
raise ValueError('using cpu function on device '
|
| 46 |
+
'but its compilation is disabled')
|
| 47 |
+
targetoptions = val.targetoptions.copy()
|
| 48 |
+
targetoptions['device'] = True
|
| 49 |
+
targetoptions['debug'] = targetoptions.get('debug', False)
|
| 50 |
+
targetoptions['opt'] = targetoptions.get('opt', True)
|
| 51 |
+
disp = CUDADispatcher(val.py_func, targetoptions)
|
| 52 |
+
# cache the device function for future use and to avoid
|
| 53 |
+
# duplicated copy of the same function.
|
| 54 |
+
val.__dispatcher = disp
|
| 55 |
+
val = disp
|
| 56 |
+
|
| 57 |
+
# continue with parent logic
|
| 58 |
+
return super(CUDATypingContext, self).resolve_value_type(val)
|
| 59 |
+
|
| 60 |
+
# -----------------------------------------------------------------------------
|
| 61 |
+
# Implementation
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
VALID_CHARS = re.compile(r'[^a-z0-9]', re.I)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class CUDATargetContext(BaseContext):
|
| 68 |
+
implement_powi_as_math_call = True
|
| 69 |
+
strict_alignment = True
|
| 70 |
+
|
| 71 |
+
def __init__(self, typingctx, target='cuda'):
|
| 72 |
+
super().__init__(typingctx, target)
|
| 73 |
+
self.data_model_manager = cuda_data_manager.chain(
|
| 74 |
+
datamodel.default_manager
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
@property
|
| 78 |
+
def DIBuilder(self):
|
| 79 |
+
return debuginfo.DIBuilder
|
| 80 |
+
|
| 81 |
+
@property
|
| 82 |
+
def enable_boundscheck(self):
|
| 83 |
+
# Unconditionally disabled
|
| 84 |
+
return False
|
| 85 |
+
|
| 86 |
+
# Overrides
|
| 87 |
+
def create_module(self, name):
|
| 88 |
+
return self._internal_codegen._create_empty_module(name)
|
| 89 |
+
|
| 90 |
+
def init(self):
|
| 91 |
+
self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit")
|
| 92 |
+
self._target_data = None
|
| 93 |
+
|
| 94 |
+
def load_additional_registries(self):
|
| 95 |
+
# side effect of import needed for numba.cpython.*, the builtins
|
| 96 |
+
# registry is updated at import time.
|
| 97 |
+
from numba.cpython import numbers, tupleobj, slicing # noqa: F401
|
| 98 |
+
from numba.cpython import rangeobj, iterators, enumimpl # noqa: F401
|
| 99 |
+
from numba.cpython import unicode, charseq # noqa: F401
|
| 100 |
+
from numba.cpython import cmathimpl
|
| 101 |
+
from numba.misc import cffiimpl
|
| 102 |
+
from numba.np import arrayobj # noqa: F401
|
| 103 |
+
from numba.np import npdatetime # noqa: F401
|
| 104 |
+
from . import (
|
| 105 |
+
cudaimpl, printimpl, libdeviceimpl, mathimpl, vector_types
|
| 106 |
+
)
|
| 107 |
+
# fix for #8940
|
| 108 |
+
from numba.np.unsafe import ndarray # noqa F401
|
| 109 |
+
|
| 110 |
+
self.install_registry(cudaimpl.registry)
|
| 111 |
+
self.install_registry(cffiimpl.registry)
|
| 112 |
+
self.install_registry(printimpl.registry)
|
| 113 |
+
self.install_registry(libdeviceimpl.registry)
|
| 114 |
+
self.install_registry(cmathimpl.registry)
|
| 115 |
+
self.install_registry(mathimpl.registry)
|
| 116 |
+
self.install_registry(vector_types.impl_registry)
|
| 117 |
+
|
| 118 |
+
def codegen(self):
|
| 119 |
+
return self._internal_codegen
|
| 120 |
+
|
| 121 |
+
@property
|
| 122 |
+
def target_data(self):
|
| 123 |
+
if self._target_data is None:
|
| 124 |
+
self._target_data = ll.create_target_data(nvvm.NVVM().data_layout)
|
| 125 |
+
return self._target_data
|
| 126 |
+
|
| 127 |
+
@cached_property
|
| 128 |
+
def nonconst_module_attrs(self):
|
| 129 |
+
"""
|
| 130 |
+
Some CUDA intrinsics are at the module level, but cannot be treated as
|
| 131 |
+
constants, because they are loaded from a special register in the PTX.
|
| 132 |
+
These include threadIdx, blockDim, etc.
|
| 133 |
+
"""
|
| 134 |
+
from numba import cuda
|
| 135 |
+
nonconsts = ('threadIdx', 'blockDim', 'blockIdx', 'gridDim', 'laneid',
|
| 136 |
+
'warpsize')
|
| 137 |
+
nonconsts_with_mod = tuple([(types.Module(cuda), nc)
|
| 138 |
+
for nc in nonconsts])
|
| 139 |
+
return nonconsts_with_mod
|
| 140 |
+
|
| 141 |
+
@cached_property
|
| 142 |
+
def call_conv(self):
|
| 143 |
+
return CUDACallConv(self)
|
| 144 |
+
|
| 145 |
+
def mangler(self, name, argtypes, *, abi_tags=(), uid=None):
|
| 146 |
+
return itanium_mangler.mangle(name, argtypes, abi_tags=abi_tags,
|
| 147 |
+
uid=uid)
|
| 148 |
+
|
| 149 |
+
def prepare_cuda_kernel(self, codelib, fndesc, debug, lineinfo,
|
| 150 |
+
nvvm_options, filename, linenum,
|
| 151 |
+
max_registers=None):
|
| 152 |
+
"""
|
| 153 |
+
Adapt a code library ``codelib`` with the numba compiled CUDA kernel
|
| 154 |
+
with name ``fname`` and arguments ``argtypes`` for NVVM.
|
| 155 |
+
A new library is created with a wrapper function that can be used as
|
| 156 |
+
the kernel entry point for the given kernel.
|
| 157 |
+
|
| 158 |
+
Returns the new code library and the wrapper function.
|
| 159 |
+
|
| 160 |
+
Parameters:
|
| 161 |
+
|
| 162 |
+
codelib: The CodeLibrary containing the device function to wrap
|
| 163 |
+
in a kernel call.
|
| 164 |
+
fndesc: The FunctionDescriptor of the source function.
|
| 165 |
+
debug: Whether to compile with debug.
|
| 166 |
+
lineinfo: Whether to emit line info.
|
| 167 |
+
nvvm_options: Dict of NVVM options used when compiling the new library.
|
| 168 |
+
filename: The source filename that the function is contained in.
|
| 169 |
+
linenum: The source line that the function is on.
|
| 170 |
+
max_registers: The max_registers argument for the code library.
|
| 171 |
+
"""
|
| 172 |
+
kernel_name = itanium_mangler.prepend_namespace(
|
| 173 |
+
fndesc.llvm_func_name, ns='cudapy',
|
| 174 |
+
)
|
| 175 |
+
library = self.codegen().create_library(f'{codelib.name}_kernel_',
|
| 176 |
+
entry_name=kernel_name,
|
| 177 |
+
nvvm_options=nvvm_options,
|
| 178 |
+
max_registers=max_registers)
|
| 179 |
+
library.add_linking_library(codelib)
|
| 180 |
+
wrapper = self.generate_kernel_wrapper(library, fndesc, kernel_name,
|
| 181 |
+
debug, lineinfo, filename,
|
| 182 |
+
linenum)
|
| 183 |
+
return library, wrapper
|
| 184 |
+
|
| 185 |
+
def generate_kernel_wrapper(self, library, fndesc, kernel_name, debug,
|
| 186 |
+
lineinfo, filename, linenum):
|
| 187 |
+
"""
|
| 188 |
+
Generate the kernel wrapper in the given ``library``.
|
| 189 |
+
The function being wrapped is described by ``fndesc``.
|
| 190 |
+
The wrapper function is returned.
|
| 191 |
+
"""
|
| 192 |
+
|
| 193 |
+
argtypes = fndesc.argtypes
|
| 194 |
+
arginfo = self.get_arg_packer(argtypes)
|
| 195 |
+
argtys = list(arginfo.argument_types)
|
| 196 |
+
wrapfnty = ir.FunctionType(ir.VoidType(), argtys)
|
| 197 |
+
wrapper_module = self.create_module("cuda.kernel.wrapper")
|
| 198 |
+
fnty = ir.FunctionType(ir.IntType(32),
|
| 199 |
+
[self.call_conv.get_return_type(types.pyobject)]
|
| 200 |
+
+ argtys)
|
| 201 |
+
func = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name)
|
| 202 |
+
|
| 203 |
+
prefixed = itanium_mangler.prepend_namespace(func.name, ns='cudapy')
|
| 204 |
+
wrapfn = ir.Function(wrapper_module, wrapfnty, prefixed)
|
| 205 |
+
builder = ir.IRBuilder(wrapfn.append_basic_block(''))
|
| 206 |
+
|
| 207 |
+
if debug or lineinfo:
|
| 208 |
+
directives_only = lineinfo and not debug
|
| 209 |
+
debuginfo = self.DIBuilder(module=wrapper_module,
|
| 210 |
+
filepath=filename,
|
| 211 |
+
cgctx=self,
|
| 212 |
+
directives_only=directives_only)
|
| 213 |
+
debuginfo.mark_subprogram(
|
| 214 |
+
wrapfn, kernel_name, fndesc.args, argtypes, linenum,
|
| 215 |
+
)
|
| 216 |
+
debuginfo.mark_location(builder, linenum)
|
| 217 |
+
|
| 218 |
+
# Define error handling variable
|
| 219 |
+
def define_error_gv(postfix):
|
| 220 |
+
name = wrapfn.name + postfix
|
| 221 |
+
gv = cgutils.add_global_variable(wrapper_module, ir.IntType(32),
|
| 222 |
+
name)
|
| 223 |
+
gv.initializer = ir.Constant(gv.type.pointee, None)
|
| 224 |
+
return gv
|
| 225 |
+
|
| 226 |
+
gv_exc = define_error_gv("__errcode__")
|
| 227 |
+
gv_tid = []
|
| 228 |
+
gv_ctaid = []
|
| 229 |
+
for i in 'xyz':
|
| 230 |
+
gv_tid.append(define_error_gv("__tid%s__" % i))
|
| 231 |
+
gv_ctaid.append(define_error_gv("__ctaid%s__" % i))
|
| 232 |
+
|
| 233 |
+
callargs = arginfo.from_arguments(builder, wrapfn.args)
|
| 234 |
+
status, _ = self.call_conv.call_function(
|
| 235 |
+
builder, func, types.void, argtypes, callargs)
|
| 236 |
+
|
| 237 |
+
if debug:
|
| 238 |
+
# Check error status
|
| 239 |
+
with cgutils.if_likely(builder, status.is_ok):
|
| 240 |
+
builder.ret_void()
|
| 241 |
+
|
| 242 |
+
with builder.if_then(builder.not_(status.is_python_exc)):
|
| 243 |
+
# User exception raised
|
| 244 |
+
old = ir.Constant(gv_exc.type.pointee, None)
|
| 245 |
+
|
| 246 |
+
# Use atomic cmpxchg to prevent rewriting the error status
|
| 247 |
+
# Only the first error is recorded
|
| 248 |
+
|
| 249 |
+
xchg = builder.cmpxchg(gv_exc, old, status.code,
|
| 250 |
+
'monotonic', 'monotonic')
|
| 251 |
+
changed = builder.extract_value(xchg, 1)
|
| 252 |
+
|
| 253 |
+
# If the xchange is successful, save the thread ID.
|
| 254 |
+
sreg = nvvmutils.SRegBuilder(builder)
|
| 255 |
+
with builder.if_then(changed):
|
| 256 |
+
for dim, ptr, in zip("xyz", gv_tid):
|
| 257 |
+
val = sreg.tid(dim)
|
| 258 |
+
builder.store(val, ptr)
|
| 259 |
+
|
| 260 |
+
for dim, ptr, in zip("xyz", gv_ctaid):
|
| 261 |
+
val = sreg.ctaid(dim)
|
| 262 |
+
builder.store(val, ptr)
|
| 263 |
+
|
| 264 |
+
builder.ret_void()
|
| 265 |
+
|
| 266 |
+
nvvm.set_cuda_kernel(wrapfn)
|
| 267 |
+
library.add_ir_module(wrapper_module)
|
| 268 |
+
if debug or lineinfo:
|
| 269 |
+
debuginfo.finalize()
|
| 270 |
+
library.finalize()
|
| 271 |
+
|
| 272 |
+
if config.DUMP_LLVM:
|
| 273 |
+
utils.dump_llvm(fndesc, wrapper_module)
|
| 274 |
+
|
| 275 |
+
return library.get_function(wrapfn.name)
|
| 276 |
+
|
| 277 |
+
def make_constant_array(self, builder, aryty, arr):
|
| 278 |
+
"""
|
| 279 |
+
Unlike the parent version. This returns a a pointer in the constant
|
| 280 |
+
addrspace.
|
| 281 |
+
"""
|
| 282 |
+
|
| 283 |
+
lmod = builder.module
|
| 284 |
+
|
| 285 |
+
constvals = [
|
| 286 |
+
self.get_constant(types.byte, i)
|
| 287 |
+
for i in iter(arr.tobytes(order='A'))
|
| 288 |
+
]
|
| 289 |
+
constaryty = ir.ArrayType(ir.IntType(8), len(constvals))
|
| 290 |
+
constary = ir.Constant(constaryty, constvals)
|
| 291 |
+
|
| 292 |
+
addrspace = nvvm.ADDRSPACE_CONSTANT
|
| 293 |
+
gv = cgutils.add_global_variable(lmod, constary.type, "_cudapy_cmem",
|
| 294 |
+
addrspace=addrspace)
|
| 295 |
+
gv.linkage = 'internal'
|
| 296 |
+
gv.global_constant = True
|
| 297 |
+
gv.initializer = constary
|
| 298 |
+
|
| 299 |
+
# Preserve the underlying alignment
|
| 300 |
+
lldtype = self.get_data_type(aryty.dtype)
|
| 301 |
+
align = self.get_abi_sizeof(lldtype)
|
| 302 |
+
gv.align = 2 ** (align - 1).bit_length()
|
| 303 |
+
|
| 304 |
+
# Convert to generic address-space
|
| 305 |
+
ptrty = ir.PointerType(ir.IntType(8))
|
| 306 |
+
genptr = builder.addrspacecast(gv, ptrty, 'generic')
|
| 307 |
+
|
| 308 |
+
# Create array object
|
| 309 |
+
ary = self.make_array(aryty)(self, builder)
|
| 310 |
+
kshape = [self.get_constant(types.intp, s) for s in arr.shape]
|
| 311 |
+
kstrides = [self.get_constant(types.intp, s) for s in arr.strides]
|
| 312 |
+
self.populate_array(ary, data=builder.bitcast(genptr, ary.data.type),
|
| 313 |
+
shape=kshape,
|
| 314 |
+
strides=kstrides,
|
| 315 |
+
itemsize=ary.itemsize, parent=ary.parent,
|
| 316 |
+
meminfo=None)
|
| 317 |
+
|
| 318 |
+
return ary._getvalue()
|
| 319 |
+
|
| 320 |
+
def insert_const_string(self, mod, string):
|
| 321 |
+
"""
|
| 322 |
+
Unlike the parent version. This returns a a pointer in the constant
|
| 323 |
+
addrspace.
|
| 324 |
+
"""
|
| 325 |
+
text = cgutils.make_bytearray(string.encode("utf-8") + b"\x00")
|
| 326 |
+
name = '$'.join(["__conststring__",
|
| 327 |
+
itanium_mangler.mangle_identifier(string)])
|
| 328 |
+
# Try to reuse existing global
|
| 329 |
+
gv = mod.globals.get(name)
|
| 330 |
+
if gv is None:
|
| 331 |
+
# Not defined yet
|
| 332 |
+
gv = cgutils.add_global_variable(mod, text.type, name,
|
| 333 |
+
addrspace=nvvm.ADDRSPACE_CONSTANT)
|
| 334 |
+
gv.linkage = 'internal'
|
| 335 |
+
gv.global_constant = True
|
| 336 |
+
gv.initializer = text
|
| 337 |
+
|
| 338 |
+
# Cast to a i8* pointer
|
| 339 |
+
charty = gv.type.pointee.element
|
| 340 |
+
return gv.bitcast(charty.as_pointer(nvvm.ADDRSPACE_CONSTANT))
|
| 341 |
+
|
| 342 |
+
def insert_string_const_addrspace(self, builder, string):
|
| 343 |
+
"""
|
| 344 |
+
Insert a constant string in the constant addresspace and return a
|
| 345 |
+
generic i8 pointer to the data.
|
| 346 |
+
|
| 347 |
+
This function attempts to deduplicate.
|
| 348 |
+
"""
|
| 349 |
+
lmod = builder.module
|
| 350 |
+
gv = self.insert_const_string(lmod, string)
|
| 351 |
+
charptrty = ir.PointerType(ir.IntType(8))
|
| 352 |
+
return builder.addrspacecast(gv, charptrty, 'generic')
|
| 353 |
+
|
| 354 |
+
def optimize_function(self, func):
|
| 355 |
+
"""Run O1 function passes
|
| 356 |
+
"""
|
| 357 |
+
pass
|
| 358 |
+
## XXX skipped for now
|
| 359 |
+
# fpm = lp.FunctionPassManager.new(func.module)
|
| 360 |
+
#
|
| 361 |
+
# lp.PassManagerBuilder.new().populate(fpm)
|
| 362 |
+
#
|
| 363 |
+
# fpm.initialize()
|
| 364 |
+
# fpm.run(func)
|
| 365 |
+
# fpm.finalize()
|
| 366 |
+
|
| 367 |
+
def get_ufunc_info(self, ufunc_key):
|
| 368 |
+
return ufuncs.get_ufunc_info(ufunc_key)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
class CUDACallConv(MinimalCallConv):
|
| 372 |
+
pass
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
class CUDACABICallConv(BaseCallConv):
|
| 376 |
+
"""
|
| 377 |
+
Calling convention aimed at matching the CUDA C/C++ ABI. The implemented
|
| 378 |
+
function signature is:
|
| 379 |
+
|
| 380 |
+
<Python return type> (<Python arguments>)
|
| 381 |
+
|
| 382 |
+
Exceptions are unsupported in this convention.
|
| 383 |
+
"""
|
| 384 |
+
|
| 385 |
+
def _make_call_helper(self, builder):
|
| 386 |
+
# Call helpers are used to help report exceptions back to Python, so
|
| 387 |
+
# none is required here.
|
| 388 |
+
return None
|
| 389 |
+
|
| 390 |
+
def return_value(self, builder, retval):
|
| 391 |
+
return builder.ret(retval)
|
| 392 |
+
|
| 393 |
+
def return_user_exc(self, builder, exc, exc_args=None, loc=None,
|
| 394 |
+
func_name=None):
|
| 395 |
+
msg = "Python exceptions are unsupported in the CUDA C/C++ ABI"
|
| 396 |
+
raise NotImplementedError(msg)
|
| 397 |
+
|
| 398 |
+
def return_status_propagate(self, builder, status):
|
| 399 |
+
msg = "Return status is unsupported in the CUDA C/C++ ABI"
|
| 400 |
+
raise NotImplementedError(msg)
|
| 401 |
+
|
| 402 |
+
def get_function_type(self, restype, argtypes):
|
| 403 |
+
"""
|
| 404 |
+
Get the LLVM IR Function type for *restype* and *argtypes*.
|
| 405 |
+
"""
|
| 406 |
+
arginfo = self._get_arg_packer(argtypes)
|
| 407 |
+
argtypes = list(arginfo.argument_types)
|
| 408 |
+
fnty = ir.FunctionType(self.get_return_type(restype), argtypes)
|
| 409 |
+
return fnty
|
| 410 |
+
|
| 411 |
+
def decorate_function(self, fn, args, fe_argtypes, noalias=False):
|
| 412 |
+
"""
|
| 413 |
+
Set names and attributes of function arguments.
|
| 414 |
+
"""
|
| 415 |
+
assert not noalias
|
| 416 |
+
arginfo = self._get_arg_packer(fe_argtypes)
|
| 417 |
+
arginfo.assign_names(self.get_arguments(fn),
|
| 418 |
+
['arg.' + a for a in args])
|
| 419 |
+
|
| 420 |
+
def get_arguments(self, func):
|
| 421 |
+
"""
|
| 422 |
+
Get the Python-level arguments of LLVM *func*.
|
| 423 |
+
"""
|
| 424 |
+
return func.args
|
| 425 |
+
|
| 426 |
+
def call_function(self, builder, callee, resty, argtys, args):
|
| 427 |
+
"""
|
| 428 |
+
Call the Numba-compiled *callee*.
|
| 429 |
+
"""
|
| 430 |
+
arginfo = self._get_arg_packer(argtys)
|
| 431 |
+
realargs = arginfo.as_arguments(builder, args)
|
| 432 |
+
code = builder.call(callee, realargs)
|
| 433 |
+
# No status required as we don't support exceptions or a distinct None
|
| 434 |
+
# value in a C ABI.
|
| 435 |
+
status = None
|
| 436 |
+
out = self.context.get_returned_value(builder, resty, code)
|
| 437 |
+
return status, out
|
| 438 |
+
|
| 439 |
+
def get_return_type(self, ty):
|
| 440 |
+
return self.context.data_model_manager[ty].get_return_type()
|
lib/python3.10/site-packages/numba/cuda/testing.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import platform
|
| 3 |
+
import shutil
|
| 4 |
+
|
| 5 |
+
from numba.tests.support import SerialMixin
|
| 6 |
+
from numba.cuda.cuda_paths import get_conda_ctk
|
| 7 |
+
from numba.cuda.cudadrv import driver, devices, libs
|
| 8 |
+
from numba.core import config
|
| 9 |
+
from numba.tests.support import TestCase
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
import unittest
|
| 12 |
+
|
| 13 |
+
# Directory containing this module, used to locate the bundled test data
# files under tests/data.
numba_cuda_dir = Path(__file__).parent
test_data_dir = numba_cuda_dir / 'tests' / 'data'
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CUDATestCase(SerialMixin, TestCase):
    """
    For tests that use a CUDA device. Test methods in a CUDATestCase must not
    be run out of module order, because the ContextResettingTestCase may reset
    the context and destroy resources used by a normal CUDATestCase if any of
    its tests are run between tests from a CUDATestCase.
    """

    def setUp(self):
        # Save the current warning configuration so tearDown can restore it
        # exactly, even if another test changed it.
        self._low_occupancy_warnings = config.CUDA_LOW_OCCUPANCY_WARNINGS
        self._warn_on_implicit_copy = config.CUDA_WARN_ON_IMPLICIT_COPY

        # Disable warnings about low gpu utilization in the test suite
        config.CUDA_LOW_OCCUPANCY_WARNINGS = 0
        # Disable warnings about host arrays in the test suite
        config.CUDA_WARN_ON_IMPLICIT_COPY = 0

    def tearDown(self):
        # Restore the warning configuration saved in setUp.
        config.CUDA_LOW_OCCUPANCY_WARNINGS = self._low_occupancy_warnings
        config.CUDA_WARN_ON_IMPLICIT_COPY = self._warn_on_implicit_copy

    def skip_if_lto(self, reason):
        """Skip the current test when the active linker supports LTO."""
        # Some linkers need the compute capability to be specified, so we
        # always specify it here.
        cc = devices.get_context().device.compute_capability
        linker = driver.Linker.new(cc=cc)
        if linker.lto:
            self.skipTest(reason)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class ContextResettingTestCase(CUDATestCase):
    """
    For tests where the context needs to be reset after each test. Typically
    these inspect or modify parts of the context that would usually be expected
    to be internal implementation details (such as the state of allocations and
    deallocations, etc.).
    """

    def tearDown(self):
        super().tearDown()
        # NOTE(review): imported locally, presumably to avoid touching the
        # driver at module import time -- confirm before hoisting.
        from numba.cuda.cudadrv.devices import reset
        reset()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def ensure_supported_ccs_initialized():
    """Populate NVVM's supported compute capability list before any fork.

    No-op when CUDA is not available.
    """
    from numba.cuda import is_available as cuda_is_available
    from numba.cuda.cudadrv import nvvm

    if cuda_is_available():
        # Ensure that cudart.so is loaded and the list of supported compute
        # capabilities in the nvvm module is populated before a fork. This is
        # needed because some compilation tests don't require a CUDA context,
        # but do use NVVM, and it is required that libcudart.so should be
        # loaded before a fork (note that the requirement is not explicitly
        # documented).
        nvvm.get_supported_ccs()
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def skip_on_cudasim(reason):
    """Skip this test if running on the CUDA simulator"""
    # Returns a decorator; intended usage is @skip_on_cudasim("...").
    return unittest.skipIf(config.ENABLE_CUDASIM, reason)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def skip_unless_cudasim(reason):
    """Skip this test if running on CUDA hardware"""
    # Mirror image of skip_on_cudasim: only runs under the simulator.
    return unittest.skipUnless(config.ENABLE_CUDASIM, reason)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def skip_unless_conda_cudatoolkit(reason):
    """Skip test if the CUDA toolkit was not installed by Conda"""
    # get_conda_ctk() returns None when no Conda-installed toolkit is found.
    return unittest.skipUnless(get_conda_ctk() is not None, reason)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def skip_if_external_memmgr(reason):
    """Skip test if an EMM Plugin is in use"""
    # Any value other than 'default' indicates an external memory manager
    # has been configured via config.CUDA_MEMORY_MANAGER.
    return unittest.skipIf(config.CUDA_MEMORY_MANAGER != 'default', reason)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def skip_under_cuda_memcheck(reason):
    """Skip the decorated test when the CUDA_MEMCHECK environment variable
    is present (i.e. the suite is running under cuda-memcheck)."""
    running_under_memcheck = 'CUDA_MEMCHECK' in os.environ
    return unittest.skipIf(running_under_memcheck, reason)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def skip_without_nvdisasm(reason):
    """Skip the decorated test when the nvdisasm tool is not on PATH."""
    return unittest.skipIf(shutil.which('nvdisasm') is None, reason)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def skip_with_nvdisasm(reason):
    """Skip the decorated test when the nvdisasm tool IS on PATH."""
    return unittest.skipIf(shutil.which('nvdisasm') is not None, reason)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def skip_on_arm(reason):
    """Skip the decorated test on ARM platforms.

    ``platform.processor()`` returns an empty string on many Linux systems
    (including ARM64 ones), so ``platform.machine()`` is used as a fallback
    to keep the detection working there. The comparison is case-insensitive
    because the processor string's capitalisation varies across platforms.
    """
    cpu = platform.processor() or platform.machine()
    is_arm = cpu.lower().startswith(('arm', 'aarch'))
    return unittest.skipIf(is_arm, reason)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def skip_if_cuda_includes_missing(fn):
    """Skip *fn* when cuda.h is absent from the configured include path.

    Used as a plain decorator (applied directly to the test function).
    """
    # Skip when cuda.h is not available - generally this should indicate
    # whether the CUDA includes are available or not
    cuda_h = os.path.join(config.CUDA_INCLUDE_PATH, 'cuda.h')
    cuda_h_file = (os.path.exists(cuda_h) and os.path.isfile(cuda_h))
    reason = 'CUDA include dir not available on this system'
    return unittest.skipUnless(cuda_h_file, reason)(fn)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def skip_if_mvc_enabled(reason):
    """Skip a test if Minor Version Compatibility is enabled"""
    # Controlled by config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY.
    return unittest.skipIf(config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY,
                           reason)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def skip_if_mvc_libraries_unavailable(fn):
    """Skip *fn* unless both cubinlinker and ptxcompiler can be imported."""
    try:
        import cubinlinker  # noqa: F401
        import ptxcompiler  # noqa: F401
    except ImportError:
        libs_available = False
    else:
        libs_available = True

    return unittest.skipUnless(libs_available,
                               "Requires cubinlinker and ptxcompiler")(fn)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def cc_X_or_above(major, minor):
    """Return True if the current device's compute capability is at least
    ``(major, minor)``. Always True on the simulator."""
    if not config.ENABLE_CUDASIM:
        cc = devices.get_context().device.compute_capability
        return cc >= (major, minor)
    else:
        return True
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def skip_unless_cc_50(fn):
    # Plain decorator: skip unless compute capability >= 5.0.
    return unittest.skipUnless(cc_X_or_above(5, 0), "requires cc >= 5.0")(fn)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def skip_unless_cc_53(fn):
    # Plain decorator: skip unless compute capability >= 5.3.
    return unittest.skipUnless(cc_X_or_above(5, 3), "requires cc >= 5.3")(fn)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def skip_unless_cc_60(fn):
    # Plain decorator: skip unless compute capability >= 6.0.
    return unittest.skipUnless(cc_X_or_above(6, 0), "requires cc >= 6.0")(fn)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def skip_unless_cc_75(fn):
    # Plain decorator: skip unless compute capability >= 7.5.
    return unittest.skipUnless(cc_X_or_above(7, 5), "requires cc >= 7.5")(fn)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def xfail_unless_cudasim(fn):
    """Mark *fn* as an expected failure on hardware; leave it unchanged
    when running on the simulator."""
    return fn if config.ENABLE_CUDASIM else unittest.expectedFailure(fn)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def skip_with_cuda_python(reason):
    """Skip the decorated test when the NVIDIA binding is in use
    (driver.USE_NV_BINDING)."""
    return unittest.skipIf(driver.USE_NV_BINDING, reason)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def cudadevrt_missing():
    """Return True when the static cudadevrt library cannot be located.

    Short-circuits to False on the simulator, which performs no linking.
    """
    if config.ENABLE_CUDASIM:
        return False
    try:
        path = libs.get_cudalib('cudadevrt', static=True)
        # check_static_lib raises FileNotFoundError if the file is absent.
        libs.check_static_lib(path)
    except FileNotFoundError:
        return True
    return False
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def skip_if_cudadevrt_missing(fn):
    # Plain decorator: skip *fn* when the static cudadevrt library is absent.
    return unittest.skipIf(cudadevrt_missing(), 'cudadevrt missing')(fn)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class ForeignArray(object):
    """
    Emulate an array that originates from another library by exposing only
    the CUDA Array Interface of a wrapped DeviceNDArray, hiding the fact
    that a DeviceNDArray backs it.
    """

    def __init__(self, arr):
        # Re-export the wrapped array's interface and keep a reference so
        # the underlying data stays alive for the lifetime of this wrapper.
        self.__cuda_array_interface__ = arr.__cuda_array_interface__
        self._arr = arr
|
lib/python3.10/site-packages/numba/cuda/tests/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.testing import ensure_supported_ccs_initialized
|
| 2 |
+
from numba.testing import unittest
|
| 3 |
+
from numba.testing import load_testsuite
|
| 4 |
+
from numba import cuda
|
| 5 |
+
from os.path import dirname, join
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def load_tests(loader, tests, pattern):
    """Build the CUDA test suite.

    The 'nocuda' tests are always included. When CUDA (or the simulator)
    is available, simulator tests are added; driver, kernel and doc-example
    tests are added only if the first GPU has compute capability >= 2.0.
    """
    suite = unittest.TestSuite()
    this_dir = dirname(__file__)
    # Must run before forking test processes; see its docstring.
    ensure_supported_ccs_initialized()
    suite.addTests(load_testsuite(loader, join(this_dir, 'nocuda')))
    if cuda.is_available():
        suite.addTests(load_testsuite(loader, join(this_dir, 'cudasim')))
        gpus = cuda.list_devices()
        if gpus and gpus[0].compute_capability >= (2, 0):
            suite.addTests(load_testsuite(loader, join(this_dir, 'cudadrv')))
            suite.addTests(load_testsuite(loader, join(this_dir, 'cudapy')))
            suite.addTests(load_testsuite(loader, join(this_dir, 'doc_examples')))
        else:
            print("skipped CUDA tests because GPU CC < 2.0")
    else:
        print("skipped CUDA tests")
    return suite
|
lib/python3.10/site-packages/numba/cuda/tests/data/__init__.py
ADDED
|
File without changes
|
lib/python3.10/site-packages/numba/cuda/tests/data/cuda_include.cu
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Not all CUDA includes are safe to include in device code compiled by NVRTC,
|
| 2 |
+
// because it does not have paths to all system include directories. Headers
|
| 3 |
+
// such as cuda_device_runtime_api.h are safe to use in NVRTC without adding
|
| 4 |
+
// additional includes.
|
| 5 |
+
#include <cuda_device_runtime_api.h>
|
lib/python3.10/site-packages/numba/cuda/tests/data/error.cu
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
extern "C" __device__
|
| 2 |
+
int bar(int* out, int a) {
|
| 3 |
+
// Explicitly placed to generate an error
|
| 4 |
+
SYNTAX ERROR
|
| 5 |
+
*out = a * 2;
|
| 6 |
+
return 0;
|
| 7 |
+
}
|
lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.cu
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Compile with:
|
| 2 |
+
//
|
| 3 |
+
// nvcc -gencode arch=compute_50,code=compute_50 -rdc true -ptx jitlink.cu
|
| 4 |
+
//
|
| 5 |
+
// using the oldest supported toolkit version (10.2 at the time of writing).
|
| 6 |
+
|
| 7 |
+
extern "C" __device__
|
| 8 |
+
int bar(int *out, int a)
|
| 9 |
+
{
|
| 10 |
+
*out = a * 2;
|
| 11 |
+
return 0;
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
// The out argument is necessary due to Numba's CUDA calling convention, which
|
| 16 |
+
// always reserves the first parameter for a pointer to a returned value, even
|
| 17 |
+
// if there is no return value.
|
| 18 |
+
extern "C" __device__
|
| 19 |
+
int array_mutator(void *out, int *a)
|
| 20 |
+
{
|
| 21 |
+
a[0] = a[1];
|
| 22 |
+
return 0;
|
| 23 |
+
}
|
lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.ptx
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
//
|
| 2 |
+
// Generated by NVIDIA NVVM Compiler
|
| 3 |
+
//
|
| 4 |
+
// Compiler Build ID: CL-27506705
|
| 5 |
+
// Cuda compilation tools, release 10.2, V10.2.89
|
| 6 |
+
// Based on LLVM 3.4svn
|
| 7 |
+
//
|
| 8 |
+
|
| 9 |
+
.version 6.5
|
| 10 |
+
.target sm_50
|
| 11 |
+
.address_size 64
|
| 12 |
+
|
| 13 |
+
// .globl bar
|
| 14 |
+
|
| 15 |
+
.visible .func (.param .b32 func_retval0) bar(
|
| 16 |
+
.param .b64 bar_param_0,
|
| 17 |
+
.param .b32 bar_param_1
|
| 18 |
+
)
|
| 19 |
+
{
|
| 20 |
+
.reg .b32 %r<4>;
|
| 21 |
+
.reg .b64 %rd<2>;
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
ld.param.u64 %rd1, [bar_param_0];
|
| 25 |
+
ld.param.u32 %r1, [bar_param_1];
|
| 26 |
+
shl.b32 %r2, %r1, 1;
|
| 27 |
+
st.u32 [%rd1], %r2;
|
| 28 |
+
mov.u32 %r3, 0;
|
| 29 |
+
st.param.b32 [func_retval0+0], %r3;
|
| 30 |
+
ret;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
// .globl array_mutator
|
| 34 |
+
.visible .func (.param .b32 func_retval0) array_mutator(
|
| 35 |
+
.param .b64 array_mutator_param_0,
|
| 36 |
+
.param .b64 array_mutator_param_1
|
| 37 |
+
)
|
| 38 |
+
{
|
| 39 |
+
.reg .b32 %r<3>;
|
| 40 |
+
.reg .b64 %rd<2>;
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
ld.param.u64 %rd1, [array_mutator_param_1];
|
| 44 |
+
ld.u32 %r1, [%rd1+4];
|
| 45 |
+
st.u32 [%rd1], %r1;
|
| 46 |
+
mov.u32 %r2, 0;
|
| 47 |
+
st.param.b32 [func_retval0+0], %r2;
|
| 48 |
+
ret;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
|
lib/python3.10/site-packages/numba/cuda/tests/data/warn.cu
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
extern "C" __device__
|
| 2 |
+
int bar(int* out, int a) {
|
| 3 |
+
// Explicitly placed to generate a warning for testing the NVRTC program log
|
| 4 |
+
int unused;
|
| 5 |
+
*out = a * 2;
|
| 6 |
+
return 0;
|
| 7 |
+
}
|
lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from numba.cuda.testing import CUDATestCase, skip_on_cudasim
|
| 4 |
+
from numba.tests.support import captured_stdout
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level")
class TestCpuGpuCompat(CUDATestCase):
    """
    Test compatibility of CPU and GPU functions

    NOTE(review): the ``ex_cpu_gpu_compat.*.begin/end`` comments appear to
    delimit snippets extracted into the documentation -- confirm before
    editing anything between a begin/end pair.
    """

    def setUp(self):
        # Prevent output from this test showing up when running the test suite
        self._captured_stdout = captured_stdout()
        self._captured_stdout.__enter__()
        super().setUp()

    def tearDown(self):
        # No exception type, value, or traceback
        self._captured_stdout.__exit__(None, None, None)
        super().tearDown()

    def test_ex_cpu_gpu_compat(self):
        """Run the CPU/GPU compatibility doc example and check its result."""
        # ex_cpu_gpu_compat.import.begin
        from math import pi

        import numba
        from numba import cuda
        # ex_cpu_gpu_compat.import.end

        # ex_cpu_gpu_compat.allocate.begin
        X = cuda.to_device([1, 10, 234])
        Y = cuda.to_device([2, 2, 4014])
        Z = cuda.to_device([3, 14, 2211])
        results = cuda.to_device([0.0, 0.0, 0.0])
        # ex_cpu_gpu_compat.allocate.end

        # ex_cpu_gpu_compat.define.begin
        @numba.jit
        def business_logic(x, y, z):
            return 4 * z * (2 * x - (4 * y) / 2 * pi)
        # ex_cpu_gpu_compat.define.end

        # ex_cpu_gpu_compat.cpurun.begin
        print(business_logic(1, 2, 3)) # -126.79644737231007
        # ex_cpu_gpu_compat.cpurun.end

        # ex_cpu_gpu_compat.usegpu.begin
        @cuda.jit
        def f(res, xarr, yarr, zarr):
            tid = cuda.grid(1)
            if tid < len(xarr):
                # The function decorated with numba.jit may be directly reused
                res[tid] = business_logic(xarr[tid], yarr[tid], zarr[tid])
        # ex_cpu_gpu_compat.usegpu.end

        # ex_cpu_gpu_compat.launch.begin
        f.forall(len(X))(results, X, Y, Z)
        print(results)
        # [-126.79644737231007, 416.28324559588634, -218912930.2987788]
        # ex_cpu_gpu_compat.launch.end

        expect = [
            business_logic(x, y, z) for x, y, z in zip(X, Y, Z)
        ]

        np.testing.assert_equal(
            expect,
            results.copy_to_host()
        )
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
if __name__ == "__main__":
|
| 76 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_montecarlo.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from numba.cuda.testing import CUDATestCase, skip_on_cudasim
|
| 4 |
+
from numba.tests.support import captured_stdout
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level")
class TestMonteCarlo(CUDATestCase):
    """
    Test monte-carlo integration

    NOTE(review): the ``ex_montecarlo.*.begin/end`` comments appear to
    delimit snippets extracted into the documentation -- confirm before
    editing anything between a begin/end pair.
    """

    def setUp(self):
        # Prevent output from this test showing up when running the test suite
        self._captured_stdout = captured_stdout()
        self._captured_stdout.__enter__()
        super().setUp()

    def tearDown(self):
        # No exception type, value, or traceback
        self._captured_stdout.__exit__(None, None, None)
        super().tearDown()

    def test_ex_montecarlo(self):
        """Run the Monte Carlo integration doc example and check accuracy."""
        # ex_montecarlo.import.begin
        import numba
        import numpy as np
        from numba import cuda
        from numba.cuda.random import (
            create_xoroshiro128p_states,
            xoroshiro128p_uniform_float32,
        )
        # ex_montecarlo.import.end

        # ex_montecarlo.define.begin
        # number of samples, higher will lead to a more accurate answer
        nsamps = 1000000
        # ex_montecarlo.define.end

        # ex_montecarlo.kernel.begin
        @cuda.jit
        def mc_integrator_kernel(out, rng_states, lower_lim, upper_lim):
            """
            kernel to draw random samples and evaluate the function to
            be integrated at those sample values
            """
            size = len(out)

            gid = cuda.grid(1)
            if gid < size:
                # draw a sample between 0 and 1 on this thread
                samp = xoroshiro128p_uniform_float32(rng_states, gid)

                # normalize this sample to the limit range
                samp = samp * (upper_lim - lower_lim) + lower_lim

                # evaluate the function to be
                # integrated at the normalized
                # value of the sample
                y = func(samp)
                out[gid] = y
        # ex_montecarlo.kernel.end

        # ex_montecarlo.callfunc.begin
        @cuda.reduce
        def sum_reduce(a, b):
            return a + b

        def mc_integrate(lower_lim, upper_lim, nsamps):
            """
            approximate the definite integral of `func` from
            `lower_lim` to `upper_lim`
            """
            out = cuda.to_device(np.zeros(nsamps, dtype="float32"))
            rng_states = create_xoroshiro128p_states(nsamps, seed=42)

            # jit the function for use in CUDA kernels

            mc_integrator_kernel.forall(nsamps)(
                out, rng_states, lower_lim, upper_lim
            )
            # normalization factor to convert
            # to the average: (b - a)/(N - 1)
            factor = (upper_lim - lower_lim) / (nsamps - 1)

            return sum_reduce(out) * factor
        # ex_montecarlo.callfunc.end

        # ex_montecarlo.launch.begin
        # define a function to integrate
        @numba.jit
        def func(x):
            return 1.0 / x

        mc_integrate(1, 2, nsamps) # array(0.6929643, dtype=float32)
        mc_integrate(2, 3, nsamps) # array(0.4054021, dtype=float32)
        # ex_montecarlo.launch.end

        # values computed independently using maple
        np.testing.assert_allclose(
            mc_integrate(1, 2, nsamps), 0.69315, atol=0.001
        )
        np.testing.assert_allclose(
            mc_integrate(2, 3, nsamps), 0.4055, atol=0.001
        )
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
if __name__ == "__main__":
|
| 109 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_ufunc.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from numba.cuda.testing import CUDATestCase, skip_on_cudasim
|
| 4 |
+
from numba.tests.support import captured_stdout
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level")
class TestUFunc(CUDATestCase):
    """
    Test calling a UFunc

    NOTE(review): the ``ex_cuda_ufunc.begin/end`` comments appear to
    delimit a snippet extracted into the documentation -- confirm before
    editing anything between them.
    """

    def setUp(self):
        # Prevent output from this test showing
        # up when running the test suite
        self._captured_stdout = captured_stdout()
        self._captured_stdout.__enter__()
        super().setUp()

    def tearDown(self):
        # No exception type, value, or traceback
        self._captured_stdout.__exit__(None, None, None)
        super().tearDown()

    def test_ex_cuda_ufunc_call(self):
        """Run the ufunc-in-kernel doc example and check the result."""
        # ex_cuda_ufunc.begin
        import numpy as np
        from numba import cuda

        # A kernel calling a ufunc (sin, in this case)
        @cuda.jit
        def f(r, x):
            # Compute sin(x) with result written to r
            np.sin(x, r)

        # Declare input and output arrays
        x = np.arange(10, dtype=np.float32) - 5
        r = np.zeros_like(x)

        # Launch kernel that calls the ufunc
        f[1, 1](r, x)

        # A quick sanity check demonstrating equality of the sine computed by
        # the sin ufunc inside the kernel, and NumPy's sin ufunc
        np.testing.assert_allclose(r, np.sin(x))
        # ex_cuda_ufunc.end
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
if __name__ == "__main__":
|
| 50 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/nocuda/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.testing import ensure_supported_ccs_initialized
|
| 2 |
+
from numba.testing import load_testsuite
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def load_tests(loader, tests, pattern):
    """Load the tests in this directory (they do not require a GPU)."""
    # Still needed here: initializes NVVM's supported CC list pre-fork.
    ensure_supported_ccs_initialized()
    return load_testsuite(loader, os.path.dirname(__file__))
|