diff --git a/.gitattributes b/.gitattributes index a3e13237418019e31344a094416d170a8b7c456d..f43a70b5b412b28a35ddf00030bd2c628d105229 100644 --- a/.gitattributes +++ b/.gitattributes @@ -91,3 +91,4 @@ lib/python3.10/site-packages/av/container/streams.cpython-310-x86_64-linux-gnu.s lib/python3.10/site-packages/av/data/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a9e58ba18ac9fe86edc88bee9ec6195265dab5bd --- /dev/null +++ b/lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08baa2d768f5ab5f417106d6d927bffd6d7f46d05db674e4db0cb0ba38ce2027 +size 753129 diff --git a/lib/python3.10/site-packages/numba/cuda/__init__.py b/lib/python3.10/site-packages/numba/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a30c316c4a524b3a41a080dd7e82fe597e6f130 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/__init__.py @@ -0,0 +1,22 @@ +from numba import runtests +from numba.core import config + +if config.ENABLE_CUDASIM: + from .simulator_init import * +else: + from .device_init import * + from .device_init import _auto_device + +from numba.cuda.compiler import (compile, compile_for_current_device, + compile_ptx, compile_ptx_for_current_device) + +# Are we the numba.cuda built in to upstream Numba, or the out-of-tree +# 
NVIDIA-maintained target? +implementation = "Built-in" + + +def test(*args, **kwargs): + if not is_available(): + raise cuda_error() + + return runtests.main("numba.cuda.tests", *args, **kwargs) diff --git a/lib/python3.10/site-packages/numba/cuda/api.py b/lib/python3.10/site-packages/numba/cuda/api.py new file mode 100644 index 0000000000000000000000000000000000000000..43847496fa17e618fbdbf4e9478360e008f88ab5 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/api.py @@ -0,0 +1,525 @@ +""" +API that are reported to numba.cuda +""" + + +import contextlib +import os + +import numpy as np + +from .cudadrv import devicearray, devices, driver +from numba.core import config +from numba.cuda.api_util import prepare_shape_strides_dtype + +# NDarray device helper + +require_context = devices.require_context +current_context = devices.get_context +gpus = devices.gpus + + +@require_context +def from_cuda_array_interface(desc, owner=None, sync=True): + """Create a DeviceNDArray from a cuda-array-interface description. + The ``owner`` is the owner of the underlying memory. + The resulting DeviceNDArray will acquire a reference from it. + + If ``sync`` is ``True``, then the imported stream (if present) will be + synchronized. 
+ """ + version = desc.get('version') + # Mask introduced in version 1 + if 1 <= version: + mask = desc.get('mask') + # Would ideally be better to detect if the mask is all valid + if mask is not None: + raise NotImplementedError('Masked arrays are not supported') + + shape = desc['shape'] + strides = desc.get('strides') + dtype = np.dtype(desc['typestr']) + + shape, strides, dtype = prepare_shape_strides_dtype( + shape, strides, dtype, order='C') + size = driver.memory_size_from_info(shape, strides, dtype.itemsize) + + devptr = driver.get_devptr_for_active_ctx(desc['data'][0]) + data = driver.MemoryPointer( + current_context(), devptr, size=size, owner=owner) + stream_ptr = desc.get('stream', None) + if stream_ptr is not None: + stream = external_stream(stream_ptr) + if sync and config.CUDA_ARRAY_INTERFACE_SYNC: + stream.synchronize() + else: + stream = 0 # No "Numba default stream", not the CUDA default stream + da = devicearray.DeviceNDArray(shape=shape, strides=strides, + dtype=dtype, gpu_data=data, + stream=stream) + return da + + +def as_cuda_array(obj, sync=True): + """Create a DeviceNDArray from any object that implements + the :ref:`cuda array interface `. + + A view of the underlying GPU buffer is created. No copying of the data + is done. The resulting DeviceNDArray will acquire a reference from `obj`. + + If ``sync`` is ``True``, then the imported stream (if present) will be + synchronized. + """ + if not is_cuda_array(obj): + raise TypeError("*obj* doesn't implement the cuda array interface.") + else: + return from_cuda_array_interface(obj.__cuda_array_interface__, + owner=obj, sync=sync) + + +def is_cuda_array(obj): + """Test if the object has defined the `__cuda_array_interface__` attribute. + + Does not verify the validity of the interface. + """ + return hasattr(obj, '__cuda_array_interface__') + + +def is_float16_supported(): + """Whether 16-bit floats are supported. + + float16 is always supported in current versions of Numba - returns True. 
+ """ + return True + + +@require_context +def to_device(obj, stream=0, copy=True, to=None): + """to_device(obj, stream=0, copy=True, to=None) + + Allocate and transfer a numpy ndarray or structured scalar to the device. + + To copy host->device a numpy array:: + + ary = np.arange(10) + d_ary = cuda.to_device(ary) + + To enqueue the transfer to a stream:: + + stream = cuda.stream() + d_ary = cuda.to_device(ary, stream=stream) + + The resulting ``d_ary`` is a ``DeviceNDArray``. + + To copy device->host:: + + hary = d_ary.copy_to_host() + + To copy device->host to an existing array:: + + ary = np.empty(shape=d_ary.shape, dtype=d_ary.dtype) + d_ary.copy_to_host(ary) + + To enqueue the transfer to a stream:: + + hary = d_ary.copy_to_host(stream=stream) + """ + if to is None: + to, new = devicearray.auto_device(obj, stream=stream, copy=copy, + user_explicit=True) + return to + if copy: + to.copy_to_device(obj, stream=stream) + return to + + +@require_context +def device_array(shape, dtype=np.float64, strides=None, order='C', stream=0): + """device_array(shape, dtype=np.float64, strides=None, order='C', stream=0) + + Allocate an empty device ndarray. Similar to :meth:`numpy.empty`. + """ + shape, strides, dtype = prepare_shape_strides_dtype(shape, strides, dtype, + order) + return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype, + stream=stream) + + +@require_context +def managed_array(shape, dtype=np.float64, strides=None, order='C', stream=0, + attach_global=True): + """managed_array(shape, dtype=np.float64, strides=None, order='C', stream=0, + attach_global=True) + + Allocate a np.ndarray with a buffer that is managed. + Similar to np.empty(). + + Managed memory is supported on Linux / x86 and PowerPC, and is considered + experimental on Windows and Linux / AArch64. + + :param attach_global: A flag indicating whether to attach globally. Global + attachment implies that the memory is accessible from + any stream on any device. 
If ``False``, attachment is + *host*, and memory is only accessible by devices + with Compute Capability 6.0 and later. + """ + shape, strides, dtype = prepare_shape_strides_dtype(shape, strides, dtype, + order) + bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize) + buffer = current_context().memallocmanaged(bytesize, + attach_global=attach_global) + npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order, + buffer=buffer) + managedview = np.ndarray.view(npary, type=devicearray.ManagedNDArray) + managedview.device_setup(buffer, stream=stream) + return managedview + + +@require_context +def pinned_array(shape, dtype=np.float64, strides=None, order='C'): + """pinned_array(shape, dtype=np.float64, strides=None, order='C') + + Allocate an :class:`ndarray ` with a buffer that is pinned + (pagelocked). Similar to :func:`np.empty() `. + """ + shape, strides, dtype = prepare_shape_strides_dtype(shape, strides, dtype, + order) + bytesize = driver.memory_size_from_info(shape, strides, + dtype.itemsize) + buffer = current_context().memhostalloc(bytesize) + return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order, + buffer=buffer) + + +@require_context +def mapped_array(shape, dtype=np.float64, strides=None, order='C', stream=0, + portable=False, wc=False): + """mapped_array(shape, dtype=np.float64, strides=None, order='C', stream=0, + portable=False, wc=False) + + Allocate a mapped ndarray with a buffer that is pinned and mapped on + to the device. Similar to np.empty() + + :param portable: a boolean flag to allow the allocated device memory to be + usable in multiple devices. + :param wc: a boolean flag to enable writecombined allocation which is faster + to write by the host and to read by the device, but slower to + write by the host and slower to write by the device. 
+ """ + shape, strides, dtype = prepare_shape_strides_dtype(shape, strides, dtype, + order) + bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize) + buffer = current_context().memhostalloc(bytesize, mapped=True) + npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order, + buffer=buffer) + mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray) + mappedview.device_setup(buffer, stream=stream) + return mappedview + + +@contextlib.contextmanager +@require_context +def open_ipc_array(handle, shape, dtype, strides=None, offset=0): + """ + A context manager that opens a IPC *handle* (*CUipcMemHandle*) that is + represented as a sequence of bytes (e.g. *bytes*, tuple of int) + and represent it as an array of the given *shape*, *strides* and *dtype*. + The *strides* can be omitted. In that case, it is assumed to be a 1D + C contiguous array. + + Yields a device array. + + The IPC handle is closed automatically when context manager exits. + """ + dtype = np.dtype(dtype) + # compute size + size = np.prod(shape) * dtype.itemsize + # manually recreate the IPC mem handle + if driver.USE_NV_BINDING: + driver_handle = driver.binding.CUipcMemHandle() + driver_handle.reserved = handle + else: + driver_handle = driver.drvapi.cu_ipc_mem_handle(*handle) + # use *IpcHandle* to open the IPC memory + ipchandle = driver.IpcHandle(None, driver_handle, size, offset=offset) + yield ipchandle.open_array(current_context(), shape=shape, + strides=strides, dtype=dtype) + ipchandle.close() + + +def synchronize(): + "Synchronize the current context." + return current_context().synchronize() + + +def _contiguous_strides_like_array(ary): + """ + Given an array, compute strides for a new contiguous array of the same + shape. + """ + # Don't recompute strides if the default strides will be sufficient to + # create a contiguous array. 
+ if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1: + return None + + # Otherwise, we need to compute new strides using an algorithm adapted from + # NumPy v1.17.4's PyArray_NewLikeArrayWithShape in + # core/src/multiarray/ctors.c. We permute the strides in ascending order + # then compute the stride for the dimensions with the same permutation. + + # Stride permutation. E.g. a stride array (4, -2, 12) becomes + # [(1, -2), (0, 4), (2, 12)] + strideperm = [ x for x in enumerate(ary.strides) ] + strideperm.sort(key=lambda x: x[1]) + + # Compute new strides using permutation + strides = [0] * len(ary.strides) + stride = ary.dtype.itemsize + for i_perm, _ in strideperm: + strides[i_perm] = stride + stride *= ary.shape[i_perm] + return tuple(strides) + + +def _order_like_array(ary): + if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']: + return 'F' + else: + return 'C' + + +def device_array_like(ary, stream=0): + """ + Call :func:`device_array() ` with information from + the array. + """ + strides = _contiguous_strides_like_array(ary) + order = _order_like_array(ary) + return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides, + order=order, stream=stream) + + +def mapped_array_like(ary, stream=0, portable=False, wc=False): + """ + Call :func:`mapped_array() ` with the information + from the array. + """ + strides = _contiguous_strides_like_array(ary) + order = _order_like_array(ary) + return mapped_array(shape=ary.shape, dtype=ary.dtype, strides=strides, + order=order, stream=stream, portable=portable, wc=wc) + + +def pinned_array_like(ary): + """ + Call :func:`pinned_array() ` with the information + from the array. + """ + strides = _contiguous_strides_like_array(ary) + order = _order_like_array(ary) + return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides, + order=order) + + +# Stream helper +@require_context +def stream(): + """ + Create a CUDA stream that represents a command queue for the device. 
+ """ + return current_context().create_stream() + + +@require_context +def default_stream(): + """ + Get the default CUDA stream. CUDA semantics in general are that the default + stream is either the legacy default stream or the per-thread default stream + depending on which CUDA APIs are in use. In Numba, the APIs for the legacy + default stream are always the ones in use, but an option to use APIs for + the per-thread default stream may be provided in future. + """ + return current_context().get_default_stream() + + +@require_context +def legacy_default_stream(): + """ + Get the legacy default CUDA stream. + """ + return current_context().get_legacy_default_stream() + + +@require_context +def per_thread_default_stream(): + """ + Get the per-thread default CUDA stream. + """ + return current_context().get_per_thread_default_stream() + + +@require_context +def external_stream(ptr): + """Create a Numba stream object for a stream allocated outside Numba. + + :param ptr: Pointer to the external stream to wrap in a Numba Stream + :type ptr: int + """ + return current_context().create_external_stream(ptr) + + +# Page lock +@require_context +@contextlib.contextmanager +def pinned(*arylist): + """A context manager for temporary pinning a sequence of host ndarrays. + """ + pmlist = [] + for ary in arylist: + pm = current_context().mempin(ary, driver.host_pointer(ary), + driver.host_memory_size(ary), + mapped=False) + pmlist.append(pm) + yield + + +@require_context +@contextlib.contextmanager +def mapped(*arylist, **kws): + """A context manager for temporarily mapping a sequence of host ndarrays. + """ + assert not kws or 'stream' in kws, "Only accept 'stream' as keyword." 
+ stream = kws.get('stream', 0) + pmlist = [] + devarylist = [] + for ary in arylist: + pm = current_context().mempin(ary, driver.host_pointer(ary), + driver.host_memory_size(ary), + mapped=True) + pmlist.append(pm) + devary = devicearray.from_array_like(ary, gpu_data=pm, stream=stream) + devarylist.append(devary) + try: + if len(devarylist) == 1: + yield devarylist[0] + else: + yield devarylist + finally: + # When exiting from `with cuda.mapped(*arrs) as mapped_arrs:`, the name + # `mapped_arrs` stays in scope, blocking automatic unmapping based on + # reference count. We therefore invoke the finalizer manually. + for pm in pmlist: + pm.free() + + +def event(timing=True): + """ + Create a CUDA event. Timing data is only recorded by the event if it is + created with ``timing=True``. + """ + evt = current_context().create_event(timing=timing) + return evt + + +event_elapsed_time = driver.event_elapsed_time + + +# Device selection + +def select_device(device_id): + """ + Make the context associated with device *device_id* the current context. + + Returns a Device instance. + + Raises exception on error. + """ + context = devices.get_context(device_id) + return context.device + + +def get_current_device(): + "Get current device associated with the current thread" + return current_context().device + + +def list_devices(): + "Return a list of all detected devices" + return devices.gpus + + +def close(): + """ + Explicitly clears all contexts in the current thread, and destroys all + contexts if the current thread is the main thread. + """ + devices.reset() + + +def _auto_device(ary, stream=0, copy=True): + return devicearray.auto_device(ary, stream=stream, copy=copy) + + +def detect(): + """ + Detect supported CUDA hardware and print a summary of the detected hardware. + + Returns a boolean indicating whether any supported devices were detected. 
+ """ + devlist = list_devices() + print('Found %d CUDA devices' % len(devlist)) + supported_count = 0 + for dev in devlist: + attrs = [] + cc = dev.compute_capability + kernel_timeout = dev.KERNEL_EXEC_TIMEOUT + tcc = dev.TCC_DRIVER + fp32_to_fp64_ratio = dev.SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO + attrs += [('Compute Capability', '%d.%d' % cc)] + attrs += [('PCI Device ID', dev.PCI_DEVICE_ID)] + attrs += [('PCI Bus ID', dev.PCI_BUS_ID)] + attrs += [('UUID', dev.uuid)] + attrs += [('Watchdog', 'Enabled' if kernel_timeout else 'Disabled')] + if os.name == "nt": + attrs += [('Compute Mode', 'TCC' if tcc else 'WDDM')] + attrs += [('FP32/FP64 Performance Ratio', fp32_to_fp64_ratio)] + if cc < (3, 5): + support = '[NOT SUPPORTED: CC < 3.5]' + elif cc < (5, 0): + support = '[SUPPORTED (DEPRECATED)]' + supported_count += 1 + else: + support = '[SUPPORTED]' + supported_count += 1 + + print('id %d %20s %40s' % (dev.id, dev.name, support)) + for key, val in attrs: + print('%40s: %s' % (key, val)) + + print('Summary:') + print('\t%d/%d devices are supported' % (supported_count, len(devlist))) + return supported_count > 0 + + +@contextlib.contextmanager +def defer_cleanup(): + """ + Temporarily disable memory deallocation. + Use this to prevent resource deallocation breaking asynchronous execution. + + For example:: + + with defer_cleanup(): + # all cleanup is deferred in here + do_speed_critical_code() + # cleanup can occur here + + Note: this context manager can be nested. 
+ """ + with current_context().defer_cleanup(): + yield + + +profiling = require_context(driver.profiling) +profile_start = require_context(driver.profile_start) +profile_stop = require_context(driver.profile_stop) diff --git a/lib/python3.10/site-packages/numba/cuda/api_util.py b/lib/python3.10/site-packages/numba/cuda/api_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b8bffb7c108bde4335793599434bf1465360758f --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/api_util.py @@ -0,0 +1,30 @@ +import numpy as np + + +def prepare_shape_strides_dtype(shape, strides, dtype, order): + dtype = np.dtype(dtype) + if isinstance(shape, int): + shape = (shape,) + if isinstance(strides, int): + strides = (strides,) + else: + strides = strides or _fill_stride_by_order(shape, dtype, order) + return shape, strides, dtype + + +def _fill_stride_by_order(shape, dtype, order): + nd = len(shape) + if nd == 0: + return () + strides = [0] * nd + if order == 'C': + strides[-1] = dtype.itemsize + for d in reversed(range(nd - 1)): + strides[d] = strides[d + 1] * shape[d + 1] + elif order == 'F': + strides[0] = dtype.itemsize + for d in range(1, nd): + strides[d] = strides[d - 1] * shape[d - 1] + else: + raise ValueError('must be either C/F order') + return tuple(strides) diff --git a/lib/python3.10/site-packages/numba/cuda/args.py b/lib/python3.10/site-packages/numba/cuda/args.py new file mode 100644 index 0000000000000000000000000000000000000000..472bd0b873f46d0bbe8b091e7dc35ffa7ed6d077 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/args.py @@ -0,0 +1,77 @@ +""" +Hints to wrap Kernel arguments to indicate how to manage host-device +memory transfers before & after the kernel call. 
+""" +import abc + +from numba.core.typing.typeof import typeof, Purpose + + +class ArgHint(metaclass=abc.ABCMeta): + def __init__(self, value): + self.value = value + + @abc.abstractmethod + def to_device(self, retr, stream=0): + """ + :param stream: a stream to use when copying data + :param retr: + a list of clean-up work to do after the kernel's been run. + Append 0-arg lambdas to it! + :return: a value (usually an `DeviceNDArray`) to be passed to + the kernel + """ + pass + + @property + def _numba_type_(self): + return typeof(self.value, Purpose.argument) + + +class In(ArgHint): + def to_device(self, retr, stream=0): + from .cudadrv.devicearray import auto_device + devary, _ = auto_device( + self.value, + stream=stream) + # A dummy writeback functor to keep devary alive until the kernel + # is called. + retr.append(lambda: devary) + return devary + + +class Out(ArgHint): + def to_device(self, retr, stream=0): + from .cudadrv.devicearray import auto_device + devary, conv = auto_device( + self.value, + copy=False, + stream=stream) + if conv: + retr.append(lambda: devary.copy_to_host(self.value, stream=stream)) + return devary + + +class InOut(ArgHint): + def to_device(self, retr, stream=0): + from .cudadrv.devicearray import auto_device + devary, conv = auto_device( + self.value, + stream=stream) + if conv: + retr.append(lambda: devary.copy_to_host(self.value, stream=stream)) + return devary + + +def wrap_arg(value, default=InOut): + return value if isinstance(value, ArgHint) else default(value) + + +__all__ = [ + 'In', + 'Out', + 'InOut', + + 'ArgHint', + 'wrap_arg', +] diff --git a/lib/python3.10/site-packages/numba/cuda/cg.py b/lib/python3.10/site-packages/numba/cuda/cg.py new file mode 100644 index 0000000000000000000000000000000000000000..00d55704befe578fcc529586bc8696ca9261c6db --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cg.py @@ -0,0 +1,62 @@ +from numba.core import types +from numba.core.extending import overload, overload_method +from 
numba.core.typing import signature +from numba.cuda import nvvmutils +from numba.cuda.extending import intrinsic +from numba.cuda.types import grid_group, GridGroup as GridGroupClass + + +class GridGroup: + """A cooperative group representing the entire grid""" + + def sync() -> None: + """Synchronize this grid group""" + + +def this_grid() -> GridGroup: + """Get the current grid group.""" + return GridGroup() + + +@intrinsic +def _this_grid(typingctx): + sig = signature(grid_group) + + def codegen(context, builder, sig, args): + one = context.get_constant(types.int32, 1) + mod = builder.module + return builder.call( + nvvmutils.declare_cudaCGGetIntrinsicHandle(mod), + (one,)) + + return sig, codegen + + +@overload(this_grid, target='cuda') +def _ol_this_grid(): + def impl(): + return _this_grid() + + return impl + + +@intrinsic +def _grid_group_sync(typingctx, group): + sig = signature(types.int32, group) + + def codegen(context, builder, sig, args): + flags = context.get_constant(types.int32, 0) + mod = builder.module + return builder.call( + nvvmutils.declare_cudaCGSynchronize(mod), + (*args, flags)) + + return sig, codegen + + +@overload_method(GridGroupClass, 'sync', target='cuda') +def _ol_grid_group_sync(group): + def impl(group): + return _grid_group_sync(group) + + return impl diff --git a/lib/python3.10/site-packages/numba/cuda/codegen.py b/lib/python3.10/site-packages/numba/cuda/codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..6009dcbdcc7d3a7c5aaf51b106126fa8fbb6696b --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/codegen.py @@ -0,0 +1,378 @@ +from llvmlite import ir + +from numba.core import config, serialize +from numba.core.codegen import Codegen, CodeLibrary +from .cudadrv import devices, driver, nvvm, runtime +from numba.cuda.cudadrv.libs import get_cudalib + +import os +import subprocess +import tempfile + + +CUDA_TRIPLE = 'nvptx64-nvidia-cuda' + + +def run_nvdisasm(cubin, flags): + # nvdisasm only accepts 
input from a file, so we need to write out to a + # temp file and clean up afterwards. + fd = None + fname = None + try: + fd, fname = tempfile.mkstemp() + with open(fname, 'wb') as f: + f.write(cubin) + + try: + cp = subprocess.run(['nvdisasm', *flags, fname], check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + except FileNotFoundError as e: + msg = ("nvdisasm has not been found. You may need " + "to install the CUDA toolkit and ensure that " + "it is available on your PATH.\n") + raise RuntimeError(msg) from e + return cp.stdout.decode('utf-8') + finally: + if fd is not None: + os.close(fd) + if fname is not None: + os.unlink(fname) + + +def disassemble_cubin(cubin): + # Request lineinfo in disassembly + flags = ['-gi'] + return run_nvdisasm(cubin, flags) + + +def disassemble_cubin_for_cfg(cubin): + # Request control flow graph in disassembly + flags = ['-cfg'] + return run_nvdisasm(cubin, flags) + + +class CUDACodeLibrary(serialize.ReduceMixin, CodeLibrary): + """ + The CUDACodeLibrary generates PTX, SASS, cubins for multiple different + compute capabilities. It also loads cubins to multiple devices (via + get_cufunc), which may be of different compute capabilities. + """ + + def __init__(self, codegen, name, entry_name=None, max_registers=None, + nvvm_options=None): + """ + codegen: + Codegen object. + name: + Name of the function in the source. + entry_name: + Name of the kernel function in the binary, if this is a global + kernel and not a device function. + max_registers: + The maximum register usage to aim for when linking. + nvvm_options: + Dict of options to pass to NVVM. + """ + super().__init__(codegen, name) + + # The llvmlite module for this library. + self._module = None + # CodeLibrary objects that will be "linked" into this library. The + # modules within them are compiled from NVVM IR to PTX along with the + # IR from this module - in that sense they are "linked" by NVVM at PTX + # generation time, rather than at link time. 
+ self._linking_libraries = set() + # Files to link with the generated PTX. These are linked using the + # Driver API at link time. + self._linking_files = set() + # Should we link libcudadevrt? + self.needs_cudadevrt = False + + # Cache the LLVM IR string + self._llvm_strs = None + # Maps CC -> PTX string + self._ptx_cache = {} + # Maps CC -> LTO-IR + self._ltoir_cache = {} + # Maps CC -> cubin + self._cubin_cache = {} + # Maps CC -> linker info output for cubin + self._linkerinfo_cache = {} + # Maps Device numeric ID -> cufunc + self._cufunc_cache = {} + + self._max_registers = max_registers + if nvvm_options is None: + nvvm_options = {} + self._nvvm_options = nvvm_options + self._entry_name = entry_name + + @property + def llvm_strs(self): + if self._llvm_strs is None: + self._llvm_strs = [str(mod) for mod in self.modules] + return self._llvm_strs + + def get_llvm_str(self): + return "\n\n".join(self.llvm_strs) + + def _ensure_cc(self, cc): + if cc is not None: + return cc + + device = devices.get_context().device + return device.compute_capability + + def get_asm_str(self, cc=None): + cc = self._ensure_cc(cc) + + ptxes = self._ptx_cache.get(cc, None) + if ptxes: + return ptxes + + arch = nvvm.get_arch_option(*cc) + options = self._nvvm_options.copy() + options['arch'] = arch + + irs = self.llvm_strs + + ptx = nvvm.compile_ir(irs, **options) + + # Sometimes the result from NVVM contains trailing whitespace and + # nulls, which we strip so that the assembly dump looks a little + # tidier. 
+ ptx = ptx.decode().strip('\x00').strip() + + if config.DUMP_ASSEMBLY: + print(("ASSEMBLY %s" % self._name).center(80, '-')) + print(ptx) + print('=' * 80) + + self._ptx_cache[cc] = ptx + + return ptx + + def get_ltoir(self, cc=None): + cc = self._ensure_cc(cc) + + ltoir = self._ltoir_cache.get(cc, None) + if ltoir is not None: + return ltoir + + arch = nvvm.get_arch_option(*cc) + options = self._nvvm_options.copy() + options['arch'] = arch + options['gen-lto'] = None + + irs = self.llvm_strs + ltoir = nvvm.compile_ir(irs, **options) + self._ltoir_cache[cc] = ltoir + + return ltoir + + def get_cubin(self, cc=None): + cc = self._ensure_cc(cc) + + cubin = self._cubin_cache.get(cc, None) + if cubin: + return cubin + + linker = driver.Linker.new(max_registers=self._max_registers, cc=cc) + + if linker.lto: + ltoir = self.get_ltoir(cc=cc) + linker.add_ltoir(ltoir) + else: + ptx = self.get_asm_str(cc=cc) + linker.add_ptx(ptx.encode()) + + for path in self._linking_files: + linker.add_file_guess_ext(path) + if self.needs_cudadevrt: + linker.add_file_guess_ext(get_cudalib('cudadevrt', static=True)) + + cubin = linker.complete() + self._cubin_cache[cc] = cubin + self._linkerinfo_cache[cc] = linker.info_log + + return cubin + + def get_cufunc(self): + if self._entry_name is None: + msg = "Missing entry_name - are you trying to get the cufunc " \ + "for a device function?" 
+ raise RuntimeError(msg) + + ctx = devices.get_context() + device = ctx.device + + cufunc = self._cufunc_cache.get(device.id, None) + if cufunc: + return cufunc + + cubin = self.get_cubin(cc=device.compute_capability) + module = ctx.create_module_image(cubin) + + # Load + cufunc = module.get_function(self._entry_name) + + # Populate caches + self._cufunc_cache[device.id] = cufunc + + return cufunc + + def get_linkerinfo(self, cc): + try: + return self._linkerinfo_cache[cc] + except KeyError: + raise KeyError(f'No linkerinfo for CC {cc}') + + def get_sass(self, cc=None): + return disassemble_cubin(self.get_cubin(cc=cc)) + + def get_sass_cfg(self, cc=None): + return disassemble_cubin_for_cfg(self.get_cubin(cc=cc)) + + def add_ir_module(self, mod): + self._raise_if_finalized() + if self._module is not None: + raise RuntimeError('CUDACodeLibrary only supports one module') + self._module = mod + + def add_linking_library(self, library): + library._ensure_finalized() + + # We don't want to allow linking more libraries in after finalization + # because our linked libraries are modified by the finalization, and we + # won't be able to finalize again after adding new ones + self._raise_if_finalized() + + self._linking_libraries.add(library) + + def add_linking_file(self, filepath): + self._linking_files.add(filepath) + + def get_function(self, name): + for fn in self._module.functions: + if fn.name == name: + return fn + raise KeyError(f'Function {name} not found') + + @property + def modules(self): + return [self._module] + [mod for lib in self._linking_libraries + for mod in lib.modules] + + @property + def linking_libraries(self): + # Libraries we link to may link to other libraries, so we recursively + # traverse the linking libraries property to build up a list of all + # linked libraries. 
+ libs = [] + for lib in self._linking_libraries: + libs.extend(lib.linking_libraries) + libs.append(lib) + return libs + + def finalize(self): + # Unlike the CPUCodeLibrary, we don't invoke the binding layer here - + # we only adjust the linkage of functions. Global kernels (with + # external linkage) have their linkage untouched. Device functions are + # set linkonce_odr to prevent them appearing in the PTX. + + self._raise_if_finalized() + + # Note in-place modification of the linkage of functions in linked + # libraries. This presently causes no issues as only device functions + # are shared across code libraries, so they would always need their + # linkage set to linkonce_odr. If in a future scenario some code + # libraries require linkonce_odr linkage of functions in linked + # modules, and another code library requires another linkage, each code + # library will need to take its own private copy of its linked modules. + # + # See also discussion on PR #890: + # https://github.com/numba/numba/pull/890 + for library in self._linking_libraries: + for mod in library.modules: + for fn in mod.functions: + if not fn.is_declaration: + fn.linkage = 'linkonce_odr' + + self._finalized = True + + def _reduce_states(self): + """ + Reduce the instance for serialization. We retain the PTX and cubins, + but loaded functions are discarded. They are recreated when needed + after deserialization. 
+ """ + if self._linking_files: + msg = 'Cannot pickle CUDACodeLibrary with linking files' + raise RuntimeError(msg) + if not self._finalized: + raise RuntimeError('Cannot pickle unfinalized CUDACodeLibrary') + return dict( + codegen=None, + name=self.name, + entry_name=self._entry_name, + llvm_strs=self.llvm_strs, + ptx_cache=self._ptx_cache, + cubin_cache=self._cubin_cache, + linkerinfo_cache=self._linkerinfo_cache, + max_registers=self._max_registers, + nvvm_options=self._nvvm_options, + needs_cudadevrt=self.needs_cudadevrt + ) + + @classmethod + def _rebuild(cls, codegen, name, entry_name, llvm_strs, ptx_cache, + cubin_cache, linkerinfo_cache, max_registers, nvvm_options, + needs_cudadevrt): + """ + Rebuild an instance. + """ + instance = cls(codegen, name, entry_name=entry_name) + + instance._llvm_strs = llvm_strs + instance._ptx_cache = ptx_cache + instance._cubin_cache = cubin_cache + instance._linkerinfo_cache = linkerinfo_cache + + instance._max_registers = max_registers + instance._nvvm_options = nvvm_options + instance.needs_cudadevrt = needs_cudadevrt + + instance._finalized = True + + return instance + + +class JITCUDACodegen(Codegen): + """ + This codegen implementation for CUDA only generates optimized LLVM IR. + Generation of PTX code is done separately (see numba.cuda.compiler). + """ + + _library_class = CUDACodeLibrary + + def __init__(self, module_name): + pass + + def _create_empty_module(self, name): + ir_module = ir.Module(name) + ir_module.triple = CUDA_TRIPLE + ir_module.data_layout = nvvm.NVVM().data_layout + nvvm.add_ir_version(ir_module) + return ir_module + + def _add_module(self, module): + pass + + def magic_tuple(self): + """ + Return a tuple unambiguously describing the codegen behaviour. 
+ """ + ctx = devices.get_context() + cc = ctx.device.compute_capability + return (runtime.runtime.get_version(), cc) diff --git a/lib/python3.10/site-packages/numba/cuda/compiler.py b/lib/python3.10/site-packages/numba/cuda/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..e2bcb08919eddff275b89e03a19fee4b817fe0e2 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/compiler.py @@ -0,0 +1,422 @@ +from llvmlite import ir +from numba.core.typing.templates import ConcreteTemplate +from numba.core import types, typing, funcdesc, config, compiler, sigutils +from numba.core.compiler import (sanitize_compile_result_entries, CompilerBase, + DefaultPassBuilder, Flags, Option, + CompileResult) +from numba.core.compiler_lock import global_compiler_lock +from numba.core.compiler_machinery import (LoweringPass, + PassManager, register_pass) +from numba.core.errors import NumbaInvalidConfigWarning +from numba.core.typed_passes import (IRLegalization, NativeLowering, + AnnotateTypes) +from warnings import warn +from numba.cuda.api import get_current_device +from numba.cuda.target import CUDACABICallConv + + +def _nvvm_options_type(x): + if x is None: + return None + + else: + assert isinstance(x, dict) + return x + + +class CUDAFlags(Flags): + nvvm_options = Option( + type=_nvvm_options_type, + default=None, + doc="NVVM options", + ) + compute_capability = Option( + type=tuple, + default=None, + doc="Compute Capability", + ) + + +# The CUDACompileResult (CCR) has a specially-defined entry point equal to its +# id. This is because the entry point is used as a key into a dict of +# overloads by the base dispatcher. The id of the CCR is the only small and +# unique property of a CompileResult in the CUDA target (cf. the CPU target, +# which uses its entry_point, which is a pointer value). +# +# This does feel a little hackish, and there are two ways in which this could +# be improved: +# +# 1. 
We could change the core of Numba so that each CompileResult has its own +# unique ID that can be used as a key - e.g. a count, similar to the way in +# which types have unique counts. +# 2. At some future time when kernel launch uses a compiled function, the entry +# point will no longer need to be a synthetic value, but will instead be a +# pointer to the compiled function as in the CPU target. + +class CUDACompileResult(CompileResult): + @property + def entry_point(self): + return id(self) + + +def cuda_compile_result(**entries): + entries = sanitize_compile_result_entries(entries) + return CUDACompileResult(**entries) + + +@register_pass(mutates_CFG=True, analysis_only=False) +class CUDABackend(LoweringPass): + + _name = "cuda_backend" + + def __init__(self): + LoweringPass.__init__(self) + + def run_pass(self, state): + """ + Back-end: Packages lowering output in a compile result + """ + lowered = state['cr'] + signature = typing.signature(state.return_type, *state.args) + + state.cr = cuda_compile_result( + typing_context=state.typingctx, + target_context=state.targetctx, + typing_error=state.status.fail_reason, + type_annotation=state.type_annotation, + library=state.library, + call_helper=lowered.call_helper, + signature=signature, + fndesc=lowered.fndesc, + ) + return True + + +@register_pass(mutates_CFG=False, analysis_only=False) +class CreateLibrary(LoweringPass): + """ + Create a CUDACodeLibrary for the NativeLowering pass to populate. The + NativeLowering pass will create a code library if none exists, but we need + to set it up with nvvm_options from the flags if they are present. 
+ """ + + _name = "create_library" + + def __init__(self): + LoweringPass.__init__(self) + + def run_pass(self, state): + codegen = state.targetctx.codegen() + name = state.func_id.func_qualname + nvvm_options = state.flags.nvvm_options + state.library = codegen.create_library(name, nvvm_options=nvvm_options) + # Enable object caching upfront so that the library can be serialized. + state.library.enable_object_caching() + + return True + + +class CUDACompiler(CompilerBase): + def define_pipelines(self): + dpb = DefaultPassBuilder + pm = PassManager('cuda') + + untyped_passes = dpb.define_untyped_pipeline(self.state) + pm.passes.extend(untyped_passes.passes) + + typed_passes = dpb.define_typed_pipeline(self.state) + pm.passes.extend(typed_passes.passes) + + lowering_passes = self.define_cuda_lowering_pipeline(self.state) + pm.passes.extend(lowering_passes.passes) + + pm.finalize() + return [pm] + + def define_cuda_lowering_pipeline(self, state): + pm = PassManager('cuda_lowering') + # legalise + pm.add_pass(IRLegalization, + "ensure IR is legal prior to lowering") + pm.add_pass(AnnotateTypes, "annotate types") + + # lower + pm.add_pass(CreateLibrary, "create library") + pm.add_pass(NativeLowering, "native lowering") + pm.add_pass(CUDABackend, "cuda backend") + + pm.finalize() + return pm + + +@global_compiler_lock +def compile_cuda(pyfunc, return_type, args, debug=False, lineinfo=False, + inline=False, fastmath=False, nvvm_options=None, + cc=None): + if cc is None: + raise ValueError('Compute Capability must be supplied') + + from .descriptor import cuda_target + typingctx = cuda_target.typing_context + targetctx = cuda_target.target_context + + flags = CUDAFlags() + # Do not compile (generate native code), just lower (to LLVM) + flags.no_compile = True + flags.no_cpython_wrapper = True + flags.no_cfunc_wrapper = True + + # Both debug and lineinfo turn on debug information in the compiled code, + # but we keep them separate arguments in case we later want to 
overload + # some other behavior on the debug flag. In particular, -opt=3 is not + # supported with debug enabled, and enabling only lineinfo should not + # affect the error model. + if debug or lineinfo: + flags.debuginfo = True + + if lineinfo: + flags.dbg_directives_only = True + + if debug: + flags.error_model = 'python' + else: + flags.error_model = 'numpy' + + if inline: + flags.forceinline = True + if fastmath: + flags.fastmath = True + if nvvm_options: + flags.nvvm_options = nvvm_options + flags.compute_capability = cc + + # Run compilation pipeline + from numba.core.target_extension import target_override + with target_override('cuda'): + cres = compiler.compile_extra(typingctx=typingctx, + targetctx=targetctx, + func=pyfunc, + args=args, + return_type=return_type, + flags=flags, + locals={}, + pipeline_class=CUDACompiler) + + library = cres.library + library.finalize() + + return cres + + +def cabi_wrap_function(context, lib, fndesc, wrapper_function_name, + nvvm_options): + """ + Wrap a Numba ABI function in a C ABI wrapper at the NVVM IR level. + + The C ABI wrapper will have the same name as the source Python function. 
+ """ + # The wrapper will be contained in a new library that links to the wrapped + # function's library + library = lib.codegen.create_library(f'{lib.name}_function_', + entry_name=wrapper_function_name, + nvvm_options=nvvm_options) + library.add_linking_library(lib) + + # Determine the caller (C ABI) and wrapper (Numba ABI) function types + argtypes = fndesc.argtypes + restype = fndesc.restype + c_call_conv = CUDACABICallConv(context) + wrapfnty = c_call_conv.get_function_type(restype, argtypes) + fnty = context.call_conv.get_function_type(fndesc.restype, argtypes) + + # Create a new module and declare the callee + wrapper_module = context.create_module("cuda.cabi.wrapper") + func = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name) + + # Define the caller - populate it with a call to the callee and return + # its return value + + wrapfn = ir.Function(wrapper_module, wrapfnty, wrapper_function_name) + builder = ir.IRBuilder(wrapfn.append_basic_block('')) + + arginfo = context.get_arg_packer(argtypes) + callargs = arginfo.from_arguments(builder, wrapfn.args) + # We get (status, return_value), but we ignore the status since we + # can't propagate it through the C ABI anyway + _, return_value = context.call_conv.call_function( + builder, func, restype, argtypes, callargs) + builder.ret(return_value) + + library.add_ir_module(wrapper_module) + library.finalize() + return library + + +@global_compiler_lock +def compile(pyfunc, sig, debug=False, lineinfo=False, device=True, + fastmath=False, cc=None, opt=True, abi="c", abi_info=None, + output='ptx'): + """Compile a Python function to PTX or LTO-IR for a given set of argument + types. + + :param pyfunc: The Python function to compile. + :param sig: The signature representing the function's input and output + types. If this is a tuple of argument types without a return + type, the inferred return type is returned by this function. 
 If + a signature including a return type is passed, the compiled code + will include a cast from the inferred return type to the + specified return type, and this function will return the + specified return type. + :param debug: Whether to include debug info in the compiled code. + :type debug: bool + :param lineinfo: Whether to include a line mapping from the compiled code + to the source code. Usually this is used with optimized + code (since debug mode would automatically include this), + so we want debug info in the LLVM IR but only the line + mapping in the final output. + :type lineinfo: bool + :param device: Whether to compile a device function. + :type device: bool + :param fastmath: Whether to enable fast math flags (ftz=1, prec_sqrt=0, + prec_div=0, and fma=1) + :type fastmath: bool + :param cc: Compute capability to compile for, as a tuple + ``(MAJOR, MINOR)``. Defaults to ``(5, 0)``. + :type cc: tuple + :param opt: Enable optimizations. Defaults to ``True``. + :type opt: bool + :param abi: The ABI for a compiled function - either ``"numba"`` or + ``"c"``. Note that the Numba ABI is not considered stable. + The C ABI is only supported for device functions at present. + :type abi: str + :param abi_info: A dict of ABI-specific options. The ``"c"`` ABI supports + one option, ``"abi_name"``, for providing the wrapper + function's name. The ``"numba"`` ABI has no options. + :type abi_info: dict + :param output: Type of output to generate, either ``"ptx"`` or ``"ltoir"``. 
+ :type output: str + :return: (code, resty): The compiled code and inferred return type + :rtype: tuple + """ + if abi not in ("numba", "c"): + raise NotImplementedError(f'Unsupported ABI: {abi}') + + if abi == 'c' and not device: + raise NotImplementedError('The C ABI is not supported for kernels') + + if output not in ("ptx", "ltoir"): + raise NotImplementedError(f'Unsupported output type: {output}') + + if debug and opt: + msg = ("debug=True with opt=True (the default) " + "is not supported by CUDA. This may result in a crash" + " - set debug=False or opt=False.") + warn(NumbaInvalidConfigWarning(msg)) + + lto = (output == 'ltoir') + abi_info = abi_info or dict() + + nvvm_options = { + 'fastmath': fastmath, + 'opt': 3 if opt else 0 + } + + if lto: + nvvm_options['gen-lto'] = None + + args, return_type = sigutils.normalize_signature(sig) + + cc = cc or config.CUDA_DEFAULT_PTX_CC + cres = compile_cuda(pyfunc, return_type, args, debug=debug, + lineinfo=lineinfo, fastmath=fastmath, + nvvm_options=nvvm_options, cc=cc) + resty = cres.signature.return_type + + if resty and not device and resty != types.void: + raise TypeError("CUDA kernel must have void return type.") + + tgt = cres.target_context + + if device: + lib = cres.library + if abi == "c": + wrapper_name = abi_info.get('abi_name', pyfunc.__name__) + lib = cabi_wrap_function(tgt, lib, cres.fndesc, wrapper_name, + nvvm_options) + else: + code = pyfunc.__code__ + filename = code.co_filename + linenum = code.co_firstlineno + + lib, kernel = tgt.prepare_cuda_kernel(cres.library, cres.fndesc, debug, + lineinfo, nvvm_options, filename, + linenum) + + if lto: + code = lib.get_ltoir(cc=cc) + else: + code = lib.get_asm_str(cc=cc) + return code, resty + + +def compile_for_current_device(pyfunc, sig, debug=False, lineinfo=False, + device=True, fastmath=False, opt=True, + abi="c", abi_info=None, output='ptx'): + """Compile a Python function to PTX or LTO-IR for a given signature for the + current device's compute 
capability. This calls :func:`compile` with an + appropriate ``cc`` value for the current device.""" + cc = get_current_device().compute_capability + return compile(pyfunc, sig, debug=debug, lineinfo=lineinfo, device=device, + fastmath=fastmath, cc=cc, opt=opt, abi=abi, + abi_info=abi_info, output=output) + + + def compile_ptx(pyfunc, sig, debug=False, lineinfo=False, device=False, + fastmath=False, cc=None, opt=True, abi="numba", abi_info=None): + """Compile a Python function to PTX for a given signature. See + :func:`compile`. The defaults for this function are to compile a kernel + with the Numba ABI, rather than :func:`compile`'s default of compiling a + device function with the C ABI.""" + return compile(pyfunc, sig, debug=debug, lineinfo=lineinfo, device=device, + fastmath=fastmath, cc=cc, opt=opt, abi=abi, + abi_info=abi_info, output='ptx') + + + def compile_ptx_for_current_device(pyfunc, sig, debug=False, lineinfo=False, + device=False, fastmath=False, opt=True, + abi="numba", abi_info=None): + """Compile a Python function to PTX for a given signature for the current + device's compute capability. 
See :func:`compile_ptx`.""" + cc = get_current_device().compute_capability + return compile_ptx(pyfunc, sig, debug=debug, lineinfo=lineinfo, + device=device, fastmath=fastmath, cc=cc, opt=opt, + abi=abi, abi_info=abi_info) + + +def declare_device_function(name, restype, argtypes): + return declare_device_function_template(name, restype, argtypes).key + + +def declare_device_function_template(name, restype, argtypes): + from .descriptor import cuda_target + typingctx = cuda_target.typing_context + targetctx = cuda_target.target_context + sig = typing.signature(restype, *argtypes) + extfn = ExternFunction(name, sig) + + class device_function_template(ConcreteTemplate): + key = extfn + cases = [sig] + + fndesc = funcdesc.ExternalFunctionDescriptor( + name=name, restype=restype, argtypes=argtypes) + typingctx.insert_user_function(extfn, device_function_template) + targetctx.insert_user_function(extfn, fndesc) + + return device_function_template + + +class ExternFunction(object): + def __init__(self, name, sig): + self.name = name + self.sig = sig diff --git a/lib/python3.10/site-packages/numba/cuda/cpp_function_wrappers.cu b/lib/python3.10/site-packages/numba/cuda/cpp_function_wrappers.cu new file mode 100644 index 0000000000000000000000000000000000000000..a2cd1e054c40ebce33016fff1bcd9d869be9f07c --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cpp_function_wrappers.cu @@ -0,0 +1,47 @@ +#include "cuda_fp16.h" + +#define FNDEF(fname) __numba_wrapper_ ## fname + +#define UNARY_FUNCTION(fname) extern "C" __device__ int\ + FNDEF(fname)( \ + short* return_value,\ + short x\ +)\ +{\ + __half retval = fname(__short_as_half (x));\ +\ + *return_value = __half_as_short (retval);\ + /* Signal that no Python exception occurred */ \ + return 0;\ +}\ + +extern "C" __device__ int +FNDEF(hdiv)( + short* return_value, + short x, + short y +) +{ + __half retval = __hdiv(__short_as_half (x), __short_as_half (y)); + + *return_value = __half_as_short (retval); + // Signal that 
no Python exception occurred + return 0; +} + +UNARY_FUNCTION(hsin) +UNARY_FUNCTION(hcos) +UNARY_FUNCTION(hlog) +UNARY_FUNCTION(hlog10) +UNARY_FUNCTION(hlog2) +UNARY_FUNCTION(hexp) +UNARY_FUNCTION(hexp10) +UNARY_FUNCTION(hexp2) +UNARY_FUNCTION(hsqrt) +UNARY_FUNCTION(hrsqrt) +UNARY_FUNCTION(hfloor) +UNARY_FUNCTION(hceil) +UNARY_FUNCTION(hrcp) +UNARY_FUNCTION(hrint) +UNARY_FUNCTION(htrunc) + diff --git a/lib/python3.10/site-packages/numba/cuda/cuda_fp16.h b/lib/python3.10/site-packages/numba/cuda/cuda_fp16.h new file mode 100644 index 0000000000000000000000000000000000000000..3001595e9a4cbe2d20a3c81ffe00bd981e82c9b6 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cuda_fp16.h @@ -0,0 +1,3631 @@ +/* +* Copyright 1993-2021 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. +* +* Any use of the Licensed Deliverables in individual and commercial +* software must include, in the user documentation and internal +* comments to the code, the above Disclaimer and U.S. Government End +* Users Notice. +*/ + +/** +* \defgroup CUDA_MATH_INTRINSIC_HALF Half Precision Intrinsics +* This section describes half precision intrinsic functions that are +* only supported in device code. +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_ARITHMETIC Half Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_ARITHMETIC Half2 Arithmetic Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. 
+*/ + +/** +* \defgroup CUDA_MATH__HALF_COMPARISON Half Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_COMPARISON Half2 Comparison Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_MISC Half Precision Conversion and Data Movement +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF_FUNCTIONS Half Math Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +/** +* \defgroup CUDA_MATH__HALF2_FUNCTIONS Half2 Math Functions +* \ingroup CUDA_MATH_INTRINSIC_HALF +* To use these functions, include the header file \p cuda_fp16.h in your program. +*/ + +#ifndef __CUDA_FP16_H__ +#define __CUDA_FP16_H__ + +#if defined(__cplusplus) +#if defined(__CUDACC__) +#define __CUDA_FP16_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__ +#else +#define __CUDA_HOSTDEVICE_FP16_DECL__ static +#endif /* defined(__CUDACC__) */ + +#define __CUDA_FP16_TYPES_EXIST__ + +/* Forward-declaration of structures defined in "cuda_fp16.hpp" */ + +/** + * \brief half datatype + * + * \details This structure implements the datatype for storing + * half-precision floating-point numbers. The structure implements + * assignment operators and type conversions. + * 16 bits are being used in total: 1 sign bit, 5 bits for the exponent, + * and the significand is being stored in 10 bits. + * The total precision is 11 bits. There are 15361 representable + * numbers within the interval [0.0, 1.0], endpoints included. + * On average we have log10(2**11) ~ 3.311 decimal digits. 
+ * + * \internal + * \req IEEE 754-2008 compliant implementation of half-precision + * floating-point numbers. + * \endinternal + */ +struct __half; + +/** + * \brief half2 datatype + * + * \details This structure implements the datatype for storing two + * half-precision floating-point numbers. + * The structure implements assignment operators and type conversions. + * + * \internal + * \req Vectorified version of half. + * \endinternal + */ +struct __half2; + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts double number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts double number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - double. Is only being read. +* \returns half +* \retval a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns half +* \retval a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-to-nearest-even mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-to-nearest-even mode. +* \param[in] a - float. Is only being read. +* \returns half +* \retval a converted to half. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-towards-zero mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-towards-zero mode. +* \param[in] a - float. Is only being read. +* \returns half +* \retval a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-down mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-down mode. +* \param[in] a - float. Is only being read. +* +* \returns half +* \retval a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts float number to half precision in round-up mode +* and returns \p half with converted value. +* +* \details Converts float number \p a to half precision in round-up mode. +* \param[in] a - float. Is only being read. +* +* \returns half +* \retval a converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts \p half number to float. +* +* \details Converts half number \p a to float. +* \param[in] a - float. Is only being read. +* +* \returns float +* \retval a converted to float. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts input to half precision in round-to-nearest-even mode and +* populates both halves of \p half2 with converted value. +* +* \details Converts input \p a to half precision in round-to-nearest-even mode and +* populates both halves of \p half2 with converted value. +* \param[in] a - float. Is only being read. +* +* \returns half2 +* \retval The \p half2 value with both halves equal to the converted half +* precision number. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both input floats to half precision in round-to-nearest-even +* mode and returns \p half2 with converted values. +* +* \details Converts both input floats to half precision in round-to-nearest-even mode +* and combines the results into one \p half2 number. Low 16 bits of the return +* value correspond to the input \p a, high 16 bits correspond to the input \p +* b. +* \param[in] a - float. Is only being read. +* \param[in] b - float. Is only being read. +* +* \returns half2 +* \retval The \p half2 value with corresponding halves equal to the +* converted input floats. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts low 16 bits of \p half2 to float and returns the result +* +* \details Converts low 16 bits of \p half2 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - half2. Is only being read. 
+* +* \returns float +* \retval The low 16 bits of \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts high 16 bits of \p half2 to float and returns the result +* +* \details Converts high 16 bits of \p half2 input \p a to 32-bit floating-point number +* and returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns float +* \retval The high 16 bits of \p a converted to float. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a); + +#if defined(__CUDACC__) +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both components of float2 number to half precision in +* round-to-nearest-even mode and returns \p half2 with converted values. +* +* \details Converts both components of float2 to half precision in round-to-nearest +* mode and combines the results into one \p half2 number. Low 16 bits of the +* return value correspond to \p a.x and high 16 bits of the return value +* correspond to \p a.y. +* \param[in] a - float2. Is only being read. +* +* \returns half2 +* \retval The \p half2 which has corresponding halves equal to the +* converted float2 components. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Converts both halves of \p half2 to float2 and returns the result. +* +* \details Converts both halves of \p half2 input \p a to float2 and returns the +* result. +* \param[in] a - half2. Is only being read. +* +* \returns float2 +* \retval a converted to float2. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-to-nearest-even mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-to-nearest-even mode. +* \param[in] h - half. Is only being read. +* +* \returns int +* \retval h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-towards-zero mode. +* \param[in] h - half. Is only being read. +* +* \returns int +* \retval h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-down mode. +* \param[in] h - half. Is only being read. +* +* \returns int +* \retval h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to a signed integer in +* round-up mode. +* \param[in] h - half. Is only being read. 
+* +* \returns int +* \retval h converted to a signed integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __half2int_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-to-nearest-even mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-towards-zero mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_rz(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-down mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_rd(const int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed integer to a half in round-up mode. +* +* \details Convert the signed integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - int. Is only being read. 
+* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __int2half_ru(const int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-to-nearest-even mode. +* \param[in] h - half. Is only being read. +* +* \returns short int +* \retval h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-towards-zero mode. +* \param[in] h - half. Is only being read. +* +* \returns short int +* \retval h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-down mode. +* \param[in] h - half. Is only being read. +* +* \returns short int +* \retval h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed short integer in round-up mode. 
+* +* \details Convert the half-precision floating-point value \p h to a signed short +* integer in round-up mode. +* \param[in] h - half. Is only being read. +* +* \returns short int +* \retval h converted to a signed short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half2short_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-towards-zero mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_rz(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-down mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* \retval i converted to half. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_rd(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed short integer to a half in round-up mode. +* +* \details Convert the signed short integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - short int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short2half_ru(const short int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-to-nearest-even mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-to-nearest-even mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* \retval h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-towards-zero mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* \retval h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-down mode. 
+* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* \retval h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned integer +* in round-up mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned int +* \retval h converted to an unsigned integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-to-nearest-even mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-towards-zero mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-down mode. 
+* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned integer to a half in round-up mode. +* +* \details Convert the unsigned integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-to-nearest-even mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* \retval h converted to an unsigned short integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned short integer in round-towards-zero +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned short +* integer in round-towards-zero mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* \retval h converted to an unsigned short integer. 
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h);
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert a half to an unsigned short integer in round-down mode.
+*
+* \details Convert the half-precision floating-point value \p h to an unsigned short
+* integer in round-down mode.
+* \param[in] h - half. Is only being read.
+*
+* \returns unsigned short int
+* \retval h converted to an unsigned short integer.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h);
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert a half to an unsigned short integer in round-up mode.
+*
+* \details Convert the half-precision floating-point value \p h to an unsigned short
+* integer in round-up mode.
+* \param[in] h - half. Is only being read.
+*
+* \returns unsigned short int
+* \retval h converted to an unsigned short integer.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h);
+
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert an unsigned short integer to a half in round-to-nearest-even
+* mode.
+*
+* \details Convert the unsigned short integer value \p i to a half-precision floating-point
+* value in round-to-nearest-even mode.
+* \param[in] i - unsigned short int. Is only being read.
+*
+* \returns half
+* \retval i converted to half.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i);
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert an unsigned short integer to a half in round-towards-zero
+* mode.
+*
+* \details Convert the unsigned short integer value \p i to a half-precision floating-point
+* value in round-towards-zero mode.
+* \param[in] i - unsigned short int. Is only being read.
+* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-down mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned short integer to a half in round-up mode. +* +* \details Convert the unsigned short integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-to-nearest-even mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* \retval h converted to an unsigned 64-bit integer. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-towards-zero +* mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-towards-zero mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* \retval h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-down mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* \retval h converted to an unsigned 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to an unsigned 64-bit integer in round-up mode. +* +* \details Convert the half-precision floating-point value \p h to an unsigned 64-bit +* integer in round-up mode. +* \param[in] h - half. Is only being read. +* +* \returns unsigned long long int +* \retval h converted to an unsigned 64-bit integer. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-to-nearest-even +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-to-nearest-even mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-towards-zero +* mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-towards-zero mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-down mode. +* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-down mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert an unsigned 64-bit integer to a half in round-up mode. 
+* +* \details Convert the unsigned 64-bit integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - unsigned long long int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-to-nearest-even +* mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-to-nearest-even mode. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* \retval h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-towards-zero mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-towards-zero mode. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* \retval h converted to a signed 64-bit integer. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a half to a signed 64-bit integer in round-down mode. +* +* \details Convert the half-precision floating-point value \p h to a signed 64-bit +* integer in round-down mode. +* \param[in] h - half. Is only being read. +* +* \returns long long int +* \retval h converted to a signed 64-bit integer. 
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h);
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert a half to a signed 64-bit integer in round-up mode.
+*
+* \details Convert the half-precision floating-point value \p h to a signed 64-bit
+* integer in round-up mode.
+* \param[in] h - half. Is only being read.
+*
+* \returns long long int
+* \retval h converted to a signed 64-bit integer.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h);
+
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert a signed 64-bit integer to a half in round-to-nearest-even
+* mode.
+*
+* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point
+* value in round-to-nearest-even mode.
+* \param[in] i - long long int. Is only being read.
+*
+* \returns half
+* \retval i converted to half.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i);
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert a signed 64-bit integer to a half in round-towards-zero mode.
+*
+* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point
+* value in round-towards-zero mode.
+* \param[in] i - long long int. Is only being read.
+*
+* \returns half
+* \retval i converted to half.
+* \internal
+* \exception-guarantee no-throw guarantee
+* \behavior reentrant, thread safe
+* \endinternal
+*/
+__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i);
+/**
+* \ingroup CUDA_MATH__HALF_MISC
+* \brief Convert a signed 64-bit integer to a half in round-down mode.
+*
+* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point
+* value in round-down mode.
+* \param[in] i - long long int. Is only being read.
+* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Convert a signed 64-bit integer to a half in round-up mode. +* +* \details Convert the signed 64-bit integer value \p i to a half-precision floating-point +* value in round-up mode. +* \param[in] i - long long int. Is only being read. +* +* \returns half +* \retval i converted to half. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i); + +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Truncate input argument to the integral part. +* +* \details Round \p h to the nearest integer value that does not exceed \p h in +* magnitude. +* \param[in] h - half. Is only being read. +* +* \returns half +* \retval The truncated integer value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half htrunc(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculate ceiling of the input argument. +* +* \details Compute the smallest integer value not less than \p h. +* \param[in] h - half. Is only being read. +* +* \returns half +* \retval The smallest integer value not less than \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hceil(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculate the largest integer less than or equal to \p h. +* +* \details Calculate the largest integer value which is less than or equal to \p h. +* \param[in] h - half. Is only being read. +* +* \returns half +* \retval The largest integer value which is less than or equal to \p h. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hfloor(const __half h); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Round input to nearest integer value in half-precision floating-point +* number. +* +* \details Round \p h to the nearest integer value in half-precision floating-point +* format, with halfway cases rounded to the nearest even integer value. +* \param[in] h - half. Is only being read. +* +* \returns half +* \retval The nearest integer to \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrint(const __half h); + +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Truncate \p half2 vector input argument to the integral part. +* +* \details Round each component of vector \p h to the nearest integer value that does +* not exceed \p h in magnitude. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* \retval The truncated \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculate \p half2 vector ceiling of the input argument. +* +* \details For each component of vector \p h compute the smallest integer value not less +* than \p h. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* \retval The vector of smallest integers not less than \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculate the largest integer less than or equal to \p h. +* +* \details For each component of vector \p h calculate the largest integer value which +* is less than or equal to \p h. 
+* \param[in] h - half2. Is only being read. +* +* \returns half2 +* \retval The vector of largest integers which is less than or equal to \p h. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Round input to nearest integer value in half-precision floating-point +* number. +* +* \details Round each component of \p half2 vector \p h to the nearest integer value in +* half-precision floating-point format, with halfway cases rounded to the +* nearest even integer value. +* \param[in] h - half2. Is only being read. +* +* \returns half2 +* \retval The vector of rounded integer values. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns \p half2 with both halves equal to the input value. +* +* \details Returns \p half2 number with both halves equal to the input \p a \p half +* number. +* \param[in] a - half. Is only being read. +* +* \returns half2 +* \retval The vector which has both its halves equal to the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __half2half2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Swaps both halves of the \p half2 input. +* +* \details Swaps both halves of the \p half2 input and returns a new \p half2 number +* with swapped halves. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval a with its halves being swapped. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts low 16 bits from each of the two \p half2 inputs and combines +* into one \p half2 number. +* +* \details Extracts low 16 bits from each of the two \p half2 inputs and combines into +* one \p half2 number. Low 16 bits from input \p a is stored in low 16 bits of +* the return value, low 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The low 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts high 16 bits from each of the two \p half2 inputs and +* combines into one \p half2 number. +* +* \details Extracts high 16 bits from each of the two \p half2 inputs and combines into +* one \p half2 number. High 16 bits from input \p a is stored in low 16 bits of +* the return value, high 16 bits from input \p b is stored in high 16 bits of +* the return value. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The high 16 bits of \p a and of \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns high 16 bits of \p half2 input. +* +* \details Returns high 16 bits of \p half2 input \p a. +* \param[in] a - half2. Is only being read. +* +* \returns half +* \retval The high 16 bits of the input. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __high2half(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Returns low 16 bits of \p half2 input. +* +* \details Returns low 16 bits of \p half2 input \p a. +* \param[in] a - half2. Is only being read. +* +* \returns half +* \retval Returns \p half which contains low 16 bits of the input \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __low2half(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Checks if the input \p half number is infinite. +* +* \details Checks if the input \p half number \p a is infinite. +* \param[in] a - half. Is only being read. +* +* \returns int +* \retval -1 iff \p a is equal to negative infinity, +* \retval 1 iff \p a is equal to positive infinity, +* \retval 0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ int __hisinf(const __half a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Combines two \p half numbers into one \p half2 number. +* +* \details Combines two input \p half number \p a and \p b into one \p half2 number. +* Input \p a is stored in low 16 bits of the return value, input \p b is stored +* in high 16 bits of the return value. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half2 +* \retval The half2 with one half equal to \p a and the other to \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts low 16 bits from \p half2 input. 
+* +* \details Extracts low 16 bits from \p half2 input \p a and returns a new \p half2 +* number which has both halves equal to the extracted bits. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The half2 with both halves equal to the low 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Extracts high 16 bits from \p half2 input. +* +* \details Extracts high 16 bits from \p half2 input \p a and returns a new \p half2 +* number which has both halves equal to the extracted bits. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The half2 with both halves equal to the high 16 bits of the input. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a); + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a \p half as a signed short integer. +* +* \details Reinterprets the bits in the half-precision floating-point number \p h +* as a signed short integer. +* \param[in] h - half. Is only being read. +* +* \returns short int +* \retval The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ short int __half_as_short(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a \p half as an unsigned short integer. +* +* \details Reinterprets the bits in the half-precision floating-point \p h +* as an unsigned short number. +* \param[in] h - half. Is only being read. +* +* \returns unsigned short int +* \retval The reinterpreted value. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in a signed short integer as a \p half. +* +* \details Reinterprets the bits in the signed short integer \p i as a +* half-precision floating-point number. +* \param[in] i - short int. Is only being read. +* +* \returns half +* \retval The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __short_as_half(const short int i); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Reinterprets bits in an unsigned short integer as a \p half. +* +* \details Reinterprets the bits in the unsigned short integer \p i as a +* half-precision floating-point number. +* \param[in] i - unsigned short int. Is only being read. +* +* \returns half +* \retval The reinterpreted value. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i); + +#if __CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__) +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." 
+ +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) __half2 __shfl(const __half2 var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down))__half2 __shfl_down(const __half2 var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) __half2 __shfl_xor(const __half2 var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) __half __shfl(const __half var, const int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) __half __shfl_up(const __half var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) __half __shfl_down(const __half var, const unsigned int delta, const int width = warpSize); +__CUDA_FP16_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) __half __shfl_xor(const __half var, const int delta, const int width = warpSize); +#endif + +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. 
Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. +* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. 
If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half2. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 4-byte word referenced by var from the source thread ID as half2. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Direct copy from indexed thread. +* +* \details Returns the value of var held by the thread whose ID is given by delta. +* If width is less than warpSize then each subsection of the warp behaves as a separate +* entity with a starting logical thread ID of 0. If delta is outside the range [0:width-1], +* the value returned corresponds to the value of var held by the delta modulo width (i.e. +* within the same subsection). width must have a value which is a power of 2; +* results are undefined if width is not a power of 2, or is a number greater than +* warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. 
+* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with lower ID relative to the caller. +* \details Calculates a source thread ID by subtracting delta from the caller's lane ID. +* The value of var held by the resulting lane ID is returned: in effect, var is shifted up +* the warp by delta threads. If width is less than warpSize then each subsection of the warp +* behaves as a separate entity with a starting logical thread ID of 0. The source thread index +* will not wrap around the value of width, so effectively the lower delta threads will be unchanged. +* width must have a value which is a power of 2; results are undefined if width is not a power of 2, +* or is a number greater than warpSize. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread with higher ID relative to the caller. 
+* +* \details Calculates a source thread ID by adding delta to the caller's thread ID. +* The value of var held by the resulting thread ID is returned: this has the effect +* of shifting var down the warp by delta threads. If width is less than warpSize then +* each subsection of the warp behaves as a separate entity with a starting logical +* thread ID of 0. As for __shfl_up_sync(), the ID number of the source thread +* will not wrap around the value of width and so the upper delta threads +* will remain unchanged. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. +* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width = warpSize); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Exchange a variable between threads within a warp. Copy from a thread based on bitwise XOR of own thread ID. +* +* \details Calculates a source thread ID by performing a bitwise XOR of the caller's thread ID with mask: +* the value of var held by the resulting thread ID is returned. If width is less than warpSize then each +* group of width consecutive threads are able to access elements from earlier groups of threads, +* however if they attempt to access elements from later groups of threads their own value of var +* will be returned. This mode implements a butterfly addressing pattern such as is used in tree +* reduction and broadcast. +* \param[in] mask - unsigned int. Is only being read. +* \param[in] var - half. Is only being read. 
+* \param[in] delta - int. Is only being read. +* \param[in] width - int. Is only being read. +* +* \returns Returns the 2-byte word referenced by var from the source thread ID as half. +* If the source thread ID is out of range or the source thread has exited, the calling thread's own var is returned. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior not reentrant, not thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width = warpSize); + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif +#endif /*__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__) */ + +#if defined(__cplusplus) && ( __CUDA_ARCH__ >=320 || !defined(__CUDA_ARCH__) ) +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.nc` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cg` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.ca` load instruction. 
+* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.ca` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cs` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.lu` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `ld.global.cv` load instruction. +* \param[in] ptr - memory location +* \returns The value pointed by `ptr` +*/ +__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wb` store instruction. 
+* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wb` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cg` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.cs` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wt` store instruction. +* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value); +/** +* \ingroup CUDA_MATH__HALF_MISC +* \brief Generates a `st.global.wt` store instruction. 
+* \param[out] ptr - memory location +* \param[in] value - the value to be stored +*/ +__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value); +#endif /*defined(__cplusplus) && ( __CUDA_ARCH__ >=320 || !defined(__CUDA_ARCH__) )*/ + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs half2 vector if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. 
Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The \p half2 result of less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The half2 vector result of less-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. 
+* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of unordered if-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of unordered not-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison. +* +* Performs \p half2 vector less-equal comparison of inputs \p a and \p b. 
+* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of unordered less-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The \p half2 vector result of unordered greater-equal comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The vector result of unordered less-than comparison of vectors \p a and \p b. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The corresponding \p half results are set to 1.0 for true, or 0.0 for false. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The \p half2 vector result of unordered greater-than comparison of vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Determine whether \p half2 argument is a NaN. +* +* \details Determine whether each half of input \p half2 number \p a is a NaN. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The half2 with the corresponding \p half results set to +* 1.0 for NaN, 0.0 otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-95 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The sum of vectors \p a and \p b. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-104 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The subtraction of vector \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-102 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise multiplying the vectors \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector division in round-to-nearest-even mode. +* +* \details Divides \p half2 input vector \p a by input vector \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-103 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise division of \p a with \p b. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Calculates the absolute value of both halves of the input \p half2 number and +* returns the result. +* +* \details Calculates the absolute value of both halves of the input \p half2 number and +* returns the result. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval Returns \p a with the absolute value of both halves. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half2 vector add of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the results to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector subtraction in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Subtracts \p half2 input vector \p b from input vector \p a in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The subtraction of vector \p b from \p a, with respect to saturation. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector multiplication in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p half2 vector multiplication of inputs \p a and \p b, in +* round-to-nearest-even mode, and clamps the results to range [0.0, 1.0]. NaN +* results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise multiplication of vectors \p a and \p b, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode. +* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-105 +* \endinternal +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode, with saturation to [0.0, 1.0]. 
+* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the +* results to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c, +* with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Negates both halves of the input \p half2 number and returns the +* result. +* +* \details Negates both halves of the input \p half2 number \p a and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-101 +* \endinternal +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval Returns \p a with both halves negated. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Calculates the absolute value of input \p half number and returns the result. +* +* \details Calculates the absolute value of input \p half number and returns the result. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The absolute value of a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __habs(const __half a); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode. 
+* +* \details Performs \p half addition of inputs \p a and \p b, in round-to-nearest-even +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-94 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \retval The sum of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-97 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \retval The result of subtracting \p b from \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-99 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \retval The result of multiplying \p a and \p b. +*/ +__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half division in round-to-nearest-even mode. +* +* \details Divides \p half input \p a by input \p b in round-to-nearest +* mode. +* \internal +* \req DEEPLEARN-SRM_REQ-98 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \retval The result of dividing \p a by \p b. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half addition in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half add of inputs \p a and \p b, in round-to-nearest-even mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \retval The sum of \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half subtraction in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Subtracts \p half input \p b from input \p a in round-to-nearest +* mode, +* and clamps the result to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \retval The result of subtraction of \p b from \p a, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half multiplication in round-to-nearest-even mode, with +* saturation to [0.0, 1.0]. +* +* \details Performs \p half multiplication of inputs \p a and \p b, in round-to-nearest +* mode, and clamps the result to range [0.0, 1.0]. NaN results are flushed to +* +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. 
+* +* \returns half +* \retval The result of multiplying \p a and \p b, with respect to saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* \internal +* \req DEEPLEARN-SRM_REQ-96 +* \endinternal +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* \retval The result of fused multiply-add operation on \p +* a, \p b, and \p c. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode, +* with saturation to [0.0, 1.0]. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode, and clamps the result +* to range [0.0, 1.0]. NaN results are flushed to +0.0. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* \retval The result of fused multiply-add operation on \p +* a, \p b, and \p c, with respect to saturation. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Negates input \p half number and returns the result. +* +* \details Negates input \p half number and returns the result. +* \internal +* \req DEEPLEARN-SRM_REQ-100 +* \endinternal +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval minus a +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hneg(const __half a); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector if-equal comparison and returns boolean true +* iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half if-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of if-equal comparison +* of vectors \p a and \p b are true; +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector not-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. 
Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of not-equal comparison +* of vectors \p a and \p b are true, +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of less-equal comparison +* of vectors \p a and \p b are true; +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-equal comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of greater-equal +* comparison of vectors \p a and \p b are true; +* \retval false otherwise. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector less-than comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of less-than comparison +* of vectors \p a and \p b are true; +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector greater-than comparison and returns boolean +* true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate false results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of greater-than +* comparison of vectors \p a and \p b are true; +* \retval false otherwise. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered if-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector if-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half if-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of unordered if-equal +* comparison of vectors \p a and \p b are true; +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered not-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector not-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half not-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of unordered not-equal +* comparison of vectors \p a and \p b are true; +* \retval false otherwise. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-equal comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of unordered less-equal +* comparison of vectors \p a and \p b are true; +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-equal comparison and +* returns boolean true iff both \p half results are true, boolean false +* otherwise. +* +* \details Performs \p half2 vector greater-equal comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-equal comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of unordered +* greater-equal comparison of vectors \p a and \p b are true; +* \retval false otherwise. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered less-than comparison and returns +* boolean true iff both \p half results are true, boolean false otherwise. +* +* \details Performs \p half2 vector less-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half less-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of unordered less-than comparison of +* vectors \p a and \p b are true; +* \retval false otherwise. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Performs \p half2 vector unordered greater-than comparison and +* returns boolean true iff both \p half results are true, boolean false +* otherwise. +* +* \details Performs \p half2 vector greater-than comparison of inputs \p a and \p b. +* The bool result is set to true only if both \p half greater-than comparisons +* evaluate to true, or false otherwise. +* NaN inputs generate true results. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns bool +* \retval true if both \p half results of unordered +* greater-than comparison of vectors \p a and \p b are true; +* \retval false otherwise. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half if-equal comparison. +* +* \details Performs \p half if-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of if-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half not-equal comparison. +* +* \details Performs \p half not-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of not-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half less-equal comparison. +* +* \details Performs \p half less-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of less-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half greater-equal comparison. 
+* +* \details Performs \p half greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of greater-equal comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half less-than comparison. +* +* \details Performs \p half less-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of less-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half greater-than comparison. +* +* \details Performs \p half greater-than comparison of inputs \p a and \p b. +* NaN inputs generate false results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of greater-than comparison of \p a and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered if-equal comparison. +* +* \details Performs \p half if-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. 
+* +* \returns bool +* \retval The boolean result of unordered if-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered not-equal comparison. +* +* \details Performs \p half not-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of unordered not-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered less-equal comparison. +* +* \details Performs \p half less-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of unordered less-equal comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered greater-equal comparison. +* +* \details Performs \p half greater-equal comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of unordered greater-equal comparison of \p a +* and \p b. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered less-than comparison. +* +* \details Performs \p half less-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of unordered less-than comparison of \p a and +* \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Performs \p half unordered greater-than comparison. +* +* \details Performs \p half greater-than comparison of inputs \p a and \p b. +* NaN inputs generate true results. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns bool +* \retval The boolean result of unordered greater-than comparison of \p a +* and \p b. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Determine whether \p half argument is a NaN. +* +* \details Determine whether \p half value \p a is a NaN. +* \param[in] a - half. Is only being read. +* +* \returns bool +* \retval true iff argument is NaN. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ bool __hisnan(const __half a); +#if __CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__) +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half maximum of two input values. 
+* +* \details Calculates \p half max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half minimum of two input values. +* +* \details Calculates \p half min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half maximum of two input values, NaNs pass through. +* +* \details Calculates \p half max(\p a, \p b) +* defined as (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_COMPARISON +* \brief Calculates \p half minimum of two input values, NaNs pass through. 
+* +* \details Calculates \p half min(\p a, \p b) +* defined as (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* +* \returns half +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b); +/** +* \ingroup CUDA_MATH__HALF_ARITHMETIC +* \brief Performs \p half fused multiply-add in round-to-nearest-even mode with relu saturation. +* +* \details Performs \p half multiply on inputs \p a and \p b, +* then performs a \p half add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - half. Is only being read. +* \param[in] b - half. Is only being read. +* \param[in] c - half. Is only being read. +* +* \returns half +* \retval The result of fused multiply-add operation on \p +* a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector maximum of two inputs. +* +* \details Calculates \p half2 vector max(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
+* +* \returns half2 +* \retval The result of elementwise maximum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector minimum of two inputs. +* +* \details Calculates \p half2 vector min(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, the other input is returned. +* - If both inputs are NaNs, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise minimum of vectors \p a and \p b +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector maximum of two inputs, NaNs pass through. +* +* \details Calculates \p half2 vector max(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a > \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise maximum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_COMPARISON +* \brief Calculates \p half2 vector minimum of two inputs, NaNs pass through. 
+* +* \details Calculates \p half2 vector min(\p a, \p b). +* Elementwise \p half operation is defined as +* (\p a < \p b) ? \p a : \p b. +* - If either of inputs is NaN, then canonical NaN is returned. +* - If values of both inputs are 0.0, then +0.0 > -0.0 +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise minimum of vectors \p a and \p b, with NaNs pass through +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b); +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs \p half2 vector fused multiply-add in round-to-nearest-even +* mode with relu saturation. +* +* \details Performs \p half2 vector multiply on inputs \p a and \p b, +* then performs a \p half2 vector add of the result with \p c, +* rounding the result once in round-to-nearest-even mode. +* Then negative result is clamped to 0. +* NaN result is converted to canonical NaN. +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. +* \param[in] c - half2. Is only being read. +* +* \returns half2 +* \retval The result of elementwise fused multiply-add operation on vectors \p a, \p b, and \p c with relu saturation. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c); +#endif /*__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)*/ +/** +* \ingroup CUDA_MATH__HALF2_ARITHMETIC +* \brief Performs fast complex multiply-accumulate +* +* \details Interprets vector \p half2 input pairs \p a, \p b, and \p c as +* complex numbers in \p half precision and performs +* complex multiply-accumulate operation: a*b + c +* \param[in] a - half2. Is only being read. +* \param[in] b - half2. Is only being read. 
+* \param[in] c - half2. Is only being read. +* +* \returns half2 +* \retval The result of complex multiply-accumulate operation on complex numbers \p a, \p b, and \p c +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half square root in round-to-nearest-even mode. +* +* \details Calculates \p half square root of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hsqrt(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half reciprocal square root in round-to-nearest-even +* mode. +* +* \details Calculates \p half reciprocal square root of input \p a in round-to-nearest +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The reciprocal square root of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrsqrt(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p half reciprocal of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The reciprocal of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hrcp(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half natural logarithm in round-to-nearest-even mode. 
+* +* \details Calculates \p half natural logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The natural logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half binary logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half binary logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The binary logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half decimal logarithm in round-to-nearest-even mode. +* +* \details Calculates \p half decimal logarithm of input \p a in round-to-nearest-even +* mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The decimal logarithm of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hlog10(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half natural exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half natural exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The natural exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half binary exponential function in round-to-nearest +* mode. 
+* +* \details Calculates \p half binary exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The binary exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp2(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half decimal exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half decimal exponential function of input \p a in +* round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The decimal exponential function on \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hexp10(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half cosine in round-to-nearest-even mode. +* +* \details Calculates \p half cosine of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The cosine of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hcos(const __half a); +/** +* \ingroup CUDA_MATH__HALF_FUNCTIONS +* \brief Calculates \p half sine in round-to-nearest-even mode. +* +* \details Calculates \p half sine of input \p a in round-to-nearest-even mode. +* \param[in] a - half. Is only being read. +* +* \returns half +* \retval The sine of \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half hsin(const __half a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector square root in round-to-nearest-even mode. 
+* +* \details Calculates \p half2 square root of input vector \p a in round-to-nearest +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector reciprocal square root in round-to-nearest +* mode. +* +* \details Calculates \p half2 reciprocal square root of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise reciprocal square root on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector reciprocal in round-to-nearest-even mode. +* +* \details Calculates \p half2 reciprocal of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise reciprocal on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector natural logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 natural logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise natural logarithm on vector \p a. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector binary logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 binary logarithm of input vector \p a in round-to-nearest +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise binary logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector decimal logarithm in round-to-nearest-even +* mode. +* +* \details Calculates \p half2 decimal logarithm of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise decimal logarithm on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector exponential function in round-to-nearest +* mode. +* +* \details Calculates \p half2 exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector binary exponential function in +* round-to-nearest-even mode. 
+* +* \details Calculates \p half2 binary exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise binary exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector decimal exponential function in +* round-to-nearest-even mode. +* +* \details Calculates \p half2 decimal exponential function of input vector \p a in +* round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise decimal exponential function on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector cosine in round-to-nearest-even mode. +* +* \details Calculates \p half2 cosine of input vector \p a in round-to-nearest-even +* mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise cosine on vector \p a. +* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a); +/** +* \ingroup CUDA_MATH__HALF2_FUNCTIONS +* \brief Calculates \p half2 vector sine in round-to-nearest-even mode. +* +* \details Calculates \p half2 sine of input vector \p a in round-to-nearest-even mode. +* \param[in] a - half2. Is only being read. +* +* \returns half2 +* \retval The elementwise sine on vector \p a. 
+* \internal +* \exception-guarantee no-throw guarantee +* \behavior reentrant, thread safe +* \endinternal +*/ +__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a); + +#endif /*if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ + +#if __CUDA_ARCH__ >= 600 || !defined(__CUDA_ARCH__) + +__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val); + +#endif /*if __CUDA_ARCH__ >= 600 || !defined(__CUDA_ARCH__)*/ + +#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__) + +__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val); + +#endif /*if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)*/ + +#endif /* defined(__CUDACC__) */ + +#undef __CUDA_FP16_DECL__ +#undef __CUDA_HOSTDEVICE_FP16_DECL__ + +#endif /* defined(__cplusplus) */ + +/* Note the .hpp file is included even for host-side compilation, to capture the "half" & "half2" definitions */ +#include "cuda_fp16.hpp" + +#endif /* end of include guard: __CUDA_FP16_H__ */ diff --git a/lib/python3.10/site-packages/numba/cuda/cuda_fp16.hpp b/lib/python3.10/site-packages/numba/cuda/cuda_fp16.hpp new file mode 100644 index 0000000000000000000000000000000000000000..19bbd3412d52b6df05404313cdc7ce3699beaaf3 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cuda_fp16.hpp @@ -0,0 +1,2465 @@ +/* +* Copyright 1993-2020 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO LICENSEE: +* +* This source code and/or documentation ("Licensed Deliverables") are +* subject to NVIDIA intellectual property rights under U.S. and +* international Copyright laws. +* +* These Licensed Deliverables contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and +* conditions of a form of NVIDIA software license agreement by and +* between NVIDIA and Licensee ("License Agreement") or electronically +* accepted by Licensee. 
Notwithstanding any terms or conditions to +* the contrary in the License Agreement, reproduction or disclosure +* of the Licensed Deliverables to any third party without the express +* written consent of NVIDIA is prohibited. +* +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE +* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS +* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. +* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED +* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, +* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE +* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY +* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY +* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +* OF THESE LICENSED DELIVERABLES. +* +* U.S. Government End Users. These Licensed Deliverables are a +* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT +* 1995), consisting of "commercial computer software" and "commercial +* computer software documentation" as such terms are used in 48 +* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government +* only as a commercial end item. Consistent with 48 C.F.R.12.212 and +* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all +* U.S. Government End Users acquire the Licensed Deliverables with +* only those rights set forth herein. +* +* Any use of the Licensed Deliverables in individual and commercial +* software must include, in the user documentation and internal +* comments to the code, the above Disclaimer and U.S. Government End +* Users Notice. 
+*/ + +#if !defined(__CUDA_FP16_HPP__) +#define __CUDA_FP16_HPP__ + +#if !defined(__CUDA_FP16_H__) +#error "Do not include this file directly. Instead, include cuda_fp16.h." +#endif + +#if !defined(_MSC_VER) && __cplusplus >= 201103L +# define __CPP_VERSION_AT_LEAST_11_FP16 +#elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L +# define __CPP_VERSION_AT_LEAST_11_FP16 +#endif + +/* C++11 header for std::move. + * In RTC mode, std::move is provided implicitly; don't include the header + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) && !defined(__CUDACC_RTC__) +#include +#endif /* __cplusplus >= 201103L && !defined(__CUDACC_RTC__) */ + +/* C++ header for std::memcpy (used for type punning in host-side implementations). + * When compiling as a CUDA source file memcpy is provided implicitly. + * !defined(__CUDACC__) implies !defined(__CUDACC_RTC__). + */ +#if defined(__cplusplus) && !defined(__CUDACC__) +#include +#endif /* defined(__cplusplus) && !defined(__CUDACC__) */ + + +/* Set up function decorations */ +#if defined(__CUDACC__) +#define __CUDA_FP16_DECL__ static __device__ __inline__ +#define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#define __CUDA_HOSTDEVICE__ __host__ __device__ +#else /* !defined(__CUDACC__) */ +#if defined(__GNUC__) +#define __CUDA_HOSTDEVICE_FP16_DECL__ static __attribute__ ((unused)) +#else +#define __CUDA_HOSTDEVICE_FP16_DECL__ static +#endif /* defined(__GNUC__) */ +#define __CUDA_HOSTDEVICE__ +#endif /* defined(__CUDACC_) */ + +/* Set up structure-alignment attribute */ +#if defined(__CUDACC__) +#define __CUDA_ALIGN__(align) __align__(align) +#else +/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */ +#if __cplusplus >= 201103L +#define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */ +#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP16)*/ +#if defined(__GNUC__) 
+#define __CUDA_ALIGN__(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +#define __CUDA_ALIGN__(n) __declspec(align(n)) +#else +#define __CUDA_ALIGN__(n) +#endif /* defined(__GNUC__) */ +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ +#endif /* defined(__CUDACC__) */ + +/* Macros to allow half & half2 to be used by inline assembly */ +#define __HALF_TO_US(var) *(reinterpret_cast(&(var))) +#define __HALF_TO_CUS(var) *(reinterpret_cast(&(var))) +#define __HALF2_TO_UI(var) *(reinterpret_cast(&(var))) +#define __HALF2_TO_CUI(var) *(reinterpret_cast(&(var))) + +/* Macros for half & half2 binary arithmetic */ +#define __BINARY_OP_HALF_MACRO(name) /* do */ {\ + __half val; \ + asm( "{"#name".f16 %0,%1,%2;\n}" \ + :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b))); \ + return val; \ +} /* while(0) */ +#define __BINARY_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + asm( "{"#name".f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + return val; \ +} /* while(0) */ +#define __TERNARY_OP_HALF_MACRO(name) /* do */ {\ + __half val; \ + asm( "{"#name".f16 %0,%1,%2,%3;\n}" \ + :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b)),"h"(__HALF_TO_CUS(c))); \ + return val; \ +} /* while(0) */ +#define __TERNARY_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + asm( "{"#name".f16x2 %0,%1,%2,%3;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b)),"r"(__HALF2_TO_CUI(c))); \ + return val; \ +} /* while(0) */ + +/** +* Types which allow static initialization of "half" and "half2" until +* these become an actual builtin. Note this initialization is as a +* bitfield representation of "half", and not a conversion from short->half. +* Such a representation will be deprecated in a future version of CUDA. 
+* (Note these are visible to non-nvcc compilers, including C-only compilation) +*/ +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __half_raw; + +typedef struct __CUDA_ALIGN__(4) { + unsigned short x; + unsigned short y; +} __half2_raw; + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/* Hide GCC member initialization list warnings because of host/device in-function init requirement */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic ignored "-Weffc++" +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +/* class' : multiple assignment operators specified + The class has multiple assignment operators of a single type. This warning is informational */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma warning( push ) +#pragma warning( disable:4522 ) +#endif /* defined(__GNUC__) */ + +struct __CUDA_ALIGN__(2) __half { +protected: + unsigned short __x; + +public: +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) + __half() = default; +#else + __CUDA_HOSTDEVICE__ __half() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + + /* Convert to/from __half_raw */ + __CUDA_HOSTDEVICE__ __half(const __half_raw &hr) : __x(hr.x) { } + __CUDA_HOSTDEVICE__ __half &operator=(const __half_raw &hr) { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __half &operator=(const __half_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ volatile __half &operator=(const volatile __half_raw &hr) volatile { __x = hr.x; return *this; } + __CUDA_HOSTDEVICE__ operator __half_raw() const { __half_raw ret; ret.x = __x; return ret; } + __CUDA_HOSTDEVICE__ operator __half_raw() const volatile { __half_raw ret; ret.x = __x; return ret; } + +#if !defined(__CUDA_NO_HALF_CONVERSIONS__) + + /* Construct from 
float/double */ + __CUDA_HOSTDEVICE__ __half(const float f) { __x = __float2half(f).__x; } + __CUDA_HOSTDEVICE__ __half(const double f) { __x = __double2half(f).__x; } + + __CUDA_HOSTDEVICE__ operator float() const { return __half2float(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const float f) { __x = __float2half(f).__x; return *this; } + + /* We omit "cast to double" operator, so as to not be ambiguous about up-cast */ + __CUDA_HOSTDEVICE__ __half &operator=(const double f) { __x = __double2half(f).__x; return *this; } + +/* Member functions only available to nvcc compilation so far */ +#if defined(__CUDACC__) + /* Allow automatic construction from types supported natively in hardware */ + /* Note we do avoid constructor init-list because of special host/device compilation rules */ + __CUDA_HOSTDEVICE__ __half(const short val) { __x = __short2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned short val) { __x = __ushort2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const int val) { __x = __int2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned int val) { __x = __uint2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const long long val) { __x = __ll2half_rn(val).__x; } + __CUDA_HOSTDEVICE__ __half(const unsigned long long val) { __x = __ull2half_rn(val).__x; } + + /* Allow automatic casts to supported builtin types, matching all that are permitted with float */ + __CUDA_HOSTDEVICE__ operator short() const { return __half2short_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const short val) { __x = __short2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned short() const { return __half2ushort_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned short val) { __x = __ushort2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator int() const { return __half2int_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const int val) { __x = __int2half_rn(val).__x; return *this; } + 
+ __CUDA_HOSTDEVICE__ operator unsigned int() const { return __half2uint_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned int val) { __x = __uint2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator long long() const { return __half2ll_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const long long val) { __x = __ll2half_rn(val).__x; return *this; } + + __CUDA_HOSTDEVICE__ operator unsigned long long() const { return __half2ull_rz(*this); } + __CUDA_HOSTDEVICE__ __half &operator=(const unsigned long long val) { __x = __ull2half_rn(val).__x; return *this; } + + /* Boolean conversion - note both 0 and -0 must return false */ + __CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFFU) != 0U; } +#endif /* defined(__CUDACC__) */ +#endif /* !defined(__CUDA_NO_HALF_CONVERSIONS__) */ +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +/* Arithmetic FP16 operations only supported on arch >= 5.3 */ +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) +#if !defined(__CUDA_NO_HALF_OPERATORS__) +/* Some basic arithmetic operations expected of a builtin */ +__device__ __forceinline__ __half operator+(const __half &lh, const __half &rh) { return __hadd(lh, rh); } +__device__ __forceinline__ __half operator-(const __half &lh, const __half &rh) { return __hsub(lh, rh); } +__device__ __forceinline__ __half operator*(const __half &lh, const __half &rh) { return __hmul(lh, rh); } +__device__ __forceinline__ __half operator/(const __half &lh, const __half &rh) { return __hdiv(lh, rh); } + +__device__ __forceinline__ __half &operator+=(__half &lh, const __half &rh) { lh = __hadd(lh, rh); return lh; } +__device__ __forceinline__ __half &operator-=(__half &lh, const __half &rh) { lh = __hsub(lh, rh); return lh; } +__device__ __forceinline__ __half &operator*=(__half &lh, const __half &rh) { lh = __hmul(lh, rh); return lh; } +__device__ __forceinline__ __half &operator/=(__half &lh, 
const __half &rh) { lh = __hdiv(lh, rh); return lh; } + +/* Note for increment and decrement we use the raw value 0x3C00U equating to half(1.0F), to avoid the extra conversion */ +__device__ __forceinline__ __half &operator++(__half &h) { __half_raw one; one.x = 0x3C00U; h += one; return h; } +__device__ __forceinline__ __half &operator--(__half &h) { __half_raw one; one.x = 0x3C00U; h -= one; return h; } +__device__ __forceinline__ __half operator++(__half &h, const int ignored) { const __half ret = h; __half_raw one; one.x = 0x3C00U; h += one; return ret; } +__device__ __forceinline__ __half operator--(__half &h, const int ignored) { const __half ret = h; __half_raw one; one.x = 0x3C00U; h -= one; return ret; } + +/* Unary plus and inverse operators */ +__device__ __forceinline__ __half operator+(const __half &h) { return h; } +__device__ __forceinline__ __half operator-(const __half &h) { return __hneg(h); } + +/* Some basic comparison operations to make it look like a builtin */ +__device__ __forceinline__ bool operator==(const __half &lh, const __half &rh) { return __heq(lh, rh); } +__device__ __forceinline__ bool operator!=(const __half &lh, const __half &rh) { return __hneu(lh, rh); } +__device__ __forceinline__ bool operator> (const __half &lh, const __half &rh) { return __hgt(lh, rh); } +__device__ __forceinline__ bool operator< (const __half &lh, const __half &rh) { return __hlt(lh, rh); } +__device__ __forceinline__ bool operator>=(const __half &lh, const __half &rh) { return __hge(lh, rh); } +__device__ __forceinline__ bool operator<=(const __half &lh, const __half &rh) { return __hle(lh, rh); } +#endif /* !defined(__CUDA_NO_HALF_OPERATORS__) */ +#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */ +#endif /* defined(__CUDACC__) */ + +/* __half2 is visible to non-nvcc host compilers */ +struct __CUDA_ALIGN__(4) __half2 { + __half x; + __half y; + + // All construct/copy/assign/move +public: +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) + 
__half2() = default; + __CUDA_HOSTDEVICE__ __half2(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); return *this; } +#else + __CUDA_HOSTDEVICE__ __half2() { } +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + __CUDA_HOSTDEVICE__ __half2(const __half &a, const __half &b) : x(a), y(b) { } + __CUDA_HOSTDEVICE__ __half2(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); return *this; } + + /* Convert to/from __half2_raw */ + __CUDA_HOSTDEVICE__ __half2(const __half2_raw &h2r ) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); } + __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2_raw &h2r) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); return *this; } + __CUDA_HOSTDEVICE__ operator __half2_raw() const { __half2_raw ret; ret.x = 0U; ret.y = 0U; __HALF2_TO_UI(ret) = __HALF2_TO_CUI(*this); return ret; } +}; + +/* Global-space operator functions are only available to nvcc compilation */ +#if defined(__CUDACC__) + +/* Arithmetic FP16x2 operations only supported on arch >= 5.3 */ +#if (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)) && !defined(__CUDA_NO_HALF2_OPERATORS__) + +__device__ __forceinline__ __half2 operator+(const __half2 &lh, const __half2 &rh) { return __hadd2(lh, rh); } +__device__ __forceinline__ __half2 operator-(const __half2 &lh, const __half2 &rh) { return __hsub2(lh, rh); } +__device__ __forceinline__ __half2 operator*(const __half2 &lh, const __half2 &rh) { return __hmul2(lh, rh); } +__device__ __forceinline__ __half2 operator/(const __half2 &lh, const __half2 &rh) { return __h2div(lh, rh); } + +__device__ __forceinline__ __half2& operator+=(__half2 &lh, const __half2 &rh) { lh = __hadd2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator-=(__half2 &lh, const 
__half2 &rh) { lh = __hsub2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator*=(__half2 &lh, const __half2 &rh) { lh = __hmul2(lh, rh); return lh; } +__device__ __forceinline__ __half2& operator/=(__half2 &lh, const __half2 &rh) { lh = __h2div(lh, rh); return lh; } + +__device__ __forceinline__ __half2 &operator++(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return h; } +__device__ __forceinline__ __half2 &operator--(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return h; } +__device__ __forceinline__ __half2 operator++(__half2 &h, const int ignored) { const __half2 ret = h; __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return ret; } +__device__ __forceinline__ __half2 operator--(__half2 &h, const int ignored) { const __half2 ret = h; __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return ret; } + +__device__ __forceinline__ __half2 operator+(const __half2 &h) { return h; } +__device__ __forceinline__ __half2 operator-(const __half2 &h) { return __hneg2(h); } + +__device__ __forceinline__ bool operator==(const __half2 &lh, const __half2 &rh) { return __hbeq2(lh, rh); } +__device__ __forceinline__ bool operator!=(const __half2 &lh, const __half2 &rh) { return __hbneu2(lh, rh); } +__device__ __forceinline__ bool operator>(const __half2 &lh, const __half2 &rh) { return __hbgt2(lh, rh); } +__device__ __forceinline__ bool operator<(const __half2 &lh, const __half2 &rh) { return __hblt2(lh, rh); } +__device__ __forceinline__ bool operator>=(const __half2 &lh, const __half2 &rh) { return __hbge2(lh, rh); } +__device__ __forceinline__ bool operator<=(const __half2 &lh, const __half2 &rh) { return __hble2(lh, rh); } + +#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) */ +#endif /* defined(__CUDACC__) */ + +/* Restore warning for multiple assignment operators */ +#if defined(_MSC_VER) && _MSC_VER >= 1500 +#pragma 
warning( pop ) +#endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */ + +/* Restore -Weffc++ warnings from here on */ +#if defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */ +#endif /* defined(__GNUC__) */ + +#undef __CUDA_HOSTDEVICE__ +#undef __CUDA_ALIGN__ + +#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */ +static inline unsigned short __internal_float2half(const float f, unsigned int &sign, unsigned int &remainder) +{ + unsigned int x; + unsigned int u; + unsigned int result; +#if defined(__CUDACC__) + (void)memcpy(&x, &f, sizeof(f)); +#else + (void)std::memcpy(&x, &f, sizeof(f)); +#endif + u = (x & 0x7fffffffU); + sign = ((x >> 16U) & 0x8000U); + // NaN/+Inf/-Inf + if (u >= 0x7f800000U) { + remainder = 0U; + result = ((u == 0x7f800000U) ? (sign | 0x7c00U) : 0x7fffU); + } else if (u > 0x477fefffU) { // Overflows + remainder = 0x80000000U; + result = (sign | 0x7bffU); + } else if (u >= 0x38800000U) { // Normal numbers + remainder = u << 19U; + u -= 0x38000000U; + result = (sign | (u >> 13U)); + } else if (u < 0x33000001U) { // +0/-0 + remainder = u; + result = sign; + } else { // Denormal numbers + const unsigned int exponent = u >> 23U; + const unsigned int shift = 0x7eU - exponent; + unsigned int mantissa = (u & 0x7fffffU); + mantissa |= 0x800000U; + remainder = mantissa << (32U - shift); + result = (sign | (mantissa >> shift)); + } + return static_cast(result); +} +#endif /* #if !defined(__CUDACC_RTC__) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a) +{ +#if defined(__CUDA_ARCH__) + __half val; + asm("{ cvt.rn.f16.f64 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "d"(a)); + return val; +#else + __half result; + // Perform rounding to 11 bits of precision, convert value + // to float and call existing float to half conversion. 
+ // By pre-rounding to 11 bits we avoid additional rounding + // in float to half conversion. + unsigned long long int absa; + unsigned long long int ua; + #if defined(__CUDACC__) + (void)memcpy(&ua, &a, sizeof(a)); + #else + (void)std::memcpy(&ua, &a, sizeof(a)); + #endif + absa = (ua & 0x7fffffffffffffffULL); + if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL)) + { + // |a| >= 2^16 or NaN or |a| <= 2^(-25) + // double-rounding is not a problem + result = __float2half(static_cast(a)); + } + else + { + // here 2^(-25) < |a| < 2^16 + // prepare shifter value such that a + shifter + // done in double precision performs round-to-nearest-even + // and (a + shifter) - shifter results in a rounded to + // 11 bits of precision. Shifter needs to have exponent of + // a plus 53 - 11 = 42 and a leading bit in mantissa to guard + // against negative values. + // So need to have |a| capped to avoid overflow in exponent. + // For inputs that are smaller than half precision minnorm + // we prepare fixed shifter exponent. 
+ unsigned long long shifterBits; + if (absa >= 0x3f10000000000000ULL) + { // Here if |a| >= 2^(-14) + // add 42 to exponent bits + shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL; + } + else + { // 2^(-25) < |a| < 2^(-14), potentially results in denormal + // set exponent bits to 42 - 14 + bias + shifterBits = 0x41B0000000000000ULL; + } + // set leading mantissa bit to protect against negative inputs + shifterBits |= 0x0008000000000000ULL; + double shifter; + #if defined(__CUDACC__) + (void)memcpy(&shifter, &shifterBits, sizeof(shifterBits)); + #else + (void)std::memcpy(&shifter, &shifterBits, sizeof(shifterBits)); + #endif + double aShiftRound = a + shifter; + + // Prevent the compiler from optimizing away a + shifter - shifter + // by doing intermediate memcopy and harmless bitwize operation + unsigned long long int aShiftRoundBits; + #if defined(__CUDACC__) + (void)memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound)); + #else + (void)std::memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound)); + #endif + + // the value is positive, so this operation doesn't change anything + aShiftRoundBits &= 0x7fffffffffffffffULL; + + #if defined(__CUDACC__) + (void)memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound)); + #else + (void)std::memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound)); + #endif + + result = __float2half(static_cast(aShiftRound - shifter)); + } + + return result; +#endif +} + +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a) +{ + __half val; +#if defined(__CUDA_ARCH__) + asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +#else + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a) +{ + __half val; 
+#if defined(__CUDA_ARCH__) + asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +#else + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) { + r.x++; + } + val = r; +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a) +{ + __half val; +#if defined(__CUDA_ARCH__) + asm("{ cvt.rz.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +#else + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + val = r; +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a) +{ + __half val; +#if defined(__CUDA_ARCH__) + asm("{ cvt.rm.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +#else + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder != 0U) && (sign != 0U)) { + r.x++; + } + val = r; +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a) +{ + __half val; +#if defined(__CUDA_ARCH__) + asm("{ cvt.rp.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a)); +#else + __half_raw r; + unsigned int sign = 0U; + unsigned int remainder = 0U; + r.x = __internal_float2half(a, sign, remainder); + if ((remainder != 0U) && (sign == 0U)) { + r.x++; + } + val = r; +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a) +{ + __half2 val; +#if defined(__CUDA_ARCH__) + asm("{.reg .f16 low;\n" + " cvt.rn.f16.f32 low, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a)); +#else + val = __half2(__float2half_rn(a), __float2half_rn(a)); +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b) +{ + __half2 val; +#if 
defined(__CUDA_ARCH__) + asm("{.reg .f16 low,high;\n" + " cvt.rn.f16.f32 low, %1;\n" + " cvt.rn.f16.f32 high, %2;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b)); +#else + val = __half2(__float2half_rn(a), __float2half_rn(b)); +#endif + return val; +} + +#ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */ +static inline float __internal_half2float(const unsigned short h) +{ + unsigned int sign = ((static_cast(h) >> 15U) & 1U); + unsigned int exponent = ((static_cast(h) >> 10U) & 0x1fU); + unsigned int mantissa = ((static_cast(h) & 0x3ffU) << 13U); + float f; + if (exponent == 0x1fU) { /* NaN or Inf */ + /* discard sign of a NaN */ + sign = ((mantissa != 0U) ? (sign >> 1U) : sign); + mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U); + exponent = 0xffU; + } else if (exponent == 0U) { /* Denorm or Zero */ + if (mantissa != 0U) { + unsigned int msb; + exponent = 0x71U; + do { + msb = (mantissa & 0x400000U); + mantissa <<= 1U; /* normalize */ + --exponent; + } while (msb == 0U); + mantissa &= 0x7fffffU; /* 1.mantissa is implicit */ + } + } else { + exponent += 0x70U; + } + unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa); +#if defined(__CUDACC__) + (void)memcpy(&f, &u, sizeof(u)); +#else + (void)std::memcpy(&f, &u, sizeof(u)); +#endif + return f; +} +#endif /* !defined(__CUDACC_RTC__) */ + +__CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a) +{ + float val; +#if defined(__CUDA_ARCH__) + asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(a))); +#else + val = __internal_half2float(static_cast<__half_raw>(a).x); +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a) +{ + float val; +#if defined(__CUDA_ARCH__) + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, low;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a))); +#else + val = __internal_half2float(static_cast<__half2_raw>(a).x); +#endif + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ 
float __high2float(const __half2 a) +{ + float val; +#if defined(__CUDA_ARCH__) + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, high;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a))); +#else + val = __internal_half2float(static_cast<__half2_raw>(a).y); +#endif + return val; +} + +/* Intrinsic functions only available to nvcc compilers */ +#if defined(__CUDACC__) + +/* CUDA vector-types compatible vector creation function (note returns __half2, not half2) */ +__VECTOR_FUNCTIONS_DECL__ __half2 make_half2(const __half x, const __half y) +{ + __half2 t; t.x = x; t.y = y; return t; +} +#undef __VECTOR_FUNCTIONS_DECL__ + + +/* Definitions of intrinsics */ +__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a) +{ + const __half2 val = __floats2half2_rn(a.x, a.y); + return val; +} +__CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a) +{ + float hi_float; + float lo_float; +#if defined(__CUDA_ARCH__) + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, low;}\n" : "=f"(lo_float) : "r"(__HALF2_TO_CUI(a))); + + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high},%1;\n" + " cvt.f32.f16 %0, high;}\n" : "=f"(hi_float) : "r"(__HALF2_TO_CUI(a))); +#else + lo_float = __internal_half2float(((__half2_raw)a).x); + hi_float = __internal_half2float(((__half2_raw)a).y); +#endif + return make_float2(lo_float, hi_float); +} +__CUDA_FP16_DECL__ int __half2int_rn(const __half h) +{ + int i; + asm("cvt.rni.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h) +{ + int i; +#if defined __CUDA_ARCH__ + asm("cvt.rzi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); +#else + const float f = __half2float(h); + i = static_cast(f); + const int max_val = (int)0x7fffffffU; + const int min_val = (int)0x80000000U; + // saturation fixup + if (f != f) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = 
max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } +#endif + return i; +} +__CUDA_FP16_DECL__ int __half2int_rd(const __half h) +{ + int i; + asm("cvt.rmi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ int __half2int_ru(const __half h) +{ + int i; + asm("cvt.rpi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i) +{ + __half h; +#if defined(__CUDA_ARCH__) + asm("cvt.rn.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); +#else + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +#endif + return h; +} +__CUDA_FP16_DECL__ __half __int2half_rz(const int i) +{ + __half h; + asm("cvt.rz.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __int2half_rd(const int i) +{ + __half h; + asm("cvt.rm.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __int2half_ru(const int i) +{ + __half h; + asm("cvt.rp.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} + +__CUDA_FP16_DECL__ short int __half2short_rn(const __half h) +{ + short int i; + asm("cvt.rni.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h) +{ + short int i; +#if defined __CUDA_ARCH__ + asm("cvt.rzi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); +#else + const float f = __half2float(h); + i = static_cast(f); + const short int max_val = (short int)0x7fffU; + const short int min_val = (short int)0x8000U; + // saturation fixup + if (f != f) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // 
saturate minimum + i = min_val; + } +#endif + return i; +} +__CUDA_FP16_DECL__ short int __half2short_rd(const __half h) +{ + short int i; + asm("cvt.rmi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ short int __half2short_ru(const __half h) +{ + short int i; + asm("cvt.rpi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i) +{ + __half h; +#if defined __CUDA_ARCH__ + asm("cvt.rn.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); +#else + const float f = static_cast(i); + h = __float2half_rn(f); +#endif + return h; +} +__CUDA_FP16_DECL__ __half __short2half_rz(const short int i) +{ + __half h; + asm("cvt.rz.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __short2half_rd(const short int i) +{ + __half h; + asm("cvt.rm.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __short2half_ru(const short int i) +{ + __half h; + asm("cvt.rp.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h) +{ + unsigned int i; + asm("cvt.rni.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h) +{ + unsigned int i; +#if defined __CUDA_ARCH__ + asm("cvt.rzi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); +#else + const float f = __half2float(h); + i = static_cast(f); + const unsigned int max_val = 0xffffffffU; + const unsigned int min_val = 0U; + // saturation fixup + if (f != f) { + // NaN + i = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } +#endif + return i; +} +__CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h) +{ + unsigned int i; + asm("cvt.rmi.u32.f16 %0, %1;" : 
"=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h) +{ + unsigned int i; + asm("cvt.rpi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i) +{ + __half h; +#if defined __CUDA_ARCH__ + asm("cvt.rn.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); +#else + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +#endif + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i) +{ + __half h; + asm("cvt.rz.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i) +{ + __half h; + asm("cvt.rm.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i) +{ + __half h; + asm("cvt.rp.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h) +{ + unsigned short int i; + asm("cvt.rni.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h) +{ + unsigned short int i; +#if defined __CUDA_ARCH__ + asm("cvt.rzi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); +#else + const float f = __half2float(h); + i = static_cast(f); + const unsigned short int max_val = 0xffffU; + const unsigned short int min_val = 0U; + // saturation fixup + if (f != f) { + // NaN + i = 0U; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } +#endif + return i; +} +__CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const 
__half h) +{ + unsigned short int i; + asm("cvt.rmi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h) +{ + unsigned short int i; + asm("cvt.rpi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i) +{ + __half h; +#if defined __CUDA_ARCH__ + asm("cvt.rn.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); +#else + const float f = static_cast(i); + h = __float2half_rn(f); +#endif + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i) +{ + __half h; + asm("cvt.rz.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i) +{ + __half h; + asm("cvt.rm.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i) +{ + __half h; + asm("cvt.rp.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i)); + return h; +} + +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h) +{ + unsigned long long int i; + asm("cvt.rni.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h) +{ + unsigned long long int i; +#if defined __CUDA_ARCH__ + asm("cvt.rzi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); +#else + const float f = __half2float(h); + i = static_cast(f); + const unsigned long long int max_val = 0xffffffffffffffffULL; + const unsigned long long int min_val = 0ULL; + // saturation fixup + if (f != f) { + // NaN + i = 0x8000000000000000ULL; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } +#endif + return i; +} +__CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h) +{ + 
unsigned long long int i; + asm("cvt.rmi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h) +{ + unsigned long long int i; + asm("cvt.rpi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i) +{ + __half h; +#if defined(__CUDA_ARCH__) + asm("cvt.rn.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); +#else + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +#endif + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i) +{ + __half h; + asm("cvt.rz.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i) +{ + __half h; + asm("cvt.rm.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i) +{ + __half h; + asm("cvt.rp.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} + +__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h) +{ + long long int i; + asm("cvt.rni.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h) +{ + long long int i; +#if defined __CUDA_ARCH__ + asm("cvt.rzi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); +#else + const float f = __half2float(h); + i = static_cast(f); + const long long int max_val = (long long int)0x7fffffffffffffffULL; + const long long int min_val = (long long int)0x8000000000000000ULL; + // saturation fixup + if (f != f) { + // NaN + i = min_val; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < 
static_cast(min_val)) { + // saturate minimum + i = min_val; + } +#endif + return i; +} +__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h) +{ + long long int i; + asm("cvt.rmi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h) +{ + long long int i; + asm("cvt.rpi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h))); + return i; +} +__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i) +{ + __half h; +#if defined(__CUDA_ARCH__) + asm("cvt.rn.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); +#else + // double-rounding is not a problem here: if integer + // has more than 24 bits, it is already too large to + // be represented in half precision, and result will + // be infinity. + const float f = static_cast(i); + h = __float2half_rn(f); +#endif + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i) +{ + __half h; + asm("cvt.rz.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i) +{ + __half h; + asm("cvt.rm.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} +__CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i) +{ + __half h; + asm("cvt.rp.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i)); + return h; +} + +__CUDA_FP16_DECL__ __half htrunc(const __half h) +{ + __half r; + asm("cvt.rzi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hceil(const __half h) +{ + __half r; + asm("cvt.rpi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hfloor(const __half h) +{ + __half r; + asm("cvt.rmi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return r; +} +__CUDA_FP16_DECL__ __half hrint(const __half h) +{ + __half r; + asm("cvt.rni.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h))); + return 
r; +} + +__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rzi.f16.f16 low, low;\n" + " cvt.rzi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rpi.f16.f16 low, low;\n" + " cvt.rpi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rmi.f16.f16 low, low;\n" + " cvt.rmi.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " cvt.rni.f16.f16 low, low;\n" + " cvt.rni.f16.f16 high, high;\n" + " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h))); + return val; +} +__CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b) +{ + __half2 val; + asm("{.reg .f16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {alow,blow};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b))); + return val; +} +__CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b) +{ + __half2 val; + asm("{.reg .f16 alow,ahigh,blow,bhigh;\n" + " mov.b32 {alow,ahigh}, %1;\n" + " mov.b32 {blow,bhigh}, %2;\n" + " mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b))); + return val; +} +__CUDA_FP16_DECL__ __half __low2half(const __half2 a) +{ + __half ret; + asm("{.reg .f16 low,high;\n" + " 
mov.b32 {low,high}, %1;\n" + " mov.b16 %0, low;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a))); + return ret; +} +__CUDA_FP16_DECL__ int __hisinf(const __half a) +{ + int retval; + if (__HALF_TO_CUS(a) == 0xFC00U) { + retval = -1; + } else if (__HALF_TO_CUS(a) == 0x7C00U) { + retval = 1; + } else { + retval = 0; + } + return retval; +} +__CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half __high2half(const __half2 a) +{ + __half ret; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b16 %0, high;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a))); + return ret; +} +__CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b) +{ + __half2 val; + asm("{ mov.b32 %0, {%1,%2};}\n" + : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); + return val; +} +__CUDA_FP16_DECL__ __half2 __half2half2(const __half a) +{ + __half2 val; + asm("{ mov.b32 %0, {%1,%1};}\n" + : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a) +{ + __half2 val; + asm("{.reg .f16 low,high;\n" + " mov.b32 {low,high}, %1;\n" + " mov.b32 %0, {high,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ short int __half_as_short(const __half h) +{ + return static_cast(__HALF_TO_CUS(h)); +} +__CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h) +{ + return __HALF_TO_CUS(h); +} +__CUDA_FP16_DECL__ __half __short_as_half(const short int i) +{ + __half h; + 
__HALF_TO_US(h) = static_cast(i); + return h; +} +__CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i) +{ + __half h; + __HALF_TO_US(h) = i; + return h; +} + +#if __CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__) +/****************************************************************************** +* __half, __half2 warp shuffle * +******************************************************************************/ +#define __SHUFFLE_HALF2_MACRO(name) /* do */ {\ + __half2 r; \ + asm volatile ("{"#name" %0,%1,%2,%3;\n}" \ + :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c)); \ + return r; \ +} /* while(0) */ + +#define __SHUFFLE_SYNC_HALF2_MACRO(name) /* do */ {\ + __half2 r; \ + asm volatile ("{"#name" %0,%1,%2,%3,%4;\n}" \ + :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \ + return r; \ +} /* while(0) */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 + +__CUDA_FP16_DECL__ __half2 __shfl(const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.idx.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_HALF2_MACRO(shfl.up.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_down(const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.down.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_xor(const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const 
unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_HALF2_MACRO(shfl.bfly.b32) +} + +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */ + +__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.idx.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = (warp_size - static_cast(width)) << 8U; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.up.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.down.b32) +} +__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width) +{ + unsigned int warp_size; + asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size)); + const unsigned int c = ((warp_size - static_cast(width)) << 8U) | 0x1fU; + __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.bfly.b32) +} + +#undef __SHUFFLE_HALF2_MACRO +#undef __SHUFFLE_SYNC_HALF2_MACRO + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 + +__CUDA_FP16_DECL__ __half __shfl(const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_up(const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 
temp2 = __shfl_up(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_down(const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_down(temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_xor(const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_xor(temp1, delta, width); + return __low2half(temp2); +} + +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */ + +__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_up_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_down_sync(mask, temp1, delta, width); + return __low2half(temp2); +} +__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width) +{ + const __half2 temp1 = __halves2half2(var, var); + const __half2 temp2 = __shfl_xor_sync(mask, temp1, delta, width); + return __low2half(temp2); +} + +#endif /*__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__)*/ +/****************************************************************************** +* __half and __half2 __ldg,__ldcg,__ldca,__ldcs * +******************************************************************************/ + +#if defined(__cplusplus) && 
(__CUDA_ARCH__ >= 320 || !defined(__CUDA_ARCH__)) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ +__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr) +{ + __half ret; + asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr) +{ + __half ret; + asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr)); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr) +{ + __half ret; + asm ("ld.global.lu.b16 %0, 
[%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr) +{ + __half2 ret; + asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr) +{ + __half ret; + asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory"); + return ret; +} +__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value) +{ + asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value) +{ + asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value) +{ + asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value) +{ + asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory"); +} +__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value) +{ + asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory"); +} +#undef __LDG_PTR +#endif /*defined(__cplusplus) && (__CUDA_ARCH__ >= 320 || !defined(__CUDA_ARCH__))*/ +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) 
+/****************************************************************************** +* __half2 comparison * +******************************************************************************/ +#define __COMPARISON_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + asm( "{ "#name".f16x2.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + return val; \ +} /* while(0) */ +__CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.eq) +} +__CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ne) +} +__CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.le) +} +__CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ge) +} +__CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.lt) +} +__CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.gt) +} +__CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.equ) +} +__CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.neu) +} +__CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.leu) +} +__CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.geu) +} +__CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.ltu) +} +__CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b) +{ + __COMPARISON_OP_HALF2_MACRO(set.gtu) +} +#undef __COMPARISON_OP_HALF2_MACRO +#define __BOOL_COMPARISON_OP_HALF2_MACRO(name) /* do */ {\ + __half2 val; \ + bool retval; \ + asm( "{ "#name".f16x2.f16x2 %0,%1,%2;\n}" \ + :"=r"(__HALF2_TO_UI(val)) : 
"r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \ + if (__HALF2_TO_CUI(val) == 0x3C003C00U) {\ + retval = true; \ + } else { \ + retval = false; \ + }\ + return retval;\ +} /* while(0) */ +__CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.eq) +} +__CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ne) +} +__CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.le) +} +__CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ge) +} +__CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.lt) +} +__CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.gt) +} +__CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.equ) +} +__CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.neu) +} +__CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.leu) +} +__CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.geu) +} +__CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.ltu) +} +__CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b) +{ + __BOOL_COMPARISON_OP_HALF2_MACRO(set.gtu) +} +#undef __BOOL_COMPARISON_OP_HALF2_MACRO +/****************************************************************************** +* __half comparison * +******************************************************************************/ +#define __COMPARISON_OP_HALF_MACRO(name) /* do */ {\ + unsigned short val; \ + asm( "{ .reg .pred __$temp3;\n" \ + " setp."#name".f16 __$temp3, %1, %2;\n" \ + " selp.u16 %0, 1, 0, 
__$temp3;}" \ + : "=h"(val) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); \ + return (val != 0U) ? true : false; \ +} /* while(0) */ +__CUDA_FP16_DECL__ bool __heq(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(eq) +} +__CUDA_FP16_DECL__ bool __hne(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ne) +} +__CUDA_FP16_DECL__ bool __hle(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(le) +} +__CUDA_FP16_DECL__ bool __hge(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ge) +} +__CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(lt) +} +__CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(gt) +} +__CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(equ) +} +__CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(neu) +} +__CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(leu) +} +__CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(geu) +} +__CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(ltu) +} +__CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b) +{ + __COMPARISON_OP_HALF_MACRO(gtu) +} +#undef __COMPARISON_OP_HALF_MACRO +/****************************************************************************** +* __half2 arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(add) +} +__CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub) +} +__CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul) +} +__CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b) +{ + 
__BINARY_OP_HALF2_MACRO(add.sat) +} +__CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(sub.sat) +} +__CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(mul.sat) +} +__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn) +} +__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn.sat) +} +__CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b) { + __half ha = __low2half(a); + __half hb = __low2half(b); + + const __half v1 = __hdiv(ha, hb); + + ha = __high2half(a); + hb = __high2half(b); + + const __half v2 = __hdiv(ha, hb); + + return __halves2half2(v1, v2); +} +/****************************************************************************** +* __half arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half __hadd(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add) +} +__CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub) +} +__CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul) +} +__CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(add.sat) +} +__CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(sub.sat) +} +__CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(mul.sat) +} + +__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn) +} +__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn.sat) +} +__CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b) { + __half v; + __half abs; + __half 
den; + __HALF_TO_US(den) = 0x008FU; + + float rcp; + const float fa = __half2float(a); + const float fb = __half2float(b); + + asm("{rcp.approx.ftz.f32 %0, %1;\n}" :"=f"(rcp) : "f"(fb)); + + float fv = rcp * fa; + + v = __float2half(fv); + __HALF_TO_US(abs) = static_cast(static_cast(__HALF_TO_CUS(v)) & 0x00007FFFU); + if (__hlt(abs, den) && (!(__HALF_TO_CUS(abs) == 0x0000U))) { + const float err = __fmaf_rn(-fb, fv, fa); + fv = __fmaf_rn(rcp, err, fv); + v = __float2half(fv); + } + return v; +} + +/****************************************************************************** +* __half2 functions * +******************************************************************************/ +#define __SPEC_CASE2(i,r, spc, ulp) \ + "{.reg.b32 spc, ulp, p;\n"\ + " mov.b32 spc,"#spc";\n"\ + " mov.b32 ulp,"#ulp";\n"\ + " set.eq.f16x2.f16x2 p,"#i", spc;\n"\ + " fma.rn.f16x2 "#r",p,ulp,"#r";\n}\n" +#define __SPEC_CASE(i,r, spc, ulp) \ + "{.reg.b16 spc, ulp, p;\n"\ + " mov.b16 spc,"#spc";\n"\ + " mov.b16 ulp,"#ulp";\n"\ + " set.eq.f16.f16 p,"#i", spc;\n"\ + " fma.rn.f16 "#r",p,ulp,"#r";\n}\n" +#define __APPROX_FCAST(fun) /* do */ {\ + __half val;\ + asm("{.reg.b32 f; \n"\ + " .reg.b16 r; \n"\ + " mov.b16 r,%1; \n"\ + " cvt.f32.f16 f,r; \n"\ + " "#fun".approx.f32 f,f; \n"\ + " cvt.rn.f16.f32 r,f; \n"\ + " mov.b16 %0,r; \n"\ + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));\ + return val;\ +} /* while(0) */ +#define __APPROX_FCAST2(fun) /* do */ {\ + __half2 val;\ + asm("{.reg.b16 hl, hu; \n"\ + " .reg.b32 fl, fu; \n"\ + " mov.b32 {hl, hu}, %1; \n"\ + " cvt.f32.f16 fl, hl; \n"\ + " cvt.f32.f16 fu, hu; \n"\ + " "#fun".approx.f32 fl, fl; \n"\ + " "#fun".approx.f32 fu, fu; \n"\ + " cvt.rn.f16.f32 hl, fl; \n"\ + " cvt.rn.f16.f32 hu, fu; \n"\ + " mov.b32 %0, {hl, hu}; \n"\ + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); \ + return val;\ +} /* while(0) */ +static __device__ __forceinline__ float __float_simpl_sinf(float a); +static __device__ __forceinline__ float 
__float_simpl_cosf(float a); +__CUDA_FP16_DECL__ __half __hsin_internal(const __half a) { + float f = __half2float(a); + f = __float_simpl_sinf(f); + return __float2half_rn(f); +} +__CUDA_FP16_DECL__ __half hsin(const __half a) { + __half r = __hsin_internal(a); + asm("{\n\t" + " .reg.b16 i,r,t; \n\t" + " mov.b16 r, %0; \n\t" + " mov.b16 i, %1; \n\t" + " mov.b16 t, 0x8000U; \n\t" + " and.b16 t,r,t; \n\t" + __SPEC_CASE(i, r, 0X32B3U, 0x0800U) + __SPEC_CASE(i, r, 0X5CB0U, 0x1000U) + __SPEC_CASE(i, r, 0XB2B3U, 0x8800U) + __SPEC_CASE(i, r, 0XDCB0U, 0x9000U) + " or.b16 r,r,t; \n\t" + " mov.b16 %0, r; \n" + "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a) { + const __half l = __low2half(a); + const __half h = __high2half(a); + const __half sl = __hsin_internal(l); + const __half sh = __hsin_internal(h); + __half2 r = __halves2half2(sl, sh); + asm("{\n\t" + " .reg.b32 i,r,t; \n\t" + " mov.b32 r, %0; \n\t" + " mov.b32 i, %1; \n\t" + " and.b32 t, r, 0x80008000U; \n\t" + __SPEC_CASE2(i, r, 0X32B332B3U, 0x08000800U) + __SPEC_CASE2(i, r, 0X5CB05CB0U, 0x10001000U) + __SPEC_CASE2(i, r, 0XB2B3B2B3U, 0x88008800U) + __SPEC_CASE2(i, r, 0XDCB0DCB0U, 0x90009000U) + " or.b32 r, r, t; \n\t" + " mov.b32 %0, r; \n" + "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __hcos_internal(const __half a) { + float f = __half2float(a); + f = __float_simpl_cosf(f); + return __float2half_rn(f); +} +__CUDA_FP16_DECL__ __half hcos(const __half a) { + __half r = __hcos_internal(a); + asm("{\n\t" + " .reg.b16 i,r; \n\t" + " mov.b16 r, %0; \n\t" + " mov.b16 i, %1; \n\t" + __SPEC_CASE(i, r, 0X2B7CU, 0x1000U) + __SPEC_CASE(i, r, 0XAB7CU, 0x1000U) + " mov.b16 %0, r; \n" + "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a) { + const __half l = __low2half(a); + const __half h = __high2half(a); + const __half cl = 
__hcos_internal(l); + const __half ch = __hcos_internal(h); + __half2 r = __halves2half2(cl, ch); + asm("{\n\t" + " .reg.b32 i,r; \n\t" + " mov.b32 r, %0; \n\t" + " mov.b32 i, %1; \n\t" + __SPEC_CASE2(i, r, 0X2B7C2B7CU, 0x10001000U) + __SPEC_CASE2(i, r, 0XAB7CAB7CU, 0x10001000U) + " mov.b32 %0, r; \n" + "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +static __device__ __forceinline__ float __internal_trig_reduction_kernel(const float a, int *quadrant) +{ + const int q = __float2int_rn(a * 0.636619772F); + const float j = static_cast(q); + float t = __fmaf_rn(-j, 1.5707962512969971e+000F, a); + t = __fmaf_rn(-j, 7.5497894158615964e-008F, t); + *quadrant = q; + return t; +} +static __device__ __forceinline__ float __internal_sin_cos_kernel(float x, const int i) +{ + float z; + const float x2 = x*x; + + if ((static_cast(i) & 1U) != 0U) { + z = 2.44331571e-5F; + z = __fmaf_rn(z, x2, -1.38873163e-3F); + } + else { + z = -1.95152959e-4F; + z = __fmaf_rn(z, x2, 8.33216087e-3F); + } + if ((static_cast(i) & 1U) != 0U) { + z = __fmaf_rn(z, x2, 4.16666457e-2F); + z = __fmaf_rn(z, x2, -5.00000000e-1F); + } + else { + z = __fmaf_rn(z, x2, -1.66666546e-1F); + z = __fmaf_rn(z, x2, 0.0F); + } + if ((static_cast(i) & 1U) != 0U) { + x = __fmaf_rn(z, x2, 1.0F); + } + else { + x = __fmaf_rn(z, x, x); + } + if ((static_cast(i) & 2U) != 0U) { + x = __fmaf_rn(x, -1.0F, 0.0F); + } + return x; +} +static __device__ __forceinline__ float __float_simpl_sinf(float a) +{ + float z; + int i; + if (::isinf(a)) { + a = a * 0.0F; + } + a = __internal_trig_reduction_kernel(a, &i); + z = __internal_sin_cos_kernel(a, i); + return z; +} +static __device__ __forceinline__ float __float_simpl_cosf(float a) +{ + float z; + int i; + if (::isinf(a)) { + a = a * 0.0F; + } + a = __internal_trig_reduction_kernel(a, &i); + i++; + z = __internal_sin_cos_kernel(a, i); + return z; +} + +__CUDA_FP16_DECL__ __half hexp(const __half a) { + __half val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 
h,r; \n" + " mov.b16 h,%1; \n" + " cvt.f32.f16 f,h; \n" + " mov.b32 C, 0x3fb8aa3bU; \n" + " mul.f32 f,f,C; \n" + " ex2.approx.f32 f,f; \n" + " cvt.rn.f16.f32 r,f; \n" + __SPEC_CASE(h, r, 0X1F79U, 0x9400U) + __SPEC_CASE(h, r, 0X25CFU, 0x9400U) + __SPEC_CASE(h, r, 0XC13BU, 0x0400U) + __SPEC_CASE(h, r, 0XC1EFU, 0x0200U) + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu, C; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " mov.b32 C, 0x3fb8aa3bU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0X1F791F79U, 0x94009400U) + __SPEC_CASE2(h, r, 0X25CF25CFU, 0x94009400U) + __SPEC_CASE2(h, r, 0XC13BC13BU, 0x04000400U) + __SPEC_CASE2(h, r, 0XC1EFC1EFU, 0x02000200U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hexp2(const __half a) { + __half val; + asm("{.reg.b32 f, ULP; \n" + " .reg.b16 r; \n" + " mov.b16 r,%1; \n" + " cvt.f32.f16 f,r; \n" + " ex2.approx.f32 f,f; \n" + " mov.b32 ULP, 0x33800000U;\n" + " fma.rn.f32 f,f,ULP,f; \n" + " cvt.rn.f16.f32 r,f; \n" + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 fl, fu, ULP; \n" + " mov.b32 {hl, hu}, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " mov.b32 ULP, 0x33800000U;\n" + " fma.rn.f32 fl,fl,ULP,fl; \n" + " fma.rn.f32 fu,fu,ULP,fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 
%0, {hl, hu}; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hexp10(const __half a) { + __half val; + asm("{.reg.b16 h,r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " mov.b32 C, 0x40549A78U; \n" + " mul.f32 f,f,C; \n" + " ex2.approx.f32 f, f; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(h, r, 0x34DEU, 0x9800U) + __SPEC_CASE(h, r, 0x9766U, 0x9000U) + __SPEC_CASE(h, r, 0x9972U, 0x1000U) + __SPEC_CASE(h, r, 0xA5C4U, 0x1000U) + __SPEC_CASE(h, r, 0xBF0AU, 0x8100U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 h,r,fl,fu, C; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " mov.b32 C, 0x40549A78U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " ex2.approx.f32 fl, fl; \n" + " ex2.approx.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0x34DE34DEU, 0x98009800U) + __SPEC_CASE2(h, r, 0x97669766U, 0x90009000U) + __SPEC_CASE2(h, r, 0x99729972U, 0x10001000U) + __SPEC_CASE2(h, r, 0xA5C4A5C4U, 0x10001000U) + __SPEC_CASE2(h, r, 0xBF0ABF0AU, 0x81008100U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog2(const __half a) { + __half val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " lg2.approx.f32 f, f; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(r, r, 0xA2E2U, 0x8080U) + __SPEC_CASE(r, r, 0xBF46U, 0x9400U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 fl, fu, r, p; \n" + " mov.b32 {hl, 
hu}, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(r, r, 0xA2E2A2E2U, 0x80808080U) + __SPEC_CASE2(r, r, 0xBF46BF46U, 0x94009400U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog(const __half a) { + __half val; + asm("{.reg.b32 f, C; \n" + " .reg.b16 r,h; \n" + " mov.b16 h,%1; \n" + " cvt.f32.f16 f,h; \n" + " lg2.approx.f32 f,f; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.f16.f32 r,f; \n" + __SPEC_CASE(h, r, 0X160DU, 0x9C00U) + __SPEC_CASE(h, r, 0X3BFEU, 0x8010U) + __SPEC_CASE(h, r, 0X3C0BU, 0x8080U) + __SPEC_CASE(h, r, 0X6051U, 0x1C00U) + " mov.b16 %0,r; \n" + "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " mov.b32 C, 0x3f317218U; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0X160D160DU, 0x9C009C00U) + __SPEC_CASE2(h, r, 0X3BFE3BFEU, 0x80108010U) + __SPEC_CASE2(h, r, 0X3C0B3C0BU, 0x80808080U) + __SPEC_CASE2(h, r, 0X60516051U, 0x1C001C00U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +__CUDA_FP16_DECL__ __half hlog10(const __half a) { + __half val; + asm("{.reg.b16 h, r; \n" + " .reg.b32 f, C; \n" + " mov.b16 h, %1; \n" + " cvt.f32.f16 f, h; \n" + " lg2.approx.f32 f, f; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 f,f,C; \n" + " cvt.rn.f16.f32 r, f; \n" + __SPEC_CASE(h, r, 
0x338FU, 0x1000U) + __SPEC_CASE(h, r, 0x33F8U, 0x9000U) + __SPEC_CASE(h, r, 0x57E1U, 0x9800U) + __SPEC_CASE(h, r, 0x719DU, 0x9C00U) + " mov.b16 %0, r; \n" + "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a))); + return val; +} +__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a) { + __half2 val; + asm("{.reg.b16 hl, hu; \n" + " .reg.b32 r, fl, fu, C, h; \n" + " mov.b32 {hl, hu}, %1; \n" + " mov.b32 h, %1; \n" + " cvt.f32.f16 fl, hl; \n" + " cvt.f32.f16 fu, hu; \n" + " lg2.approx.f32 fl, fl; \n" + " lg2.approx.f32 fu, fu; \n" + " mov.b32 C, 0x3E9A209BU; \n" + " mul.f32 fl,fl,C; \n" + " mul.f32 fu,fu,C; \n" + " cvt.rn.f16.f32 hl, fl; \n" + " cvt.rn.f16.f32 hu, fu; \n" + " mov.b32 r, {hl, hu}; \n" + __SPEC_CASE2(h, r, 0x338F338FU, 0x10001000U) + __SPEC_CASE2(h, r, 0x33F833F8U, 0x90009000U) + __SPEC_CASE2(h, r, 0x57E157E1U, 0x98009800U) + __SPEC_CASE2(h, r, 0x719D719DU, 0x9C009C00U) + " mov.b32 %0, r; \n" + "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); + return val; +} +#undef __SPEC_CASE2 +#undef __SPEC_CASE +__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a) { + __APPROX_FCAST2(rcp) +} +__CUDA_FP16_DECL__ __half hrcp(const __half a) { + __APPROX_FCAST(rcp) +} +__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a) { + __APPROX_FCAST2(rsqrt) +} +__CUDA_FP16_DECL__ __half hrsqrt(const __half a) { + __APPROX_FCAST(rsqrt) +} +__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a) { + __APPROX_FCAST2(sqrt) +} +__CUDA_FP16_DECL__ __half hsqrt(const __half a) { + __APPROX_FCAST(sqrt) +} +#undef __APPROX_FCAST +#undef __APPROX_FCAST2 +__CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a) +{ + __half2 r; + asm("{set.nan.f16x2.f16x2 %0,%1,%2;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ bool __hisnan(const __half a) +{ + __half r; + asm("{set.nan.f16.f16 %0,%1,%2;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(a))); + return __HALF_TO_CUS(r) != 0U; +} +__CUDA_FP16_DECL__ __half2 
__hneg2(const __half2 a) +{ + __half2 r; + asm("{neg.f16x2 %0,%1;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __hneg(const __half a) +{ + __half r; + asm("{neg.f16 %0,%1;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} +__CUDA_FP16_DECL__ __half2 __habs2(const __half2 a) +{ + __half2 r; + asm("{abs.f16x2 %0,%1;\n}" + :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a))); + return r; +} +__CUDA_FP16_DECL__ __half __habs(const __half a) +{ + __half r; + asm("{abs.f16 %0,%1;\n}" + :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a))); + return r; +} + +__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c) +{ + // fast version of complex multiply-accumulate + // (a.re, a.im) * (b.re, b.im) + (c.re, c.im) + // acc.re = (c.re + a.re*b.re) - a.im*b.im + // acc.im = (c.im + a.re*b.im) + a.im*b.re + const __half2 a_re = __half2half2(a.x); + __half2 acc = __hfma2(a_re, b, c); + const __half2 a_im = __half2half2(a.y); + const __half2 ib = __halves2half2(__hneg(b.y), b.x); + acc = __hfma2(a_im, ib, acc); + return acc; +} +#endif /*__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ + +#if __CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__) +/****************************************************************************** +* __half arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(max) +} +__CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(min) +} +__CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(max.NaN) +} +__CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b) +{ + __BINARY_OP_HALF_MACRO(min.NaN) +} +__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c) +{ + __TERNARY_OP_HALF_MACRO(fma.rn.relu) +} 
+/****************************************************************************** +* __half2 arithmetic * +******************************************************************************/ +__CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(max) +} +__CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(min) +} +__CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(max.NaN) +} +__CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b) +{ + __BINARY_OP_HALF2_MACRO(min.NaN) +} +__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c) +{ + __TERNARY_OP_HALF2_MACRO(fma.rn.relu) +} +#endif /*__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)*/ + +/* Define __PTR for atomicAdd prototypes below, undef after done */ +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __PTR "l" +#else +#define __PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val) { + __half2 r; + asm volatile ("{ atom.add.noftz.f16x2 %0,[%1],%2; }\n" + : "=r"(__HALF2_TO_UI(r)) : __PTR(address), "r"(__HALF2_TO_CUI(val)) + : "memory"); + return r; +} + +#endif /*!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600*/ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + +__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val) { + __half r; + asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n" + : "=h"(__HALF_TO_US(r)) + : __PTR(address), "h"(__HALF_TO_CUS(val)) + : "memory"); + return r; +} + +#endif /*!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700*/ + +#undef __PTR + +#undef __CUDA_FP16_DECL__ +#endif /* defined(__CUDACC__) */ +#endif /* defined(__cplusplus) */ + +#undef 
__TERNARY_OP_HALF2_MACRO +#undef __TERNARY_OP_HALF_MACRO +#undef __BINARY_OP_HALF2_MACRO +#undef __BINARY_OP_HALF_MACRO + +#undef __CUDA_HOSTDEVICE_FP16_DECL__ +#undef __CUDA_FP16_DECL__ + +/* Define first-class types "half" and "half2", unless user specifies otherwise via "#define CUDA_NO_HALF" */ +/* C cannot ever have these types defined here, because __half and __half2 are C++ classes */ +#if defined(__cplusplus) && !defined(CUDA_NO_HALF) +typedef __half half; +typedef __half2 half2; +// for consistency with __nv_bfloat16 +typedef __half __nv_half; +typedef __half2 __nv_half2; +typedef __half_raw __nv_half_raw; +typedef __half2_raw __nv_half2_raw; +typedef __half nv_half; +typedef __half2 nv_half2; +#endif /* defined(__cplusplus) && !defined(CUDA_NO_HALF) */ + +#if defined(__CPP_VERSION_AT_LEAST_11_FP16) +#undef __CPP_VERSION_AT_LEAST_11_FP16 +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */ + +#endif /* end of include guard: __CUDA_FP16_HPP__ */ diff --git a/lib/python3.10/site-packages/numba/cuda/cuda_paths.py b/lib/python3.10/site-packages/numba/cuda/cuda_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..8ff96640d16bab02679117b3df61c546717ced39 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cuda_paths.py @@ -0,0 +1,258 @@ +import sys +import re +import os +from collections import namedtuple + +from numba.core.config import IS_WIN32 +from numba.misc.findlib import find_lib, find_file + + +_env_path_tuple = namedtuple('_env_path_tuple', ['by', 'info']) + + +def _find_valid_path(options): + """Find valid path from *options*, which is a list of 2-tuple of + (name, path). Return first pair where *path* is not None. 
+ If no valid path is found, return ('', None) + """ + for by, data in options: + if data is not None: + return by, data + else: + return '', None + + +def _get_libdevice_path_decision(): + options = [ + ('Conda environment', get_conda_ctk()), + ('Conda environment (NVIDIA package)', get_nvidia_libdevice_ctk()), + ('CUDA_HOME', get_cuda_home('nvvm', 'libdevice')), + ('System', get_system_ctk('nvvm', 'libdevice')), + ('Debian package', get_debian_pkg_libdevice()), + ] + by, libdir = _find_valid_path(options) + return by, libdir + + +def _nvvm_lib_dir(): + if IS_WIN32: + return 'nvvm', 'bin' + else: + return 'nvvm', 'lib64' + + +def _get_nvvm_path_decision(): + options = [ + ('Conda environment', get_conda_ctk()), + ('Conda environment (NVIDIA package)', get_nvidia_nvvm_ctk()), + ('CUDA_HOME', get_cuda_home(*_nvvm_lib_dir())), + ('System', get_system_ctk(*_nvvm_lib_dir())), + ] + by, path = _find_valid_path(options) + return by, path + + +def _get_libdevice_paths(): + by, libdir = _get_libdevice_path_decision() + # Search for pattern + pat = r'libdevice(\.\d+)*\.bc$' + candidates = find_file(re.compile(pat), libdir) + # Keep only the max (most recent version) of the bitcode files. 
+ out = max(candidates, default=None) + return _env_path_tuple(by, out) + + +def _cudalib_path(): + if IS_WIN32: + return 'bin' + else: + return 'lib64' + + +def _cuda_home_static_cudalib_path(): + if IS_WIN32: + return ('lib', 'x64') + else: + return ('lib64',) + + +def _get_cudalib_dir_path_decision(): + options = [ + ('Conda environment', get_conda_ctk()), + ('Conda environment (NVIDIA package)', get_nvidia_cudalib_ctk()), + ('CUDA_HOME', get_cuda_home(_cudalib_path())), + ('System', get_system_ctk(_cudalib_path())), + ] + by, libdir = _find_valid_path(options) + return by, libdir + + +def _get_static_cudalib_dir_path_decision(): + options = [ + ('Conda environment', get_conda_ctk()), + ('Conda environment (NVIDIA package)', get_nvidia_static_cudalib_ctk()), + ('CUDA_HOME', get_cuda_home(*_cuda_home_static_cudalib_path())), + ('System', get_system_ctk(_cudalib_path())), + ] + by, libdir = _find_valid_path(options) + return by, libdir + + +def _get_cudalib_dir(): + by, libdir = _get_cudalib_dir_path_decision() + return _env_path_tuple(by, libdir) + + +def _get_static_cudalib_dir(): + by, libdir = _get_static_cudalib_dir_path_decision() + return _env_path_tuple(by, libdir) + + +def get_system_ctk(*subdirs): + """Return path to system-wide cudatoolkit; or, None if it doesn't exist. + """ + # Linux? + if sys.platform.startswith('linux'): + # Is cuda alias to /usr/local/cuda? + # We are intentionally not getting versioned cuda installation. + base = '/usr/local/cuda' + if os.path.exists(base): + return os.path.join(base, *subdirs) + + +def get_conda_ctk(): + """Return path to directory containing the shared libraries of cudatoolkit. 
+ """ + is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta')) + if not is_conda_env: + return + # Assume the existence of NVVM to imply cudatoolkit installed + paths = find_lib('nvvm') + if not paths: + return + # Use the directory name of the max path + return os.path.dirname(max(paths)) + + +def get_nvidia_nvvm_ctk(): + """Return path to directory containing the NVVM shared library. + """ + is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta')) + if not is_conda_env: + return + + # Assume the existence of NVVM in the conda env implies that a CUDA toolkit + # conda package is installed. + + # First, try the location used on Linux and the Windows 11.x packages + libdir = os.path.join(sys.prefix, 'nvvm', _cudalib_path()) + if not os.path.exists(libdir) or not os.path.isdir(libdir): + # If that fails, try the location used for Windows 12.x packages + libdir = os.path.join(sys.prefix, 'Library', 'nvvm', _cudalib_path()) + if not os.path.exists(libdir) or not os.path.isdir(libdir): + # If that doesn't exist either, assume we don't have the NVIDIA + # conda package + return + + paths = find_lib('nvvm', libdir=libdir) + if not paths: + return + # Use the directory name of the max path + return os.path.dirname(max(paths)) + + +def get_nvidia_libdevice_ctk(): + """Return path to directory containing the libdevice library. + """ + nvvm_ctk = get_nvidia_nvvm_ctk() + if not nvvm_ctk: + return + nvvm_dir = os.path.dirname(nvvm_ctk) + return os.path.join(nvvm_dir, 'libdevice') + + +def get_nvidia_cudalib_ctk(): + """Return path to directory containing the shared libraries of cudatoolkit. + """ + nvvm_ctk = get_nvidia_nvvm_ctk() + if not nvvm_ctk: + return + env_dir = os.path.dirname(os.path.dirname(nvvm_ctk)) + subdir = 'bin' if IS_WIN32 else 'lib' + return os.path.join(env_dir, subdir) + + +def get_nvidia_static_cudalib_ctk(): + """Return path to directory containing the static libraries of cudatoolkit. 
+ """ + nvvm_ctk = get_nvidia_nvvm_ctk() + if not nvvm_ctk: + return + + if IS_WIN32 and ("Library" not in nvvm_ctk): + # Location specific to CUDA 11.x packages on Windows + dirs = ('Lib', 'x64') + else: + # Linux, or Windows with CUDA 12.x packages + dirs = ('lib',) + + env_dir = os.path.dirname(os.path.dirname(nvvm_ctk)) + return os.path.join(env_dir, *dirs) + + +def get_cuda_home(*subdirs): + """Get paths of CUDA_HOME. + If *subdirs* are the subdirectory name to be appended in the resulting + path. + """ + cuda_home = os.environ.get('CUDA_HOME') + if cuda_home is None: + # Try Windows CUDA installation without Anaconda + cuda_home = os.environ.get('CUDA_PATH') + if cuda_home is not None: + return os.path.join(cuda_home, *subdirs) + + +def _get_nvvm_path(): + by, path = _get_nvvm_path_decision() + candidates = find_lib('nvvm', path) + path = max(candidates) if candidates else None + return _env_path_tuple(by, path) + + +def get_cuda_paths(): + """Returns a dictionary mapping component names to a 2-tuple + of (source_variable, info). + + The returned dictionary will have the following keys and infos: + - "nvvm": file_path + - "libdevice": List[Tuple[arch, file_path]] + - "cudalib_dir": directory_path + + Note: The result of the function is cached. + """ + # Check cache + if hasattr(get_cuda_paths, '_cached_result'): + return get_cuda_paths._cached_result + else: + # Not in cache + d = { + 'nvvm': _get_nvvm_path(), + 'libdevice': _get_libdevice_paths(), + 'cudalib_dir': _get_cudalib_dir(), + 'static_cudalib_dir': _get_static_cudalib_dir(), + } + # Cache result + get_cuda_paths._cached_result = d + return d + + +def get_debian_pkg_libdevice(): + """ + Return the Debian NVIDIA Maintainers-packaged libdevice location, if it + exists. 
+ """ + pkg_libdevice_location = '/usr/lib/nvidia-cuda-toolkit/libdevice' + if not os.path.exists(pkg_libdevice_location): + return None + return pkg_libdevice_location diff --git a/lib/python3.10/site-packages/numba/cuda/cudadecl.py b/lib/python3.10/site-packages/numba/cuda/cudadecl.py new file mode 100644 index 0000000000000000000000000000000000000000..dde8977936e79d90fb911268d767e9144d65aa9a --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cudadecl.py @@ -0,0 +1,806 @@ +import operator +from numba.core import types +from numba.core.typing.npydecl import (parse_dtype, parse_shape, + register_number_classes, + register_numpy_ufunc, + trigonometric_functions, + comparison_functions, + math_operations, + bit_twiddling_functions) +from numba.core.typing.templates import (AttributeTemplate, ConcreteTemplate, + AbstractTemplate, CallableTemplate, + signature, Registry) +from numba.cuda.types import dim3 +from numba.core.typeconv import Conversion +from numba import cuda +from numba.cuda.compiler import declare_device_function_template + +registry = Registry() +register = registry.register +register_attr = registry.register_attr +register_global = registry.register_global + +register_number_classes(register_global) + + +class Cuda_array_decl(CallableTemplate): + def generic(self): + def typer(shape, dtype): + + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + return None + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) + for s in shape]): + return None + else: + return None + + ndim = parse_shape(shape) + nb_dtype = parse_dtype(dtype) + if nb_dtype is not None and ndim is not None: + return types.Array(dtype=nb_dtype, ndim=ndim, layout='C') + + return typer + + +@register +class Cuda_shared_array(Cuda_array_decl): + key = cuda.shared.array + + +@register +class 
Cuda_local_array(Cuda_array_decl): + key = cuda.local.array + + +@register +class Cuda_const_array_like(CallableTemplate): + key = cuda.const.array_like + + def generic(self): + def typer(ndarray): + return ndarray + return typer + + +@register +class Cuda_threadfence_device(ConcreteTemplate): + key = cuda.threadfence + cases = [signature(types.none)] + + +@register +class Cuda_threadfence_block(ConcreteTemplate): + key = cuda.threadfence_block + cases = [signature(types.none)] + + +@register +class Cuda_threadfence_system(ConcreteTemplate): + key = cuda.threadfence_system + cases = [signature(types.none)] + + +@register +class Cuda_syncwarp(ConcreteTemplate): + key = cuda.syncwarp + cases = [signature(types.none), signature(types.none, types.i4)] + + +@register +class Cuda_shfl_sync_intrinsic(ConcreteTemplate): + key = cuda.shfl_sync_intrinsic + cases = [ + signature(types.Tuple((types.i4, types.b1)), + types.i4, types.i4, types.i4, types.i4, types.i4), + signature(types.Tuple((types.i8, types.b1)), + types.i4, types.i4, types.i8, types.i4, types.i4), + signature(types.Tuple((types.f4, types.b1)), + types.i4, types.i4, types.f4, types.i4, types.i4), + signature(types.Tuple((types.f8, types.b1)), + types.i4, types.i4, types.f8, types.i4, types.i4), + ] + + +@register +class Cuda_vote_sync_intrinsic(ConcreteTemplate): + key = cuda.vote_sync_intrinsic + cases = [signature(types.Tuple((types.i4, types.b1)), + types.i4, types.i4, types.b1)] + + +@register +class Cuda_match_any_sync(ConcreteTemplate): + key = cuda.match_any_sync + cases = [ + signature(types.i4, types.i4, types.i4), + signature(types.i4, types.i4, types.i8), + signature(types.i4, types.i4, types.f4), + signature(types.i4, types.i4, types.f8), + ] + + +@register +class Cuda_match_all_sync(ConcreteTemplate): + key = cuda.match_all_sync + cases = [ + signature(types.Tuple((types.i4, types.b1)), types.i4, types.i4), + signature(types.Tuple((types.i4, types.b1)), types.i4, types.i8), + 
signature(types.Tuple((types.i4, types.b1)), types.i4, types.f4), + signature(types.Tuple((types.i4, types.b1)), types.i4, types.f8), + ] + + +@register +class Cuda_activemask(ConcreteTemplate): + key = cuda.activemask + cases = [signature(types.uint32)] + + +@register +class Cuda_lanemask_lt(ConcreteTemplate): + key = cuda.lanemask_lt + cases = [signature(types.uint32)] + + +@register +class Cuda_popc(ConcreteTemplate): + """ + Supported types from `llvm.popc` + [here](http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics) + """ + key = cuda.popc + cases = [ + signature(types.int8, types.int8), + signature(types.int16, types.int16), + signature(types.int32, types.int32), + signature(types.int64, types.int64), + signature(types.uint8, types.uint8), + signature(types.uint16, types.uint16), + signature(types.uint32, types.uint32), + signature(types.uint64, types.uint64), + ] + + +@register +class Cuda_fma(ConcreteTemplate): + """ + Supported types from `llvm.fma` + [here](https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#standard-c-library-intrinics) + """ + key = cuda.fma + cases = [ + signature(types.float32, types.float32, types.float32, types.float32), + signature(types.float64, types.float64, types.float64, types.float64), + ] + + +@register +class Cuda_hfma(ConcreteTemplate): + key = cuda.fp16.hfma + cases = [ + signature(types.float16, types.float16, types.float16, types.float16) + ] + + +@register +class Cuda_cbrt(ConcreteTemplate): + + key = cuda.cbrt + cases = [ + signature(types.float32, types.float32), + signature(types.float64, types.float64), + ] + + +@register +class Cuda_brev(ConcreteTemplate): + key = cuda.brev + cases = [ + signature(types.uint32, types.uint32), + signature(types.uint64, types.uint64), + ] + + +@register +class Cuda_clz(ConcreteTemplate): + """ + Supported types from `llvm.ctlz` + [here](http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics) + """ + key = cuda.clz + cases = [ + 
signature(types.int8, types.int8), + signature(types.int16, types.int16), + signature(types.int32, types.int32), + signature(types.int64, types.int64), + signature(types.uint8, types.uint8), + signature(types.uint16, types.uint16), + signature(types.uint32, types.uint32), + signature(types.uint64, types.uint64), + ] + + +@register +class Cuda_ffs(ConcreteTemplate): + """ + Supported types from `llvm.cttz` + [here](http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics) + """ + key = cuda.ffs + cases = [ + signature(types.uint32, types.int8), + signature(types.uint32, types.int16), + signature(types.uint32, types.int32), + signature(types.uint32, types.int64), + signature(types.uint32, types.uint8), + signature(types.uint32, types.uint16), + signature(types.uint32, types.uint32), + signature(types.uint32, types.uint64), + ] + + +@register +class Cuda_selp(AbstractTemplate): + key = cuda.selp + + def generic(self, args, kws): + assert not kws + test, a, b = args + + # per docs + # http://docs.nvidia.com/cuda/parallel-thread-execution/index.html#comparison-and-selection-instructions-selp + supported_types = (types.float64, types.float32, + types.int16, types.uint16, + types.int32, types.uint32, + types.int64, types.uint64) + + if a != b or a not in supported_types: + return + + return signature(a, test, a, a) + + +def _genfp16_unary(l_key): + @register + class Cuda_fp16_unary(ConcreteTemplate): + key = l_key + cases = [signature(types.float16, types.float16)] + + return Cuda_fp16_unary + + +def _genfp16_unary_operator(l_key): + @register_global(l_key) + class Cuda_fp16_unary(AbstractTemplate): + key = l_key + + def generic(self, args, kws): + assert not kws + if len(args) == 1 and args[0] == types.float16: + return signature(types.float16, types.float16) + + return Cuda_fp16_unary + + +def _genfp16_binary(l_key): + @register + class Cuda_fp16_binary(ConcreteTemplate): + key = l_key + cases = [signature(types.float16, types.float16, 
types.float16)] + + return Cuda_fp16_binary + + +@register_global(float) +class Float(AbstractTemplate): + + def generic(self, args, kws): + assert not kws + + [arg] = args + + if arg == types.float16: + return signature(arg, arg) + + +def _genfp16_binary_comparison(l_key): + @register + class Cuda_fp16_cmp(ConcreteTemplate): + key = l_key + + cases = [ + signature(types.b1, types.float16, types.float16) + ] + return Cuda_fp16_cmp + +# If multiple ConcreteTemplates provide typing for a single function, then +# function resolution will pick the first compatible typing it finds even if it +# involves inserting a cast that would be considered undesirable (in this +# specific case, float16s could be cast to float32s for comparisons). +# +# To work around this, we instead use an AbstractTemplate that implements +# exactly the casting logic that we desire. The AbstractTemplate gets +# considered in preference to ConcreteTemplates during typing. +# +# This is tracked as Issue #7863 (https://github.com/numba/numba/issues/7863) - +# once this is resolved it should be possible to replace this AbstractTemplate +# with a ConcreteTemplate to simplify the logic. + + +def _fp16_binary_operator(l_key, retty): + @register_global(l_key) + class Cuda_fp16_operator(AbstractTemplate): + key = l_key + + def generic(self, args, kws): + assert not kws + + if len(args) == 2 and \ + (args[0] == types.float16 or args[1] == types.float16): + if (args[0] == types.float16): + convertible = self.context.can_convert(args[1], args[0]) + else: + convertible = self.context.can_convert(args[0], args[1]) + + # We allow three cases here: + # + # 1. fp16 to fp16 - Conversion.exact + # 2. fp16 to other types fp16 can be promoted to + # - Conversion.promote + # 3. 
fp16 to int8 (safe conversion) - + # - Conversion.safe + + if (convertible == Conversion.exact) or \ + (convertible == Conversion.promote) or \ + (convertible == Conversion.safe): + return signature(retty, types.float16, types.float16) + + return Cuda_fp16_operator + + +def _genfp16_comparison_operator(op): + return _fp16_binary_operator(op, types.b1) + + +def _genfp16_binary_operator(op): + return _fp16_binary_operator(op, types.float16) + + +Cuda_hadd = _genfp16_binary(cuda.fp16.hadd) +Cuda_add = _genfp16_binary_operator(operator.add) +Cuda_iadd = _genfp16_binary_operator(operator.iadd) +Cuda_hsub = _genfp16_binary(cuda.fp16.hsub) +Cuda_sub = _genfp16_binary_operator(operator.sub) +Cuda_isub = _genfp16_binary_operator(operator.isub) +Cuda_hmul = _genfp16_binary(cuda.fp16.hmul) +Cuda_mul = _genfp16_binary_operator(operator.mul) +Cuda_imul = _genfp16_binary_operator(operator.imul) +Cuda_hmax = _genfp16_binary(cuda.fp16.hmax) +Cuda_hmin = _genfp16_binary(cuda.fp16.hmin) +Cuda_hneg = _genfp16_unary(cuda.fp16.hneg) +Cuda_neg = _genfp16_unary_operator(operator.neg) +Cuda_habs = _genfp16_unary(cuda.fp16.habs) +Cuda_abs = _genfp16_unary_operator(abs) +Cuda_heq = _genfp16_binary_comparison(cuda.fp16.heq) +_genfp16_comparison_operator(operator.eq) +Cuda_hne = _genfp16_binary_comparison(cuda.fp16.hne) +_genfp16_comparison_operator(operator.ne) +Cuda_hge = _genfp16_binary_comparison(cuda.fp16.hge) +_genfp16_comparison_operator(operator.ge) +Cuda_hgt = _genfp16_binary_comparison(cuda.fp16.hgt) +_genfp16_comparison_operator(operator.gt) +Cuda_hle = _genfp16_binary_comparison(cuda.fp16.hle) +_genfp16_comparison_operator(operator.le) +Cuda_hlt = _genfp16_binary_comparison(cuda.fp16.hlt) +_genfp16_comparison_operator(operator.lt) +_genfp16_binary_operator(operator.truediv) +_genfp16_binary_operator(operator.itruediv) + + +def _resolve_wrapped_unary(fname): + decl = declare_device_function_template(f'__numba_wrapper_{fname}', + types.float16, + (types.float16,)) + return 
types.Function(decl) + + +def _resolve_wrapped_binary(fname): + decl = declare_device_function_template(f'__numba_wrapper_{fname}', + types.float16, + (types.float16, types.float16,)) + return types.Function(decl) + + +hsin_device = _resolve_wrapped_unary('hsin') +hcos_device = _resolve_wrapped_unary('hcos') +hlog_device = _resolve_wrapped_unary('hlog') +hlog10_device = _resolve_wrapped_unary('hlog10') +hlog2_device = _resolve_wrapped_unary('hlog2') +hexp_device = _resolve_wrapped_unary('hexp') +hexp10_device = _resolve_wrapped_unary('hexp10') +hexp2_device = _resolve_wrapped_unary('hexp2') +hsqrt_device = _resolve_wrapped_unary('hsqrt') +hrsqrt_device = _resolve_wrapped_unary('hrsqrt') +hfloor_device = _resolve_wrapped_unary('hfloor') +hceil_device = _resolve_wrapped_unary('hceil') +hrcp_device = _resolve_wrapped_unary('hrcp') +hrint_device = _resolve_wrapped_unary('hrint') +htrunc_device = _resolve_wrapped_unary('htrunc') +hdiv_device = _resolve_wrapped_binary('hdiv') + + +# generate atomic operations +def _gen(l_key, supported_types): + @register + class Cuda_atomic(AbstractTemplate): + key = l_key + + def generic(self, args, kws): + assert not kws + ary, idx, val = args + + if ary.dtype not in supported_types: + return + + if ary.ndim == 1: + return signature(ary.dtype, ary, types.intp, ary.dtype) + elif ary.ndim > 1: + return signature(ary.dtype, ary, idx, ary.dtype) + return Cuda_atomic + + +all_numba_types = (types.float64, types.float32, + types.int32, types.uint32, + types.int64, types.uint64) + +integer_numba_types = (types.int32, types.uint32, + types.int64, types.uint64) + +unsigned_int_numba_types = (types.uint32, types.uint64) + +Cuda_atomic_add = _gen(cuda.atomic.add, all_numba_types) +Cuda_atomic_sub = _gen(cuda.atomic.sub, all_numba_types) +Cuda_atomic_max = _gen(cuda.atomic.max, all_numba_types) +Cuda_atomic_min = _gen(cuda.atomic.min, all_numba_types) +Cuda_atomic_nanmax = _gen(cuda.atomic.nanmax, all_numba_types) +Cuda_atomic_nanmin = 
_gen(cuda.atomic.nanmin, all_numba_types) +Cuda_atomic_and = _gen(cuda.atomic.and_, integer_numba_types) +Cuda_atomic_or = _gen(cuda.atomic.or_, integer_numba_types) +Cuda_atomic_xor = _gen(cuda.atomic.xor, integer_numba_types) +Cuda_atomic_inc = _gen(cuda.atomic.inc, unsigned_int_numba_types) +Cuda_atomic_dec = _gen(cuda.atomic.dec, unsigned_int_numba_types) +Cuda_atomic_exch = _gen(cuda.atomic.exch, integer_numba_types) + + +@register +class Cuda_atomic_compare_and_swap(AbstractTemplate): + key = cuda.atomic.compare_and_swap + + def generic(self, args, kws): + assert not kws + ary, old, val = args + dty = ary.dtype + + if dty in integer_numba_types and ary.ndim == 1: + return signature(dty, ary, dty, dty) + + +@register +class Cuda_atomic_cas(AbstractTemplate): + key = cuda.atomic.cas + + def generic(self, args, kws): + assert not kws + ary, idx, old, val = args + dty = ary.dtype + + if dty not in integer_numba_types: + return + + if ary.ndim == 1: + return signature(dty, ary, types.intp, dty, dty) + elif ary.ndim > 1: + return signature(dty, ary, idx, dty, dty) + + +@register +class Cuda_nanosleep(ConcreteTemplate): + key = cuda.nanosleep + + cases = [signature(types.void, types.uint32)] + + +@register_attr +class Dim3_attrs(AttributeTemplate): + key = dim3 + + def resolve_x(self, mod): + return types.int32 + + def resolve_y(self, mod): + return types.int32 + + def resolve_z(self, mod): + return types.int32 + + +@register_attr +class CudaSharedModuleTemplate(AttributeTemplate): + key = types.Module(cuda.shared) + + def resolve_array(self, mod): + return types.Function(Cuda_shared_array) + + +@register_attr +class CudaConstModuleTemplate(AttributeTemplate): + key = types.Module(cuda.const) + + def resolve_array_like(self, mod): + return types.Function(Cuda_const_array_like) + + +@register_attr +class CudaLocalModuleTemplate(AttributeTemplate): + key = types.Module(cuda.local) + + def resolve_array(self, mod): + return types.Function(Cuda_local_array) + + 
+@register_attr +class CudaAtomicTemplate(AttributeTemplate): + key = types.Module(cuda.atomic) + + def resolve_add(self, mod): + return types.Function(Cuda_atomic_add) + + def resolve_sub(self, mod): + return types.Function(Cuda_atomic_sub) + + def resolve_and_(self, mod): + return types.Function(Cuda_atomic_and) + + def resolve_or_(self, mod): + return types.Function(Cuda_atomic_or) + + def resolve_xor(self, mod): + return types.Function(Cuda_atomic_xor) + + def resolve_inc(self, mod): + return types.Function(Cuda_atomic_inc) + + def resolve_dec(self, mod): + return types.Function(Cuda_atomic_dec) + + def resolve_exch(self, mod): + return types.Function(Cuda_atomic_exch) + + def resolve_max(self, mod): + return types.Function(Cuda_atomic_max) + + def resolve_min(self, mod): + return types.Function(Cuda_atomic_min) + + def resolve_nanmin(self, mod): + return types.Function(Cuda_atomic_nanmin) + + def resolve_nanmax(self, mod): + return types.Function(Cuda_atomic_nanmax) + + def resolve_compare_and_swap(self, mod): + return types.Function(Cuda_atomic_compare_and_swap) + + def resolve_cas(self, mod): + return types.Function(Cuda_atomic_cas) + + +@register_attr +class CudaFp16Template(AttributeTemplate): + key = types.Module(cuda.fp16) + + def resolve_hadd(self, mod): + return types.Function(Cuda_hadd) + + def resolve_hsub(self, mod): + return types.Function(Cuda_hsub) + + def resolve_hmul(self, mod): + return types.Function(Cuda_hmul) + + def resolve_hdiv(self, mod): + return hdiv_device + + def resolve_hneg(self, mod): + return types.Function(Cuda_hneg) + + def resolve_habs(self, mod): + return types.Function(Cuda_habs) + + def resolve_hfma(self, mod): + return types.Function(Cuda_hfma) + + def resolve_hsin(self, mod): + return hsin_device + + def resolve_hcos(self, mod): + return hcos_device + + def resolve_hlog(self, mod): + return hlog_device + + def resolve_hlog10(self, mod): + return hlog10_device + + def resolve_hlog2(self, mod): + return hlog2_device + + def 
resolve_hexp(self, mod): + return hexp_device + + def resolve_hexp10(self, mod): + return hexp10_device + + def resolve_hexp2(self, mod): + return hexp2_device + + def resolve_hfloor(self, mod): + return hfloor_device + + def resolve_hceil(self, mod): + return hceil_device + + def resolve_hsqrt(self, mod): + return hsqrt_device + + def resolve_hrsqrt(self, mod): + return hrsqrt_device + + def resolve_hrcp(self, mod): + return hrcp_device + + def resolve_hrint(self, mod): + return hrint_device + + def resolve_htrunc(self, mod): + return htrunc_device + + def resolve_heq(self, mod): + return types.Function(Cuda_heq) + + def resolve_hne(self, mod): + return types.Function(Cuda_hne) + + def resolve_hge(self, mod): + return types.Function(Cuda_hge) + + def resolve_hgt(self, mod): + return types.Function(Cuda_hgt) + + def resolve_hle(self, mod): + return types.Function(Cuda_hle) + + def resolve_hlt(self, mod): + return types.Function(Cuda_hlt) + + def resolve_hmax(self, mod): + return types.Function(Cuda_hmax) + + def resolve_hmin(self, mod): + return types.Function(Cuda_hmin) + + +@register_attr +class CudaModuleTemplate(AttributeTemplate): + key = types.Module(cuda) + + def resolve_cg(self, mod): + return types.Module(cuda.cg) + + def resolve_threadIdx(self, mod): + return dim3 + + def resolve_blockIdx(self, mod): + return dim3 + + def resolve_blockDim(self, mod): + return dim3 + + def resolve_gridDim(self, mod): + return dim3 + + def resolve_laneid(self, mod): + return types.int32 + + def resolve_shared(self, mod): + return types.Module(cuda.shared) + + def resolve_popc(self, mod): + return types.Function(Cuda_popc) + + def resolve_brev(self, mod): + return types.Function(Cuda_brev) + + def resolve_clz(self, mod): + return types.Function(Cuda_clz) + + def resolve_ffs(self, mod): + return types.Function(Cuda_ffs) + + def resolve_fma(self, mod): + return types.Function(Cuda_fma) + + def resolve_cbrt(self, mod): + return types.Function(Cuda_cbrt) + + def 
resolve_threadfence(self, mod): + return types.Function(Cuda_threadfence_device) + + def resolve_threadfence_block(self, mod): + return types.Function(Cuda_threadfence_block) + + def resolve_threadfence_system(self, mod): + return types.Function(Cuda_threadfence_system) + + def resolve_syncwarp(self, mod): + return types.Function(Cuda_syncwarp) + + def resolve_shfl_sync_intrinsic(self, mod): + return types.Function(Cuda_shfl_sync_intrinsic) + + def resolve_vote_sync_intrinsic(self, mod): + return types.Function(Cuda_vote_sync_intrinsic) + + def resolve_match_any_sync(self, mod): + return types.Function(Cuda_match_any_sync) + + def resolve_match_all_sync(self, mod): + return types.Function(Cuda_match_all_sync) + + def resolve_activemask(self, mod): + return types.Function(Cuda_activemask) + + def resolve_lanemask_lt(self, mod): + return types.Function(Cuda_lanemask_lt) + + def resolve_selp(self, mod): + return types.Function(Cuda_selp) + + def resolve_nanosleep(self, mod): + return types.Function(Cuda_nanosleep) + + def resolve_atomic(self, mod): + return types.Module(cuda.atomic) + + def resolve_fp16(self, mod): + return types.Module(cuda.fp16) + + def resolve_const(self, mod): + return types.Module(cuda.const) + + def resolve_local(self, mod): + return types.Module(cuda.local) + + +register_global(cuda, types.Module(cuda)) + + +# NumPy + +for func in trigonometric_functions: + register_numpy_ufunc(func, register_global) + +for func in comparison_functions: + register_numpy_ufunc(func, register_global) + +for func in bit_twiddling_functions: + register_numpy_ufunc(func, register_global) + +for func in math_operations: + if func in ('log', 'log2', 'log10'): + register_numpy_ufunc(func, register_global) diff --git a/lib/python3.10/site-packages/numba/cuda/cudaimpl.py b/lib/python3.10/site-packages/numba/cuda/cudaimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..5de024d1c9a6da9b4cf73f76d2ad7228045bd42b --- /dev/null +++ 
# numba/cuda/cudaimpl.py -- lowering implementations for the CUDA target.
# (Reflowed from a line-mangled unified diff; code tokens are unchanged,
# only comments/docstrings were added or improved.)
from functools import reduce
import operator
import math

from llvmlite import ir
import llvmlite.binding as ll

from numba.core.imputils import Registry, lower_cast
from numba.core.typing.npydecl import parse_dtype
from numba.core.datamodel import models
from numba.core import types, cgutils
from numba.np import ufunc_db
from numba.np.npyimpl import register_ufuncs
from .cudadrv import nvvm
from numba import cuda
from numba.cuda import nvvmutils, stubs, errors
from numba.cuda.types import dim3, CUDADispatcher

# Registry collecting every CUDA-target lowering defined in this module.
registry = Registry()
lower = registry.lower
lower_attr = registry.lower_getattr
lower_constant = registry.lower_constant


def initialize_dim3(builder, prefix):
    """Read the x/y/z special registers named by *prefix* (e.g. 'tid')
    and pack them into a three-element LLVM struct (a dim3 value)."""
    x = nvvmutils.call_sreg(builder, "%s.x" % prefix)
    y = nvvmutils.call_sreg(builder, "%s.y" % prefix)
    z = nvvmutils.call_sreg(builder, "%s.z" % prefix)
    return cgutils.pack_struct(builder, (x, y, z))


@lower_attr(types.Module(cuda), 'threadIdx')
def cuda_threadIdx(context, builder, sig, args):
    # cuda.threadIdx -> %tid.{x,y,z} special registers
    return initialize_dim3(builder, 'tid')


@lower_attr(types.Module(cuda), 'blockDim')
def cuda_blockDim(context, builder, sig, args):
    # cuda.blockDim -> %ntid.{x,y,z}
    return initialize_dim3(builder, 'ntid')


@lower_attr(types.Module(cuda), 'blockIdx')
def cuda_blockIdx(context, builder, sig, args):
    # cuda.blockIdx -> %ctaid.{x,y,z}
    return initialize_dim3(builder, 'ctaid')


@lower_attr(types.Module(cuda), 'gridDim')
def cuda_gridDim(context, builder, sig, args):
    # cuda.gridDim -> %nctaid.{x,y,z}
    return initialize_dim3(builder, 'nctaid')


@lower_attr(types.Module(cuda), 'laneid')
def cuda_laneid(context, builder, sig, args):
    return nvvmutils.call_sreg(builder, 'laneid')


# Attribute access on a dim3 value extracts the corresponding struct member.

@lower_attr(dim3, 'x')
def dim3_x(context, builder, sig, args):
    return builder.extract_value(args, 0)


@lower_attr(dim3, 'y')
def dim3_y(context, builder, sig, args):
    return builder.extract_value(args, 1)


@lower_attr(dim3, 'z')
def dim3_z(context, builder, sig, args):
    return builder.extract_value(args, 2)


# -----------------------------------------------------------------------------

@lower(cuda.const.array_like, types.Array)
def cuda_const_array_like(context, builder, sig, args):
    # This is a no-op because CUDATargetContext.make_constant_array already
    # created the constant array.
    return args[0]


# Monotonic counter used to generate unique shared-memory symbol names.
_unique_smem_id = 0


def _get_unique_smem_id(name):
    """Due to a bug with NVVM's invalid internalizing of shared memory in the
    PTX output, we can't mark shared memory to be internal. We have to
    ensure a unique name is generated for each shared memory symbol.
    """
    global _unique_smem_id
    _unique_smem_id += 1
    return "{0}_{1}".format(name, _unique_smem_id)


@lower(cuda.shared.array, types.IntegerLiteral, types.Any)
def cuda_shared_array_integer(context, builder, sig, args):
    # cuda.shared.array(n, dtype) with a literal integer length.
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)


@lower(cuda.shared.array, types.Tuple, types.Any)
@lower(cuda.shared.array, types.UniTuple, types.Any)
def cuda_shared_array_tuple(context, builder, sig, args):
    # cuda.shared.array(shape_tuple, dtype) with literal dimensions.
    shape = [ s.literal_value for s in sig.args[0] ]
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=shape, dtype=dtype,
                          symbol_name=_get_unique_smem_id('_cudapy_smem'),
                          addrspace=nvvm.ADDRSPACE_SHARED,
                          can_dynsized=True)


@lower(cuda.local.array, types.IntegerLiteral, types.Any)
def cuda_local_array_integer(context, builder, sig, args):
    # cuda.local.array(n, dtype); local memory cannot be dynamically sized.
    length = sig.args[0].literal_value
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=(length,), dtype=dtype,
                          symbol_name='_cudapy_lmem',
                          addrspace=nvvm.ADDRSPACE_LOCAL,
                          can_dynsized=False)


@lower(cuda.local.array, types.Tuple, types.Any)
@lower(cuda.local.array, types.UniTuple, types.Any)
def ptx_lmem_alloc_array(context, builder, sig, args):
    # NOTE(review): name does not follow the cuda_*_array_tuple convention of
    # the siblings above; kept as-is since it may be referenced elsewhere.
    shape = [ s.literal_value for s in sig.args[0] ]
    dtype = parse_dtype(sig.args[1])
    return _generic_array(context, builder, shape=shape, dtype=dtype,
                          symbol_name='_cudapy_lmem',
                          addrspace=nvvm.ADDRSPACE_LOCAL,
                          can_dynsized=False)


@lower(stubs.threadfence_block)
def ptx_threadfence_block(context, builder, sig, args):
    # threadfence_block() -> llvm.nvvm.membar.cta (block-scope fence)
    assert not args
    fname = 'llvm.nvvm.membar.cta'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.threadfence_system)
def ptx_threadfence_system(context, builder, sig, args):
    # threadfence_system() -> llvm.nvvm.membar.sys (system-scope fence)
    assert not args
    fname = 'llvm.nvvm.membar.sys'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.threadfence)
def ptx_threadfence_device(context, builder, sig, args):
    # threadfence() -> llvm.nvvm.membar.gl (device-scope fence)
    assert not args
    fname = 'llvm.nvvm.membar.gl'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), ())
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, ())
    return context.get_dummy_value()


@lower(stubs.syncwarp)
def ptx_syncwarp(context, builder, sig, args):
    # syncwarp() with no mask: synchronize all lanes (mask 0xFFFFFFFF),
    # delegating to the masked variant below.
    mask = context.get_constant(types.int32, 0xFFFFFFFF)
    mask_sig = types.none(types.int32)
    return ptx_syncwarp_mask(context, builder, mask_sig, [mask])


@lower(stubs.syncwarp, types.i4)
def ptx_syncwarp_mask(context, builder, sig, args):
    fname = 'llvm.nvvm.bar.warp.sync'
    lmod = builder.module
    fnty = ir.FunctionType(ir.VoidType(), (ir.IntType(32),))
    sync = cgutils.get_or_insert_function(lmod, fnty, fname)
    builder.call(sync, args)
    return context.get_dummy_value()


@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.i8, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f4, types.i4,
       types.i4)
@lower(stubs.shfl_sync_intrinsic, types.i4, types.i4, types.f8, types.i4,
       types.i4)
def ptx_shfl_sync_i32(context, builder, sig, args):
    """
    The NVVM intrinsic for shfl only supports i32, but the cuda intrinsic
    function supports both 32 and 64 bit ints and floats, so for feature
    parity, i64, f32, and f64 are implemented. Floats by way of bitcasting
    the float to an int, then shuffling, then bitcasting back. And 64-bit
    values by packing them into 2 32bit values, shuffling those, and then
    packing back together.
    """
    mask, mode, value, index, clamp = args
    value_type = sig.args[2]
    if value_type in types.real_domain:
        value = builder.bitcast(value, ir.IntType(value_type.bitwidth))
    fname = 'llvm.nvvm.shfl.sync.i32'
    lmod = builder.module
    # The intrinsic returns {i32 value, i1 predicate}.
    fnty = ir.FunctionType(
        ir.LiteralStructType((ir.IntType(32), ir.IntType(1))),
        (ir.IntType(32), ir.IntType(32), ir.IntType(32),
         ir.IntType(32), ir.IntType(32))
    )
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    if value_type.bitwidth == 32:
        ret = builder.call(func, (mask, mode, value, index, clamp))
        if value_type == types.float32:
            # Bitcast the shuffled i32 payload back to float32.
            rv = builder.extract_value(ret, 0)
            pred = builder.extract_value(ret, 1)
            fv = builder.bitcast(rv, ir.FloatType())
            ret = cgutils.make_anonymous_struct(builder, (fv, pred))
    else:
        # 64-bit payload: split into low/high 32-bit halves, shuffle each,
        # then reassemble. The predicate of the first call is reused.
        value1 = builder.trunc(value, ir.IntType(32))
        value_lshr = builder.lshr(value, context.get_constant(types.i8, 32))
        value2 = builder.trunc(value_lshr, ir.IntType(32))
        ret1 = builder.call(func, (mask, mode, value1, index, clamp))
        ret2 = builder.call(func, (mask, mode, value2, index, clamp))
        rv1 = builder.extract_value(ret1, 0)
        rv2 = builder.extract_value(ret2, 0)
        pred = builder.extract_value(ret1, 1)
        rv1_64 = builder.zext(rv1, ir.IntType(64))
        rv2_64 = builder.zext(rv2, ir.IntType(64))
        rv_shl = builder.shl(rv2_64, context.get_constant(types.i8, 32))
        rv = builder.or_(rv_shl, rv1_64)
        if value_type == types.float64:
            rv = builder.bitcast(rv, ir.DoubleType())
        ret = cgutils.make_anonymous_struct(builder, (rv, pred))
    return ret


@lower(stubs.vote_sync_intrinsic, types.i4, types.i4, types.boolean)
def ptx_vote_sync(context, builder, sig, args):
    # Returns the intrinsic's {i32 ballot, i1 predicate} pair unchanged.
    fname = 'llvm.nvvm.vote.sync'
    lmod = builder.module
    fnty = ir.FunctionType(ir.LiteralStructType((ir.IntType(32),
                                                 ir.IntType(1))),
                           (ir.IntType(32), ir.IntType(32), ir.IntType(1)))
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(func, args)


@lower(stubs.match_any_sync, types.i4, types.i4)
@lower(stubs.match_any_sync, types.i4, types.i8)
@lower(stubs.match_any_sync, types.i4, types.f4)
@lower(stubs.match_any_sync, types.i4, types.f8)
def ptx_match_any_sync(context, builder, sig, args):
    # Floats are bitcast to same-width ints; the intrinsic is int-only.
    mask, value = args
    width = sig.args[1].bitwidth
    if sig.args[1] in types.real_domain:
        value = builder.bitcast(value, ir.IntType(width))
    fname = 'llvm.nvvm.match.any.sync.i{}'.format(width)
    lmod = builder.module
    fnty = ir.FunctionType(ir.IntType(32), (ir.IntType(32), ir.IntType(width)))
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(func, (mask, value))


@lower(stubs.match_all_sync, types.i4, types.i4)
@lower(stubs.match_all_sync, types.i4, types.i8)
@lower(stubs.match_all_sync, types.i4, types.f4)
@lower(stubs.match_all_sync, types.i4, types.f8)
def ptx_match_all_sync(context, builder, sig, args):
    # Like match_any, but the intrinsic also returns an i1 "all matched" flag.
    mask, value = args
    width = sig.args[1].bitwidth
    if sig.args[1] in types.real_domain:
        value = builder.bitcast(value, ir.IntType(width))
    fname = 'llvm.nvvm.match.all.sync.i{}'.format(width)
    lmod = builder.module
    fnty = ir.FunctionType(ir.LiteralStructType((ir.IntType(32),
                                                 ir.IntType(1))),
                           (ir.IntType(32), ir.IntType(width)))
    func = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(func, (mask, value))
@lower(stubs.activemask)
def ptx_activemask(context, builder, sig, args):
    # No NVVM intrinsic exists for activemask; emit inline PTX instead.
    activemask = ir.InlineAsm(ir.FunctionType(ir.IntType(32), []),
                              "activemask.b32 $0;", '=r', side_effect=True)
    return builder.call(activemask, [])


@lower(stubs.lanemask_lt)
def ptx_lanemask_lt(context, builder, sig, args):
    # Read the %lanemask_lt special register via inline PTX.
    activemask = ir.InlineAsm(ir.FunctionType(ir.IntType(32), []),
                              "mov.u32 $0, %lanemask_lt;", '=r',
                              side_effect=True)
    return builder.call(activemask, [])


@lower(stubs.popc, types.Any)
def ptx_popc(context, builder, sig, args):
    # Population count via LLVM's ctpop intrinsic.
    return builder.ctpop(args[0])


@lower(stubs.fma, types.Any, types.Any, types.Any)
def ptx_fma(context, builder, sig, args):
    # Fused multiply-add via LLVM's fma intrinsic.
    return builder.fma(*args)


def float16_float_ty_constraint(bitwidth):
    """Map a float bitwidth to a (PTX type name, inline-asm register
    constraint) pair, e.g. 32 -> ('f32', 'f').

    Raises CudaLoweringError for unsupported widths."""
    typemap = {32: ('f32', 'f'), 64: ('f64', 'd')}

    try:
        return typemap[bitwidth]
    except KeyError:
        msg = f"Conversion between float16 and float{bitwidth} unsupported"
        raise errors.CudaLoweringError(msg)


@lower_cast(types.float16, types.Float)
def float16_to_float_cast(context, builder, fromty, toty, val):
    # float16 values are carried as i16; widen via a PTX cvt instruction.
    if fromty.bitwidth == toty.bitwidth:
        return val

    ty, constraint = float16_float_ty_constraint(toty.bitwidth)

    fnty = ir.FunctionType(context.get_value_type(toty), [ir.IntType(16)])
    asm = ir.InlineAsm(fnty, f"cvt.{ty}.f16 $0, $1;", f"={constraint},h")
    return builder.call(asm, [val])


@lower_cast(types.Float, types.float16)
def float_to_float16_cast(context, builder, fromty, toty, val):
    # Narrow to float16 with round-to-nearest-even (cvt.rn).
    if fromty.bitwidth == toty.bitwidth:
        return val

    ty, constraint = float16_float_ty_constraint(fromty.bitwidth)

    fnty = ir.FunctionType(ir.IntType(16), [context.get_value_type(fromty)])
    asm = ir.InlineAsm(fnty, f"cvt.rn.f16.{ty} $0, $1;", f"=h,{constraint}")
    return builder.call(asm, [val])


def float16_int_constraint(bitwidth):
    """Map an integer bitwidth to its PTX inline-asm register constraint.

    Raises CudaLoweringError for unsupported widths."""
    typemap = { 8: 'c', 16: 'h', 32: 'r', 64: 'l' }

    try:
        return typemap[bitwidth]
    except KeyError:
        msg = f"Conversion between float16 and int{bitwidth} unsupported"
        raise errors.CudaLoweringError(msg)


@lower_cast(types.float16, types.Integer)
def float16_to_integer_cast(context, builder, fromty, toty, val):
    # cvt.rni rounds to the nearest integer.
    bitwidth = toty.bitwidth
    constraint = float16_int_constraint(bitwidth)
    signedness = 's' if toty.signed else 'u'

    fnty = ir.FunctionType(context.get_value_type(toty), [ir.IntType(16)])
    asm = ir.InlineAsm(fnty,
                       f"cvt.rni.{signedness}{bitwidth}.f16 $0, $1;",
                       f"={constraint},h")
    return builder.call(asm, [val])


@lower_cast(types.Integer, types.float16)
@lower_cast(types.IntegerLiteral, types.float16)
def integer_to_float16_cast(context, builder, fromty, toty, val):
    # Integer -> float16 with round-to-nearest (cvt.rn).
    bitwidth = fromty.bitwidth
    constraint = float16_int_constraint(bitwidth)
    signedness = 's' if fromty.signed else 'u'

    fnty = ir.FunctionType(ir.IntType(16),
                           [context.get_value_type(fromty)])
    asm = ir.InlineAsm(fnty,
                       f"cvt.rn.f16.{signedness}{bitwidth} $0, $1;",
                       f"=h,{constraint}")
    return builder.call(asm, [val])


def lower_fp16_binary(fn, op):
    """Register a float16 x float16 -> float16 lowering for *fn* that emits
    the PTX instruction ``<op>.f16``."""
    @lower(fn, types.float16, types.float16)
    def ptx_fp16_binary(context, builder, sig, args):
        fnty = ir.FunctionType(ir.IntType(16),
                               [ir.IntType(16), ir.IntType(16)])
        asm = ir.InlineAsm(fnty, f'{op}.f16 $0,$1,$2;', '=h,h,h')
        return builder.call(asm, args)


# Register both the explicit fp16 stubs and the Python operators.
lower_fp16_binary(stubs.fp16.hadd, 'add')
lower_fp16_binary(operator.add, 'add')
lower_fp16_binary(operator.iadd, 'add')
lower_fp16_binary(stubs.fp16.hsub, 'sub')
lower_fp16_binary(operator.sub, 'sub')
lower_fp16_binary(operator.isub, 'sub')
lower_fp16_binary(stubs.fp16.hmul, 'mul')
lower_fp16_binary(operator.mul, 'mul')
lower_fp16_binary(operator.imul, 'mul')


@lower(stubs.fp16.hneg, types.float16)
def ptx_fp16_hneg(context, builder, sig, args):
    fnty = ir.FunctionType(ir.IntType(16), [ir.IntType(16)])
    asm = ir.InlineAsm(fnty, 'neg.f16 $0, $1;', '=h,h')
    return builder.call(asm, args)


@lower(operator.neg, types.float16)
def operator_hneg(context, builder, sig, args):
    # Python unary minus on float16 delegates to the hneg lowering.
    return ptx_fp16_hneg(context, builder, sig, args)


@lower(stubs.fp16.habs, types.float16)
def ptx_fp16_habs(context, builder, sig, args):
    fnty = ir.FunctionType(ir.IntType(16), [ir.IntType(16)])
    asm = ir.InlineAsm(fnty, 'abs.f16 $0, $1;', '=h,h')
    return builder.call(asm, args)


@lower(abs, types.float16)
def operator_habs(context, builder, sig, args):
    # Python abs() on float16 delegates to the habs lowering.
    return ptx_fp16_habs(context, builder, sig, args)


@lower(stubs.fp16.hfma, types.float16, types.float16, types.float16)
def ptx_hfma(context, builder, sig, args):
    # Fused multiply-add in half precision, round-to-nearest.
    argtys = [ir.IntType(16), ir.IntType(16), ir.IntType(16)]
    fnty = ir.FunctionType(ir.IntType(16), argtys)
    asm = ir.InlineAsm(fnty, "fma.rn.f16 $0,$1,$2,$3;", "=h,h,h,h")
    return builder.call(asm, args)


@lower(operator.truediv, types.float16, types.float16)
@lower(operator.itruediv, types.float16, types.float16)
def fp16_div_impl(context, builder, sig, args):
    # No single PTX instruction for fp16 divide: compile cuda.fp16.hdiv.
    def fp16_div(x, y):
        return cuda.fp16.hdiv(x, y)

    return context.compile_internal(builder, fp16_div, sig, args)


# PTX template for fp16 comparisons: setp produces a predicate, selp
# materializes it as a 0/1 u16 value.
_fp16_cmp = """{{
    .reg .pred __$$f16_cmp_tmp;
    setp.{op}.f16 __$$f16_cmp_tmp, $1, $2;
    selp.u16 $0, 1, 0, __$$f16_cmp_tmp;
 }}"""


def _gen_fp16_cmp(op):
    """Build a lowering for the fp16 comparison *op* (e.g. 'eq', 'lt') that
    returns an i1 by testing the 0/1 u16 result against zero."""
    def ptx_fp16_comparison(context, builder, sig, args):
        fnty = ir.FunctionType(ir.IntType(16), [ir.IntType(16), ir.IntType(16)])
        asm = ir.InlineAsm(fnty, _fp16_cmp.format(op=op), '=h,h,h')
        result = builder.call(asm, args)

        zero = context.get_constant(types.int16, 0)
        int_result = builder.bitcast(result, ir.IntType(16))
        return builder.icmp_unsigned("!=", int_result, zero)
    return ptx_fp16_comparison


lower(stubs.fp16.heq, types.float16, types.float16)(_gen_fp16_cmp('eq'))
lower(operator.eq, types.float16, types.float16)(_gen_fp16_cmp('eq'))
lower(stubs.fp16.hne, types.float16, types.float16)(_gen_fp16_cmp('ne'))
lower(operator.ne, types.float16, types.float16)(_gen_fp16_cmp('ne'))
lower(stubs.fp16.hge, types.float16, types.float16)(_gen_fp16_cmp('ge'))
lower(operator.ge, types.float16, types.float16)(_gen_fp16_cmp('ge'))
lower(stubs.fp16.hgt, types.float16, types.float16)(_gen_fp16_cmp('gt'))
lower(operator.gt, types.float16, types.float16)(_gen_fp16_cmp('gt'))
lower(stubs.fp16.hle, types.float16, types.float16)(_gen_fp16_cmp('le'))
lower(operator.le, types.float16, types.float16)(_gen_fp16_cmp('le'))
lower(stubs.fp16.hlt, types.float16, types.float16)(_gen_fp16_cmp('lt'))
lower(operator.lt, types.float16, types.float16)(_gen_fp16_cmp('lt'))


def lower_fp16_minmax(fn, fname, op):
    """Register an fp16 min/max for *fn*, implemented as compare + select.

    NOTE(review): the ``fname`` parameter is unused in the body.
    """
    @lower(fn, types.float16, types.float16)
    def ptx_fp16_minmax(context, builder, sig, args):
        choice = _gen_fp16_cmp(op)(context, builder, sig, args)
        return builder.select(choice, args[0], args[1])


lower_fp16_minmax(stubs.fp16.hmax, 'max', 'gt')
lower_fp16_minmax(stubs.fp16.hmin, 'min', 'lt')

# See:
# https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrt.html#__nv_cbrt
# https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrtf.html#__nv_cbrtf


cbrt_funcs = {
    types.float32: '__nv_cbrtf',
    types.float64: '__nv_cbrt',
}


@lower(stubs.cbrt, types.float32)
@lower(stubs.cbrt, types.float64)
def ptx_cbrt(context, builder, sig, args):
    # Cube root via the matching libdevice function.
    ty = sig.return_type
    fname = cbrt_funcs[ty]
    fty = context.get_value_type(ty)
    lmod = builder.module
    fnty = ir.FunctionType(fty, [fty])
    fn = cgutils.get_or_insert_function(lmod, fnty, fname)
    return builder.call(fn, args)


@lower(stubs.brev, types.u4)
def ptx_brev_u4(context, builder, sig, args):
    # FIXME the llvm.bitreverse.i32 intrinsic isn't supported by nvcc
    # return builder.bitreverse(args[0])

    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(32), (ir.IntType(32),)),
        '__nv_brev')
    return builder.call(fn, args)


@lower(stubs.brev, types.u8)
def ptx_brev_u8(context, builder, sig, args):
    # FIXME the llvm.bitreverse.i64 intrinsic isn't supported by nvcc
    # return builder.bitreverse(args[0])

    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(64), (ir.IntType(64),)),
        '__nv_brevll')
    return builder.call(fn, args)


@lower(stubs.clz, types.Any)
def ptx_clz(context, builder, sig, args):
    # Count leading zeros; the second (i1) argument False means the result
    # is defined even for a zero input.
    return builder.ctlz(
        args[0],
        context.get_constant(types.boolean, 0))


@lower(stubs.ffs, types.i4)
@lower(stubs.ffs, types.u4)
def ptx_ffs_32(context, builder, sig, args):
    # Find-first-set via libdevice __nv_ffs.
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(32), (ir.IntType(32),)),
        '__nv_ffs')
    return builder.call(fn, args)


@lower(stubs.ffs, types.i8)
@lower(stubs.ffs, types.u8)
def ptx_ffs_64(context, builder, sig, args):
    # 64-bit find-first-set; note the result is still i32.
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(ir.IntType(32), (ir.IntType(64),)),
        '__nv_ffsll')
    return builder.call(fn, args)


@lower(stubs.selp, types.Any, types.Any, types.Any)
def ptx_selp(context, builder, sig, args):
    # selp(test, a, b) -> LLVM select.
    test, a, b = args
    return builder.select(test, a, b)


@lower(max, types.f4, types.f4)
def ptx_max_f4(context, builder, sig, args):
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.FloatType(),
            (ir.FloatType(), ir.FloatType())),
        '__nv_fmaxf')
    return builder.call(fn, args)


@lower(max, types.f8, types.f4)
@lower(max, types.f4, types.f8)
@lower(max, types.f8, types.f8)
def ptx_max_f8(context, builder, sig, args):
    # Mixed f4/f8 operands are promoted to double before calling __nv_fmax.
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.DoubleType(),
            (ir.DoubleType(), ir.DoubleType())),
        '__nv_fmax')

    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
        context.cast(builder, args[1], sig.args[1], types.double),
    ])


@lower(min, types.f4, types.f4)
def ptx_min_f4(context, builder, sig, args):
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.FloatType(),
            (ir.FloatType(), ir.FloatType())),
        '__nv_fminf')
    return builder.call(fn, args)


@lower(min, types.f8, types.f4)
@lower(min, types.f4, types.f8)
@lower(min, types.f8, types.f8)
def ptx_min_f8(context, builder, sig, args):
    # Mixed f4/f8 operands are promoted to double before calling __nv_fmin.
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.DoubleType(),
            (ir.DoubleType(), ir.DoubleType())),
        '__nv_fmin')

    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
        context.cast(builder, args[1], sig.args[1], types.double),
    ])


@lower(round, types.f4)
@lower(round, types.f8)
def ptx_round(context, builder, sig, args):
    # round(x) -> __nv_llrint on a double-promoted argument (returns i64).
    fn = cgutils.get_or_insert_function(
        builder.module,
        ir.FunctionType(
            ir.IntType(64),
            (ir.DoubleType(),)),
        '__nv_llrint')
    return builder.call(fn, [
        context.cast(builder, args[0], sig.args[0], types.double),
    ])


# This rounding implementation follows the algorithm used in the "fallback
# version" of double_round in CPython.
# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/floatobject.c#L964-L1007

@lower(round, types.f4, types.Integer)
@lower(round, types.f8, types.Integer)
def round_to_impl(context, builder, sig, args):
    # round(x, ndigits): compile the pure-Python algorithm below for device.
    def round_ndigits(x, ndigits):
        if math.isinf(x) or math.isnan(x):
            return x

        if ndigits >= 0:
            if ndigits > 22:
                # pow1 and pow2 are each safe from overflow, but
                # pow1*pow2 ~= pow(10.0, ndigits) might overflow.
                pow1 = 10.0 ** (ndigits - 22)
                pow2 = 1e22
            else:
                pow1 = 10.0 ** ndigits
                pow2 = 1.0
            y = (x * pow1) * pow2
            if math.isinf(y):
                return x

        else:
            pow1 = 10.0 ** (-ndigits)
            y = x / pow1

        z = round(y)
        if (math.fabs(y - z) == 0.5):
            # halfway between two integers; use round-half-even
            z = 2.0 * round(y / 2.0)

        if ndigits >= 0:
            z = (z / pow2) / pow1
        else:
            z *= pow1

        return z

    return context.compile_internal(builder, round_ndigits, sig, args, )


def gen_deg_rad(const):
    """Build a lowering that multiplies its float argument by *const*."""
    def impl(context, builder, sig, args):
        argty, = sig.args
        factor = context.get_constant(argty, const)
        return builder.fmul(factor, args[0])
    return impl


_deg2rad = math.pi / 180.
_rad2deg = 180. / math.pi
lower(math.radians, types.f4)(gen_deg_rad(_deg2rad))
lower(math.radians, types.f8)(gen_deg_rad(_deg2rad))
lower(math.degrees, types.f4)(gen_deg_rad(_rad2deg))
lower(math.degrees, types.f8)(gen_deg_rad(_rad2deg))


def _normalize_indices(context, builder, indty, inds, aryty, valty):
    """
    Convert integer indices into tuple of intp
    """
    if indty in types.integer_domain:
        # A single integer index becomes a 1-tuple.
        indty = types.UniTuple(dtype=indty, count=1)
        indices = [inds]
    else:
        indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
    indices = [context.cast(builder, i, t, types.intp)
               for t, i in zip(indty, indices)]

    dtype = aryty.dtype
    if dtype != valty:
        raise TypeError("expect %s but got %s" % (dtype, valty))

    if aryty.ndim != len(indty):
        raise TypeError("indexing %d-D array with %d-D index" %
                        (aryty.ndim, len(indty)))

    return indty, indices


def _atomic_dispatcher(dispatch_fn):
    """Decorator that factors out the shared (array, index, value) argument
    handling of the atomic-op lowerings, then calls *dispatch_fn* with the
    element pointer and value."""
    def imp(context, builder, sig, args):
        # The common argument handling code
        aryty, indty, valty = sig.args
        ary, inds, val = args
        dtype = aryty.dtype

        indty, indices = _normalize_indices(context, builder, indty, inds,
                                            aryty, valty)

        lary = context.make_array(aryty)(context, builder, ary)
        ptr = cgutils.get_item_pointer(context, builder, aryty, lary, indices,
                                       wraparound=True)
        # dispatch to the implementation based on dtype
        return dispatch_fn(context, builder, dtype, ptr, val)
    return imp


@lower(stubs.atomic.add, types.Array, types.intp, types.Any)
@lower(stubs.atomic.add, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.add, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_add_tuple(context, builder, dtype, ptr, val):
    # Floats need dedicated NVVM atomic-add declarations; integers can use
    # LLVM atomicrmw directly.
    if dtype == types.float32:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_add_float32(lmod),
                            (ptr, val))
    elif dtype == types.float64:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_add_float64(lmod),
                            (ptr, val))
    else:
        return builder.atomic_rmw('add', ptr, val, 'monotonic')


@lower(stubs.atomic.sub, types.Array, types.intp, types.Any)
@lower(stubs.atomic.sub, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.sub, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_sub(context, builder, dtype, ptr, val):
    if dtype == types.float32:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_sub_float32(lmod),
                            (ptr, val))
    elif dtype == types.float64:
        lmod = builder.module
        return builder.call(nvvmutils.declare_atomic_sub_float64(lmod),
                            (ptr, val))
    else:
        return builder.atomic_rmw('sub', ptr, val, 'monotonic')


@lower(stubs.atomic.inc, types.Array, types.intp, types.Any)
@lower(stubs.atomic.inc, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.inc, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_inc(context, builder, dtype, ptr, val):
    # atomic.inc is only defined for unsigned integer arrays.
    if dtype in cuda.cudadecl.unsigned_int_numba_types:
        bw = dtype.bitwidth
        lmod = builder.module
        fn = getattr(nvvmutils, f'declare_atomic_inc_int{bw}')
        return builder.call(fn(lmod), (ptr, val))
    else:
        raise TypeError(f'Unimplemented atomic inc with {dtype} array')


@lower(stubs.atomic.dec, types.Array, types.intp, types.Any)
@lower(stubs.atomic.dec, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.dec, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_dec(context, builder, dtype, ptr, val):
    # atomic.dec is only defined for unsigned integer arrays.
    if dtype in cuda.cudadecl.unsigned_int_numba_types:
        bw = dtype.bitwidth
        lmod = builder.module
        fn = getattr(nvvmutils, f'declare_atomic_dec_int{bw}')
        return builder.call(fn(lmod), (ptr, val))
    else:
        raise TypeError(f'Unimplemented atomic dec with {dtype} array')


def ptx_atomic_bitwise(stub, op):
    """Register an integer-only atomic bitwise *op* lowering for *stub*
    across all supported index kinds."""
    @_atomic_dispatcher
    def impl_ptx_atomic(context, builder, dtype, ptr, val):
        if dtype in (cuda.cudadecl.integer_numba_types):
            return builder.atomic_rmw(op, ptr, val, 'monotonic')
        else:
            raise TypeError(f'Unimplemented atomic {op} with {dtype} array')

    for ty in (types.intp, types.UniTuple, types.Tuple):
        lower(stub, types.Array, ty, types.Any)(impl_ptx_atomic)


ptx_atomic_bitwise(stubs.atomic.and_, 'and')
ptx_atomic_bitwise(stubs.atomic.or_, 'or')
ptx_atomic_bitwise(stubs.atomic.xor, 'xor')


@lower(stubs.atomic.exch, types.Array, types.intp, types.Any)
@lower(stubs.atomic.exch, types.Array, types.UniTuple, types.Any)
@lower(stubs.atomic.exch, types.Array, types.Tuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_exch(context, builder, dtype, ptr, val):
    if dtype in (cuda.cudadecl.integer_numba_types):
        return builder.atomic_rmw('xchg', ptr, val, 'monotonic')
    else:
        raise TypeError(f'Unimplemented atomic exch with {dtype} array')


@lower(stubs.atomic.max, types.Array, types.intp, types.Any)
@lower(stubs.atomic.max, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.max, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_max(context, builder, dtype, ptr, val):
    # Floats use NVVM declarations; signed/unsigned ints pick max/umax.
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_max_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_max_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic max with %s array' % dtype)


@lower(stubs.atomic.min, types.Array, types.intp, types.Any)
@lower(stubs.atomic.min, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.min, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_min(context, builder, dtype, ptr, val):
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_min_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_min_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic min with %s array' % dtype)


@lower(stubs.atomic.nanmax, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmax, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmax(context, builder, dtype, ptr, val):
    # NaN-aware float max; int paths are identical to atomic.max.
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmax_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmax_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('max', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umax', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic max with %s array' % dtype)


@lower(stubs.atomic.nanmin, types.Array, types.intp, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.Tuple, types.Any)
@lower(stubs.atomic.nanmin, types.Array, types.UniTuple, types.Any)
@_atomic_dispatcher
def ptx_atomic_nanmin(context, builder, dtype, ptr, val):
    # NaN-aware float min; int paths are identical to atomic.min.
    lmod = builder.module
    if dtype == types.float64:
        return builder.call(nvvmutils.declare_atomic_nanmin_float64(lmod),
                            (ptr, val))
    elif dtype == types.float32:
        return builder.call(nvvmutils.declare_atomic_nanmin_float32(lmod),
                            (ptr, val))
    elif dtype in (types.int32, types.int64):
        return builder.atomic_rmw('min', ptr, val, ordering='monotonic')
    elif dtype in (types.uint32, types.uint64):
        return builder.atomic_rmw('umin', ptr, val, ordering='monotonic')
    else:
        raise TypeError('Unimplemented atomic min with %s array' % dtype)


@lower(stubs.atomic.compare_and_swap, types.Array, types.Any, types.Any)
def ptx_atomic_compare_and_swap(context, builder, sig, args):
    # Legacy compare_and_swap(ary, old, val) == cas(ary, 0, old, val).
    sig = sig.return_type(sig.args[0], types.intp, sig.args[1], sig.args[2])
    args = (args[0], context.get_constant(types.intp, 0), args[1], args[2])
    return ptx_atomic_cas(context, builder, sig, args)


@lower(stubs.atomic.cas, types.Array, types.intp, types.Any, types.Any)
@lower(stubs.atomic.cas, types.Array, types.Tuple, types.Any, types.Any)
@lower(stubs.atomic.cas, types.Array, types.UniTuple, types.Any, types.Any)
def ptx_atomic_cas(context, builder, sig, args):
    # cas takes four args (ary, idx, old, val) so it cannot reuse
    # _atomic_dispatcher; the index normalization is done inline.
    aryty, indty, oldty, valty = sig.args
    ary, inds, old, val = args

    indty, indices = _normalize_indices(context, builder, indty, inds, aryty,
                                        valty)

    lary = context.make_array(aryty)(context, builder, ary)
    ptr = cgutils.get_item_pointer(context, builder, aryty, lary, indices,
                                   wraparound=True)

    if aryty.dtype in (cuda.cudadecl.integer_numba_types):
        lmod = builder.module
        bitwidth = aryty.dtype.bitwidth
        return nvvmutils.atomic_cmpxchg(builder, lmod, bitwidth, ptr, old, val)
    else:
        raise TypeError('Unimplemented atomic cas with %s array' % aryty.dtype)


# -----------------------------------------------------------------------------

@lower(stubs.nanosleep, types.uint32)
def ptx_nanosleep(context, builder, sig, args):
    # No NVVM intrinsic for nanosleep; emit inline PTX.
    nanosleep = ir.InlineAsm(ir.FunctionType(ir.VoidType(), [ir.IntType(32)]),
                             "nanosleep.u32 $0;", 'r', side_effect=True)
    ns = args[0]
    builder.call(nanosleep, [ns])


# -----------------------------------------------------------------------------


def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
                   can_dynsized=False):
    """Allocate a shared or local array and return it as a Numba array value.

    ``shape`` dimensions must be compile-time constants; an ``elemcount``
    of zero with ``can_dynsized`` selects dynamic shared memory (1-D only).
    """
    elemcount = reduce(operator.mul, shape, 1)

    # Check for valid shape for this type of allocation.
    # Only 1d arrays can be dynamic.
    dynamic_smem = elemcount <= 0 and can_dynsized and len(shape) == 1
    if elemcount <= 0 and not dynamic_smem:
        raise ValueError("array length <= 0")

    # Check that we support the requested dtype
    data_model = context.data_model_manager[dtype]
    other_supported_type = (
        isinstance(dtype, (types.Record, types.Boolean))
        or isinstance(data_model, models.StructModel)
        or dtype == types.float16
    )
    if dtype not in types.number_domain and not other_supported_type:
        raise TypeError("unsupported type: %s" % dtype)

    lldtype = context.get_data_type(dtype)
    laryty = ir.ArrayType(lldtype, elemcount)

    if addrspace == nvvm.ADDRSPACE_LOCAL:
        # Special case local address space allocation to use alloca
        # NVVM is smart enough to only use local memory if no register is
        # available
        dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
    else:
        lmod = builder.module

        # Create global variable in the requested address space
        gvmem = cgutils.add_global_variable(lmod, laryty, symbol_name,
                                            addrspace)
        # Specify alignment to avoid misalignment bug
        align = context.get_abi_sizeof(lldtype)
        # Alignment is required to be a power of 2 for shared memory. If it is
        # not a power of 2 (e.g. for a Record array) then round up accordingly.
        gvmem.align = 1 << (align - 1 ).bit_length()

        if dynamic_smem:
            gvmem.linkage = 'external'
        else:
            ## Comment out the following line to workaround a NVVM bug
            ## which generates an invalid symbol name when the linkage
            ## is internal and in some situation.
            ## See _get_unique_smem_id()
            # gvmem.linkage = lc.LINKAGE_INTERNAL

            gvmem.initializer = ir.Constant(laryty, ir.Undefined)

        # Convert to generic address-space
        dataptr = builder.addrspacecast(gvmem, ir.PointerType(ir.IntType(8)),
                                        'generic')

    targetdata = ll.create_target_data(nvvm.NVVM().data_layout)
    lldtype = context.get_data_type(dtype)
    itemsize = lldtype.get_abi_size(targetdata)

    # Compute strides (C-contiguous: innermost dimension has stride itemsize)
    laststride = itemsize
    rstrides = []
    for i, lastsize in enumerate(reversed(shape)):
        rstrides.append(laststride)
        laststride *= lastsize
    strides = [s for s in reversed(rstrides)]
    kstrides = [context.get_constant(types.intp, s) for s in strides]

    # Compute shape
    if dynamic_smem:
        # Compute the shape based on the dynamic shared memory configuration.
        # Unfortunately NVVM does not provide an intrinsic for the
        # %dynamic_smem_size register, so we must read it using inline
        # assembly.
        get_dynshared_size = ir.InlineAsm(ir.FunctionType(ir.IntType(32), []),
                                          "mov.u32 $0, %dynamic_smem_size;",
                                          '=r', side_effect=True)
        dynsmem_size = builder.zext(builder.call(get_dynshared_size, []),
                                    ir.IntType(64))
        # Only 1-D dynamic shared memory is supported so the following is a
        # sufficient construction of the shape
        kitemsize = context.get_constant(types.intp, itemsize)
        kshape = [builder.udiv(dynsmem_size, kitemsize)]
    else:
        kshape = [context.get_constant(types.intp, s) for s in shape]

    # Create array object
    ndim = len(shape)
    aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')
    ary = context.make_array(aryty)(context, builder)

    context.populate_array(ary,
                           data=builder.bitcast(dataptr, ary.data.type),
                           shape=kshape,
                           strides=kstrides,
                           itemsize=context.get_constant(types.intp, itemsize),
                           meminfo=None)
    return ary._getvalue()


@lower_constant(CUDADispatcher)
def cuda_dispatcher_const(context, builder, ty, pyval):
    # A dispatcher constant has no runtime representation.
    return context.get_dummy_value()


# NumPy

register_ufuncs(ufunc_db.get_ufuncs(), lower)


# --- numba/cuda/cudamath.py (next file in the original patch) ---
# Typing declarations for math.* functions on the CUDA target.
import math
from numba.core import types
from numba.core.typing.templates import ConcreteTemplate, signature, Registry


registry = Registry()
infer_global = registry.register_global


@infer_global(math.acos)
@infer_global(math.acosh)
@infer_global(math.asin)
@infer_global(math.asinh)
@infer_global(math.atan)
@infer_global(math.atanh)
@infer_global(math.cosh)
@infer_global(math.degrees)
@infer_global(math.erf)
@infer_global(math.erfc)
@infer_global(math.expm1)
@infer_global(math.gamma)
@infer_global(math.lgamma)
@infer_global(math.log1p)
@infer_global(math.radians)
@infer_global(math.sinh)
@infer_global(math.tanh)
@infer_global(math.tan)
class Math_unary(ConcreteTemplate):
    # Unary math functions without a float16 implementation.
    cases = [
        signature(types.float64, types.int64),
        signature(types.float64, types.uint64),
        signature(types.float32, types.float32),
        signature(types.float64, types.float64),
    ]


@infer_global(math.sin)
@infer_global(math.cos)
@infer_global(math.ceil)
@infer_global(math.floor)
@infer_global(math.sqrt)
@infer_global(math.log)
@infer_global(math.log2)
@infer_global(math.log10)
@infer_global(math.exp)
@infer_global(math.fabs)
@infer_global(math.trunc)
class Math_unary_with_fp16(ConcreteTemplate):
    # Unary math functions that additionally accept float16.
    cases = [
        signature(types.float64, types.int64),
        signature(types.float64, types.uint64),
        signature(types.float32, types.float32),
        signature(types.float64, types.float64),
        signature(types.float16, types.float16),
    ]


@infer_global(math.atan2)
class Math_atan2(ConcreteTemplate):
    key = math.atan2
    cases = [
        signature(types.float64, types.int64, types.int64),
        signature(types.float64, types.uint64, types.uint64),
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]


@infer_global(math.hypot)
class Math_hypot(ConcreteTemplate):
    key = math.hypot
    cases = [
        signature(types.float64, types.int64, types.int64),
        signature(types.float64, types.uint64, types.uint64),
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]


@infer_global(math.copysign)
@infer_global(math.fmod)
class Math_binary(ConcreteTemplate):
    cases = [
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]


@infer_global(math.remainder)
class Math_remainder(ConcreteTemplate):
    cases = [
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
    ]


@infer_global(math.pow)
class Math_pow(ConcreteTemplate):
    cases = [
        signature(types.float32, types.float32, types.float32),
        signature(types.float64, types.float64, types.float64),
        signature(types.float32, types.float32, types.int32),
        signature(types.float64, types.float64, types.int32),
    ]


@infer_global(math.frexp)
class Math_frexp(ConcreteTemplate):
    # frexp returns a (mantissa, exponent) pair.
    cases = [
        signature(types.Tuple([types.float32, types.int32]), types.float32),
        signature(types.Tuple([types.float64, types.int32]), types.float64),
    ]


@infer_global(math.ldexp)
class Math_ldexp(ConcreteTemplate):
    cases = [
        signature(types.float32, types.float32, types.int32),
        signature(types.float64, types.float64, types.int32),
    ]


@infer_global(math.isinf)
@infer_global(math.isnan)
@infer_global(math.isfinite)
class Math_isnan(ConcreteTemplate):
    cases = [
        signature(types.boolean, types.int64),
        signature(types.boolean, types.uint64),
        signature(types.boolean, types.float32),
        signature(types.boolean, types.float64),
    ]


@infer_global(math.modf)
class Math_modf(ConcreteTemplate):
    # modf returns a (fractional, integral) pair of the input type.
    cases = [
        signature(types.UniTuple(types.float64, 2), types.float64),
        signature(types.UniTuple(types.float32, 2), types.float32)
    ]


# --- numba/cuda/decorators.py (next file in the original patch) ---
from warnings import warn
from numba.core import types, config, sigutils
from numba.core.errors import DeprecationError, NumbaInvalidConfigWarning
from numba.cuda.compiler import declare_device_function
from numba.cuda.dispatcher import CUDADispatcher
from numba.cuda.simulator.kernel import FakeCUDAKernel


# NOTE(review): the original continues with
#   _msg_deprecated_signature_arg = ("Deprecated keyword argument `{0}`. ...
# but the string literal is cut off at the edge of this view, so the rest
# of decorators.py is not reproduced here.
" + "Signatures should be passed as the first " + "positional argument.") + + +def jit(func_or_sig=None, device=False, inline=False, link=[], debug=None, + opt=True, lineinfo=False, cache=False, **kws): + """ + JIT compile a Python function for CUDA GPUs. + + :param func_or_sig: A function to JIT compile, or *signatures* of a + function to compile. If a function is supplied, then a + :class:`Dispatcher ` is returned. + Otherwise, ``func_or_sig`` may be a signature or a list of signatures, + and a function is returned. The returned function accepts another + function, which it will compile and then return a :class:`Dispatcher + `. See :ref:`jit-decorator` for + more information about passing signatures. + + .. note:: A kernel cannot have any return value. + :param device: Indicates whether this is a device function. + :type device: bool + :param link: A list of files containing PTX or CUDA C/C++ source to link + with the function + :type link: list + :param debug: If True, check for exceptions thrown when executing the + kernel. Since this degrades performance, this should only be used for + debugging purposes. If set to True, then ``opt`` should be set to False. + Defaults to False. (The default value can be overridden by setting + environment variable ``NUMBA_CUDA_DEBUGINFO=1``.) + :param fastmath: When True, enables fastmath optimizations as outlined in + the :ref:`CUDA Fast Math documentation `. + :param max_registers: Request that the kernel is limited to using at most + this number of registers per thread. The limit may not be respected if + the ABI requires a greater number of registers than that requested. + Useful for increasing occupancy. + :param opt: Whether to compile from LLVM IR to PTX with optimization + enabled. When ``True``, ``-opt=3`` is passed to NVVM. When + ``False``, ``-opt=0`` is passed to NVVM. Defaults to ``True``. + :type opt: bool + :param lineinfo: If True, generate a line mapping between source code and + assembly code. 
This enables inspection of the source code in NVIDIA + profiling tools and correlation with program counter sampling. + :type lineinfo: bool + :param cache: If True, enables the file-based cache for this function. + :type cache: bool + """ + + if link and config.ENABLE_CUDASIM: + raise NotImplementedError('Cannot link PTX in the simulator') + + if kws.get('boundscheck'): + raise NotImplementedError("bounds checking is not supported for CUDA") + + if kws.get('argtypes') is not None: + msg = _msg_deprecated_signature_arg.format('argtypes') + raise DeprecationError(msg) + if kws.get('restype') is not None: + msg = _msg_deprecated_signature_arg.format('restype') + raise DeprecationError(msg) + if kws.get('bind') is not None: + msg = _msg_deprecated_signature_arg.format('bind') + raise DeprecationError(msg) + + debug = config.CUDA_DEBUGINFO_DEFAULT if debug is None else debug + fastmath = kws.get('fastmath', False) + extensions = kws.get('extensions', []) + + if debug and opt: + msg = ("debug=True with opt=True (the default) " + "is not supported by CUDA. This may result in a crash" + " - set debug=False or opt=False.") + warn(NumbaInvalidConfigWarning(msg)) + + if debug and lineinfo: + msg = ("debug and lineinfo are mutually exclusive. 
Use debug to get " + "full debug info (this disables some optimizations), or " + "lineinfo for line info only with code generation unaffected.") + warn(NumbaInvalidConfigWarning(msg)) + + if device and kws.get('link'): + raise ValueError("link keyword invalid for device function") + + if sigutils.is_signature(func_or_sig): + signatures = [func_or_sig] + specialized = True + elif isinstance(func_or_sig, list): + signatures = func_or_sig + specialized = False + else: + signatures = None + + if signatures is not None: + if config.ENABLE_CUDASIM: + def jitwrapper(func): + return FakeCUDAKernel(func, device=device, fastmath=fastmath) + return jitwrapper + + def _jit(func): + targetoptions = kws.copy() + targetoptions['debug'] = debug + targetoptions['lineinfo'] = lineinfo + targetoptions['link'] = link + targetoptions['opt'] = opt + targetoptions['fastmath'] = fastmath + targetoptions['device'] = device + targetoptions['extensions'] = extensions + + disp = CUDADispatcher(func, targetoptions=targetoptions) + + if cache: + disp.enable_caching() + + for sig in signatures: + argtypes, restype = sigutils.normalize_signature(sig) + + if restype and not device and restype != types.void: + raise TypeError("CUDA kernel must have void return type.") + + if device: + from numba.core import typeinfer + with typeinfer.register_dispatcher(disp): + disp.compile_device(argtypes, restype) + else: + disp.compile(argtypes) + + disp._specialized = specialized + disp.disable_compile() + + return disp + + return _jit + else: + if func_or_sig is None: + if config.ENABLE_CUDASIM: + def autojitwrapper(func): + return FakeCUDAKernel(func, device=device, + fastmath=fastmath) + else: + def autojitwrapper(func): + return jit(func, device=device, debug=debug, opt=opt, + lineinfo=lineinfo, link=link, cache=cache, **kws) + + return autojitwrapper + # func_or_sig is a function + else: + if config.ENABLE_CUDASIM: + return FakeCUDAKernel(func_or_sig, device=device, + fastmath=fastmath) + else: + 
targetoptions = kws.copy() + targetoptions['debug'] = debug + targetoptions['lineinfo'] = lineinfo + targetoptions['opt'] = opt + targetoptions['link'] = link + targetoptions['fastmath'] = fastmath + targetoptions['device'] = device + targetoptions['extensions'] = extensions + disp = CUDADispatcher(func_or_sig, targetoptions=targetoptions) + + if cache: + disp.enable_caching() + + return disp + + +def declare_device(name, sig): + """ + Declare the signature of a foreign function. Returns a descriptor that can + be used to call the function from a Python kernel. + + :param name: The name of the foreign function. + :type name: str + :param sig: The Numba signature of the function. + """ + argtypes, restype = sigutils.normalize_signature(sig) + if restype is None: + msg = 'Return type must be provided for device declarations' + raise TypeError(msg) + + return declare_device_function(name, restype, argtypes) diff --git a/lib/python3.10/site-packages/numba/cuda/descriptor.py b/lib/python3.10/site-packages/numba/cuda/descriptor.py new file mode 100644 index 0000000000000000000000000000000000000000..b91ddf7a1b9b1fe5eca6b92162d68b155c34204c --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/descriptor.py @@ -0,0 +1,33 @@ +from numba.core.descriptors import TargetDescriptor +from numba.core.options import TargetOptions +from .target import CUDATargetContext, CUDATypingContext + + +class CUDATargetOptions(TargetOptions): + pass + + +class CUDATarget(TargetDescriptor): + def __init__(self, name): + self.options = CUDATargetOptions + # The typing and target contexts are initialized only when needed - + # this prevents an attempt to load CUDA libraries at import time on + # systems that might not have them present. 
+ self._typingctx = None + self._targetctx = None + super().__init__(name) + + @property + def typing_context(self): + if self._typingctx is None: + self._typingctx = CUDATypingContext() + return self._typingctx + + @property + def target_context(self): + if self._targetctx is None: + self._targetctx = CUDATargetContext(self._typingctx) + return self._targetctx + + +cuda_target = CUDATarget('cuda') diff --git a/lib/python3.10/site-packages/numba/cuda/device_init.py b/lib/python3.10/site-packages/numba/cuda/device_init.py new file mode 100644 index 0000000000000000000000000000000000000000..9df5ae99d8f5ad2a40a926a1e40343d9b8797fd6 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/device_init.py @@ -0,0 +1,89 @@ +# Re export +import sys +from numba.cuda import cg +from .stubs import (threadIdx, blockIdx, blockDim, gridDim, laneid, warpsize, + syncwarp, shared, local, const, atomic, + shfl_sync_intrinsic, vote_sync_intrinsic, match_any_sync, + match_all_sync, threadfence_block, threadfence_system, + threadfence, selp, popc, brev, clz, ffs, fma, cbrt, + activemask, lanemask_lt, nanosleep, fp16, + _vector_type_stubs) +from .intrinsics import (grid, gridsize, syncthreads, syncthreads_and, + syncthreads_count, syncthreads_or) +from .cudadrv.error import CudaSupportError +from numba.cuda.cudadrv.driver import (BaseCUDAMemoryManager, + HostOnlyCUDAMemoryManager, + GetIpcHandleMixin, MemoryPointer, + MappedMemory, PinnedMemory, MemoryInfo, + IpcHandle, set_memory_manager) +from numba.cuda.cudadrv.runtime import runtime +from .cudadrv import nvvm +from numba.cuda import initialize +from .errors import KernelRuntimeError + +from .decorators import jit, declare_device +from .api import * +from .api import _auto_device +from .args import In, Out, InOut + +from .intrinsic_wrapper import (all_sync, any_sync, eq_sync, ballot_sync, + shfl_sync, shfl_up_sync, shfl_down_sync, + shfl_xor_sync) + +from .kernels import reduction + +reduce = Reduce = reduction.Reduce + +# Expose 
vector type constructors and aliases as module level attributes. +for vector_type_stub in _vector_type_stubs: + setattr(sys.modules[__name__], vector_type_stub.__name__, vector_type_stub) + for alias in vector_type_stub.aliases: + setattr(sys.modules[__name__], alias, vector_type_stub) +del vector_type_stub, _vector_type_stubs + + +def is_available(): + """Returns a boolean to indicate the availability of a CUDA GPU. + + This will initialize the driver if it hasn't been initialized. + """ + # whilst `driver.is_available` will init the driver itself, + # the driver initialization may raise and as a result break + # test discovery/orchestration as `cuda.is_available` is often + # used as a guard for whether to run a CUDA test, the try/except + # below is to handle this case. + driver_is_available = False + try: + driver_is_available = driver.driver.is_available + except CudaSupportError: + pass + + return driver_is_available and nvvm.is_available() + + +def is_supported_version(): + """Returns True if the CUDA Runtime is a supported version. + + Unsupported versions (e.g. newer versions than those known to Numba) + may still work; this function provides a facility to check whether the + current Numba version is tested and known to work with the current + runtime version. If the current version is unsupported, the caller can + decide how to act. Options include: + + - Continuing silently, + - Emitting a warning, + - Generating an error or otherwise preventing the use of CUDA. + """ + + return runtime.is_supported_version() + + +def cuda_error(): + """Returns None if there was no error initializing the CUDA driver. + If there was an error initializing the driver, a string describing the + error is returned. 
+ """ + return driver.driver.initialization_error + + +initialize.initialize_all() diff --git a/lib/python3.10/site-packages/numba/cuda/deviceufunc.py b/lib/python3.10/site-packages/numba/cuda/deviceufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..c29335a91b6b810b2a4731901217c389867a8bbf --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/deviceufunc.py @@ -0,0 +1,908 @@ +""" +Implements custom ufunc dispatch mechanism for non-CPU devices. +""" + +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +import operator +import warnings +from functools import reduce + +import numpy as np + +from numba.np.ufunc.ufuncbuilder import _BaseUFuncBuilder, parse_identity +from numba.core import types, sigutils +from numba.core.typing import signature +from numba.np.ufunc.sigparse import parse_signature + + +def _broadcast_axis(a, b): + """ + Raises + ------ + ValueError if broadcast fails + """ + if a == b: + return a + elif a == 1: + return b + elif b == 1: + return a + else: + raise ValueError("failed to broadcast {0} and {1}".format(a, b)) + + +def _pairwise_broadcast(shape1, shape2): + """ + Raises + ------ + ValueError if broadcast fails + """ + shape1, shape2 = map(tuple, [shape1, shape2]) + + while len(shape1) < len(shape2): + shape1 = (1,) + shape1 + + while len(shape1) > len(shape2): + shape2 = (1,) + shape2 + + return tuple(_broadcast_axis(a, b) for a, b in zip(shape1, shape2)) + + +def _multi_broadcast(*shapelist): + """ + Raises + ------ + ValueError if broadcast fails + """ + assert shapelist + + result = shapelist[0] + others = shapelist[1:] + try: + for i, each in enumerate(others, start=1): + result = _pairwise_broadcast(result, each) + except ValueError: + raise ValueError("failed to broadcast argument #{0}".format(i)) + else: + return result + + +class UFuncMechanism(object): + """ + Prepare ufunc arguments for vectorize. 
+ """ + DEFAULT_STREAM = None + SUPPORT_DEVICE_SLICING = False + + def __init__(self, typemap, args): + """Never used directly by user. Invoke by UFuncMechanism.call(). + """ + self.typemap = typemap + self.args = args + nargs = len(self.args) + self.argtypes = [None] * nargs + self.scalarpos = [] + self.signature = None + self.arrays = [None] * nargs + + def _fill_arrays(self): + """ + Get all arguments in array form + """ + for i, arg in enumerate(self.args): + if self.is_device_array(arg): + self.arrays[i] = self.as_device_array(arg) + elif isinstance(arg, (int, float, complex, np.number)): + # Is scalar + self.scalarpos.append(i) + else: + self.arrays[i] = np.asarray(arg) + + def _fill_argtypes(self): + """ + Get dtypes + """ + for i, ary in enumerate(self.arrays): + if ary is not None: + dtype = getattr(ary, 'dtype') + if dtype is None: + dtype = np.asarray(ary).dtype + self.argtypes[i] = dtype + + def _resolve_signature(self): + """Resolve signature. + May have ambiguous case. + """ + matches = [] + # Resolve scalar args exact match first + if self.scalarpos: + # Try resolve scalar arguments + for formaltys in self.typemap: + match_map = [] + for i, (formal, actual) in enumerate(zip(formaltys, + self.argtypes)): + if actual is None: + actual = np.asarray(self.args[i]).dtype + + match_map.append(actual == formal) + + if all(match_map): + matches.append(formaltys) + + # No matching with exact match; try coercing the scalar arguments + if not matches: + matches = [] + for formaltys in self.typemap: + all_matches = all(actual is None or formal == actual + for formal, actual in + zip(formaltys, self.argtypes)) + if all_matches: + matches.append(formaltys) + + if not matches: + raise TypeError("No matching version. GPU ufunc requires array " + "arguments to have the exact types. This behaves " + "like regular ufunc with casting='no'.") + + if len(matches) > 1: + raise TypeError("Failed to resolve ufunc due to ambiguous " + "signature. Too many untyped scalars. 
" + "Use numpy dtype object to type tag.") + + # Try scalar arguments + self.argtypes = matches[0] + + def _get_actual_args(self): + """Return the actual arguments + Casts scalar arguments to np.array. + """ + for i in self.scalarpos: + self.arrays[i] = np.array([self.args[i]], dtype=self.argtypes[i]) + + return self.arrays + + def _broadcast(self, arys): + """Perform numpy ufunc broadcasting + """ + shapelist = [a.shape for a in arys] + shape = _multi_broadcast(*shapelist) + + for i, ary in enumerate(arys): + if ary.shape == shape: + pass + + else: + if self.is_device_array(ary): + arys[i] = self.broadcast_device(ary, shape) + + else: + ax_differs = [ax for ax in range(len(shape)) + if ax >= ary.ndim + or ary.shape[ax] != shape[ax]] + + missingdim = len(shape) - len(ary.shape) + strides = [0] * missingdim + list(ary.strides) + + for ax in ax_differs: + strides[ax] = 0 + + strided = np.lib.stride_tricks.as_strided(ary, + shape=shape, + strides=strides) + + arys[i] = self.force_array_layout(strided) + + return arys + + def get_arguments(self): + """Prepare and return the arguments for the ufunc. + Does not call to_device(). + """ + self._fill_arrays() + self._fill_argtypes() + self._resolve_signature() + arys = self._get_actual_args() + return self._broadcast(arys) + + def get_function(self): + """Returns (result_dtype, function) + """ + return self.typemap[self.argtypes] + + def is_device_array(self, obj): + """Is the `obj` a device array? + Override in subclass + """ + return False + + def as_device_array(self, obj): + """Convert the `obj` to a device array + Override in subclass + + Default implementation is an identity function + """ + return obj + + def broadcast_device(self, ary, shape): + """Handles ondevice broadcasting + + Override in subclass to add support. + """ + raise NotImplementedError("broadcasting on device is not supported") + + def force_array_layout(self, ary): + """Ensures array layout met device requirement. 
Override in subclass
devout.reshape(outshape) + + else: + # If output is provided and it is a host array, + # Return host array + assert out.shape == shape + assert out.dtype == resty + devout = cr.allocate_device_array(shape, resty, stream=stream) + devarys.extend([devout]) + cr.launch(func, shape[0], stream, devarys) + return devout.copy_to_host(out, stream=stream).reshape(outshape) + + def to_device(self, hostary, stream): + """Implement to device transfer + Override in subclass + """ + raise NotImplementedError + + def to_host(self, devary, stream): + """Implement to host transfer + Override in subclass + """ + raise NotImplementedError + + def allocate_device_array(self, shape, dtype, stream): + """Implements device allocation + Override in subclass + """ + raise NotImplementedError + + def launch(self, func, count, stream, args): + """Implements device function invocation + Override in subclass + """ + raise NotImplementedError + + +def to_dtype(ty): + if isinstance(ty, types.EnumMember): + ty = ty.dtype + return np.dtype(str(ty)) + + +class DeviceVectorize(_BaseUFuncBuilder): + def __init__(self, func, identity=None, cache=False, targetoptions={}): + if cache: + raise TypeError("caching is not supported") + for opt in targetoptions: + if opt == 'nopython': + warnings.warn("nopython kwarg for cuda target is redundant", + RuntimeWarning) + else: + fmt = "Unrecognized options. 
" + fmt += "cuda vectorize target does not support option: '%s'" + raise KeyError(fmt % opt) + self.py_func = func + self.identity = parse_identity(identity) + # { arg_dtype: (return_dtype), cudakernel } + self.kernelmap = OrderedDict() + + @property + def pyfunc(self): + return self.py_func + + def add(self, sig=None): + # compile core as device function + args, return_type = sigutils.normalize_signature(sig) + devfnsig = signature(return_type, *args) + + funcname = self.pyfunc.__name__ + kernelsource = self._get_kernel_source(self._kernel_template, + devfnsig, funcname) + corefn, return_type = self._compile_core(devfnsig) + glbl = self._get_globals(corefn) + sig = signature(types.void, *([a[:] for a in args] + [return_type[:]])) + exec(kernelsource, glbl) + + stager = glbl['__vectorized_%s' % funcname] + kernel = self._compile_kernel(stager, sig) + + argdtypes = tuple(to_dtype(t) for t in devfnsig.args) + resdtype = to_dtype(return_type) + self.kernelmap[tuple(argdtypes)] = resdtype, kernel + + def build_ufunc(self): + raise NotImplementedError + + def _get_kernel_source(self, template, sig, funcname): + args = ['a%d' % i for i in range(len(sig.args))] + fmts = dict(name=funcname, + args=', '.join(args), + argitems=', '.join('%s[__tid__]' % i for i in args)) + return template.format(**fmts) + + def _compile_core(self, sig): + raise NotImplementedError + + def _get_globals(self, corefn): + raise NotImplementedError + + def _compile_kernel(self, fnobj, sig): + raise NotImplementedError + + +class DeviceGUFuncVectorize(_BaseUFuncBuilder): + def __init__(self, func, sig, identity=None, cache=False, targetoptions={}, + writable_args=()): + if cache: + raise TypeError("caching is not supported") + if writable_args: + raise TypeError("writable_args are not supported") + + # Allow nopython flag to be set. + if not targetoptions.pop('nopython', True): + raise TypeError("nopython flag must be True") + # Are there any more target options? 
+ if targetoptions: + opts = ', '.join([repr(k) for k in targetoptions.keys()]) + fmt = "The following target options are not supported: {0}" + raise TypeError(fmt.format(opts)) + + self.py_func = func + self.identity = parse_identity(identity) + self.signature = sig + self.inputsig, self.outputsig = parse_signature(self.signature) + + # Maps from a tuple of input_dtypes to (output_dtypes, kernel) + self.kernelmap = OrderedDict() + + @property + def pyfunc(self): + return self.py_func + + def add(self, sig=None): + indims = [len(x) for x in self.inputsig] + outdims = [len(x) for x in self.outputsig] + args, return_type = sigutils.normalize_signature(sig) + + # It is only valid to specify types.none as a return type, or to not + # specify the return type (where the "Python None" is the return type) + valid_return_type = return_type in (types.none, None) + if not valid_return_type: + raise TypeError('guvectorized functions cannot return values: ' + f'signature {sig} specifies {return_type} return ' + 'type') + + funcname = self.py_func.__name__ + src = expand_gufunc_template(self._kernel_template, indims, + outdims, funcname, args) + + glbls = self._get_globals(sig) + + exec(src, glbls) + fnobj = glbls['__gufunc_{name}'.format(name=funcname)] + + outertys = list(_determine_gufunc_outer_types(args, indims + outdims)) + kernel = self._compile_kernel(fnobj, sig=tuple(outertys)) + + nout = len(outdims) + dtypes = [np.dtype(str(t.dtype)) for t in outertys] + indtypes = tuple(dtypes[:-nout]) + outdtypes = tuple(dtypes[-nout:]) + + self.kernelmap[indtypes] = outdtypes, kernel + + def _compile_kernel(self, fnobj, sig): + raise NotImplementedError + + def _get_globals(self, sig): + raise NotImplementedError + + +def _determine_gufunc_outer_types(argtys, dims): + for at, nd in zip(argtys, dims): + if isinstance(at, types.Array): + yield at.copy(ndim=nd + 1) + else: + if nd > 0: + raise ValueError("gufunc signature mismatch: ndim>0 for scalar") + yield types.Array(dtype=at, 
ndim=1, layout='A') + + +def expand_gufunc_template(template, indims, outdims, funcname, argtypes): + """Expand gufunc source template + """ + argdims = indims + outdims + argnames = ["arg{0}".format(i) for i in range(len(argdims))] + checkedarg = "min({0})".format(', '.join(["{0}.shape[0]".format(a) + for a in argnames])) + inputs = [_gen_src_for_indexing(aref, adims, atype) + for aref, adims, atype in zip(argnames, indims, argtypes)] + outputs = [_gen_src_for_indexing(aref, adims, atype) + for aref, adims, atype in zip(argnames[len(indims):], outdims, + argtypes[len(indims):])] + argitems = inputs + outputs + src = template.format(name=funcname, args=', '.join(argnames), + checkedarg=checkedarg, + argitems=', '.join(argitems)) + return src + + +def _gen_src_for_indexing(aref, adims, atype): + return "{aref}[{sliced}]".format(aref=aref, + sliced=_gen_src_index(adims, atype)) + + +def _gen_src_index(adims, atype): + if adims > 0: + return ','.join(['__tid__'] + [':'] * adims) + elif isinstance(atype, types.Array) and atype.ndim - 1 == adims: + # Special case for 0-nd in shape-signature but + # 1d array in type signature. + # Slice it so that the result has the same dimension. 
+ return '__tid__:(__tid__ + 1)' + else: + return '__tid__' + + +class GUFuncEngine(object): + '''Determine how to broadcast and execute a gufunc + base on input shape and signature + ''' + + @classmethod + def from_signature(cls, signature): + return cls(*parse_signature(signature)) + + def __init__(self, inputsig, outputsig): + # signatures + self.sin = inputsig + self.sout = outputsig + # argument count + self.nin = len(self.sin) + self.nout = len(self.sout) + + def schedule(self, ishapes): + if len(ishapes) != self.nin: + raise TypeError('invalid number of input argument') + + # associate symbol values for input signature + symbolmap = {} + outer_shapes = [] + inner_shapes = [] + + for argn, (shape, symbols) in enumerate(zip(ishapes, self.sin)): + argn += 1 # start from 1 for human + inner_ndim = len(symbols) + if len(shape) < inner_ndim: + fmt = "arg #%d: insufficient inner dimension" + raise ValueError(fmt % (argn,)) + if inner_ndim: + inner_shape = shape[-inner_ndim:] + outer_shape = shape[:-inner_ndim] + else: + inner_shape = () + outer_shape = shape + + for axis, (dim, sym) in enumerate(zip(inner_shape, symbols)): + axis += len(outer_shape) + if sym in symbolmap: + if symbolmap[sym] != dim: + fmt = "arg #%d: shape[%d] mismatch argument" + raise ValueError(fmt % (argn, axis)) + symbolmap[sym] = dim + + outer_shapes.append(outer_shape) + inner_shapes.append(inner_shape) + + # solve output shape + oshapes = [] + for outsig in self.sout: + oshape = [] + for sym in outsig: + oshape.append(symbolmap[sym]) + oshapes.append(tuple(oshape)) + + # find the biggest outershape as looping dimension + sizes = [reduce(operator.mul, s, 1) for s in outer_shapes] + largest_i = np.argmax(sizes) + loopdims = outer_shapes[largest_i] + + pinned = [False] * self.nin # same argument for each iteration + for i, d in enumerate(outer_shapes): + if d != loopdims: + if d == (1,) or d == (): + pinned[i] = True + else: + fmt = "arg #%d: outer dimension mismatch" + raise ValueError(fmt % 
(i + 1,)) + + return GUFuncSchedule(self, inner_shapes, oshapes, loopdims, pinned) + + +class GUFuncSchedule(object): + def __init__(self, parent, ishapes, oshapes, loopdims, pinned): + self.parent = parent + # core shapes + self.ishapes = ishapes + self.oshapes = oshapes + # looping dimension + self.loopdims = loopdims + self.loopn = reduce(operator.mul, loopdims, 1) + # flags + self.pinned = pinned + + self.output_shapes = [loopdims + s for s in oshapes] + + def __str__(self): + import pprint + + attrs = 'ishapes', 'oshapes', 'loopdims', 'loopn', 'pinned' + values = [(k, getattr(self, k)) for k in attrs] + return pprint.pformat(dict(values)) + + +class GeneralizedUFunc(object): + def __init__(self, kernelmap, engine): + self.kernelmap = kernelmap + self.engine = engine + self.max_blocksize = 2 ** 30 + + def __call__(self, *args, **kws): + callsteps = self._call_steps(self.engine.nin, self.engine.nout, + args, kws) + indtypes, schedule, outdtypes, kernel = self._schedule( + callsteps.inputs, callsteps.outputs) + callsteps.adjust_input_types(indtypes) + + outputs = callsteps.prepare_outputs(schedule, outdtypes) + inputs = callsteps.prepare_inputs() + parameters = self._broadcast(schedule, inputs, outputs) + + callsteps.launch_kernel(kernel, schedule.loopn, parameters) + + return callsteps.post_process_outputs(outputs) + + def _schedule(self, inputs, outs): + input_shapes = [a.shape for a in inputs] + schedule = self.engine.schedule(input_shapes) + + # find kernel + indtypes = tuple(i.dtype for i in inputs) + try: + outdtypes, kernel = self.kernelmap[indtypes] + except KeyError: + # No exact match, then use the first compatible. + # This does not match the numpy dispatching exactly. + # Later, we may just jit a new version for the missing signature. 
+ indtypes = self._search_matching_signature(indtypes) + # Select kernel + outdtypes, kernel = self.kernelmap[indtypes] + + # check output + for sched_shape, out in zip(schedule.output_shapes, outs): + if out is not None and sched_shape != out.shape: + raise ValueError('output shape mismatch') + + return indtypes, schedule, outdtypes, kernel + + def _search_matching_signature(self, idtypes): + """ + Given the input types in `idtypes`, return a compatible sequence of + types that is defined in `kernelmap`. + + Note: Ordering is guaranteed by `kernelmap` being a OrderedDict + """ + for sig in self.kernelmap.keys(): + if all(np.can_cast(actual, desired) + for actual, desired in zip(sig, idtypes)): + return sig + else: + raise TypeError("no matching signature") + + def _broadcast(self, schedule, params, retvals): + assert schedule.loopn > 0, "zero looping dimension" + + odim = 1 if not schedule.loopdims else schedule.loopn + newparams = [] + for p, cs in zip(params, schedule.ishapes): + if not cs and p.size == 1: + # Broadcast scalar input + devary = self._broadcast_scalar_input(p, odim) + newparams.append(devary) + else: + # Broadcast vector input + newparams.append(self._broadcast_array(p, odim, cs)) + + newretvals = [] + for retval, oshape in zip(retvals, schedule.oshapes): + newretvals.append(retval.reshape(odim, *oshape)) + return tuple(newparams) + tuple(newretvals) + + def _broadcast_array(self, ary, newdim, innerdim): + newshape = (newdim,) + innerdim + # No change in shape + if ary.shape == newshape: + return ary + + # Creating new dimension + elif len(ary.shape) < len(newshape): + assert newshape[-len(ary.shape):] == ary.shape, \ + "cannot add dim and reshape at the same time" + return self._broadcast_add_axis(ary, newshape) + + # Collapsing dimension + else: + return ary.reshape(*newshape) + + def _broadcast_add_axis(self, ary, newshape): + raise NotImplementedError("cannot add new axis") + + def _broadcast_scalar_input(self, ary, shape): + raise 
NotImplementedError + + +class GUFuncCallSteps(metaclass=ABCMeta): + """ + Implements memory management and kernel launch operations for GUFunc calls. + + One instance of this class is instantiated for each call, and the instance + is specific to the arguments given to the GUFunc call. + + The base class implements the overall logic; subclasses provide + target-specific implementations of individual functions. + """ + + # The base class uses these slots; subclasses may provide additional slots. + __slots__ = [ + 'outputs', + 'inputs', + '_copy_result_to_host', + ] + + @abstractmethod + def launch_kernel(self, kernel, nelem, args): + """Implement the kernel launch""" + + @abstractmethod + def is_device_array(self, obj): + """ + Return True if `obj` is a device array for this target, False + otherwise. + """ + + @abstractmethod + def as_device_array(self, obj): + """ + Return `obj` as a device array on this target. + + May return `obj` directly if it is already on the target. + """ + + @abstractmethod + def to_device(self, hostary): + """ + Copy `hostary` to the device and return the device array. + """ + + @abstractmethod + def allocate_device_array(self, shape, dtype): + """ + Allocate a new uninitialized device array with the given shape and + dtype. + """ + + def __init__(self, nin, nout, args, kwargs): + outputs = kwargs.get('out') + + # Ensure the user has passed a correct number of arguments + if outputs is None and len(args) not in (nin, (nin + nout)): + def pos_argn(n): + return f'{n} positional argument{"s" * (n != 1)}' + + msg = (f'This gufunc accepts {pos_argn(nin)} (when providing ' + f'input only) or {pos_argn(nin + nout)} (when providing ' + f'input and output). 
Got {pos_argn(len(args))}.') + raise TypeError(msg) + + if outputs is not None and len(args) > nin: + raise ValueError("cannot specify argument 'out' as both positional " + "and keyword") + else: + # If the user did not pass outputs either in the out kwarg or as + # positional arguments, then we need to generate an initial list of + # "placeholder" outputs using None as a sentry value + outputs = [outputs] * nout + + # Ensure all output device arrays are Numba device arrays - for + # example, any output passed in that supports the CUDA Array Interface + # is converted to a Numba CUDA device array; others are left untouched. + all_user_outputs_are_host = True + self.outputs = [] + for output in outputs: + if self.is_device_array(output): + self.outputs.append(self.as_device_array(output)) + all_user_outputs_are_host = False + else: + self.outputs.append(output) + + all_host_arrays = not any([self.is_device_array(a) for a in args]) + + # - If any of the arguments are device arrays, we leave the output on + # the device. + self._copy_result_to_host = (all_host_arrays and + all_user_outputs_are_host) + + # Normalize arguments - ensure they are either device- or host-side + # arrays (as opposed to lists, tuples, etc). + def normalize_arg(a): + if self.is_device_array(a): + convert = self.as_device_array + else: + convert = np.asarray + + return convert(a) + + normalized_args = [normalize_arg(a) for a in args] + self.inputs = normalized_args[:nin] + + # Check if there are extra arguments for outputs. + unused_inputs = normalized_args[nin:] + if unused_inputs: + self.outputs = unused_inputs + + def adjust_input_types(self, indtypes): + """ + Attempt to cast the inputs to the required types if necessary + and if they are not device arrays. + + Side effect: Only affects the elements of `inputs` that require + a type cast. 
+ """ + for i, (ity, val) in enumerate(zip(indtypes, self.inputs)): + if ity != val.dtype: + if not hasattr(val, 'astype'): + msg = ("compatible signature is possible by casting but " + "{0} does not support .astype()").format(type(val)) + raise TypeError(msg) + # Cast types + self.inputs[i] = val.astype(ity) + + def prepare_outputs(self, schedule, outdtypes): + """ + Returns a list of output parameters that all reside on the target + device. + + Outputs that were passed-in to the GUFunc are used if they reside on the + device; other outputs are allocated as necessary. + """ + outputs = [] + for shape, dtype, output in zip(schedule.output_shapes, outdtypes, + self.outputs): + if output is None or self._copy_result_to_host: + output = self.allocate_device_array(shape, dtype) + outputs.append(output) + + return outputs + + def prepare_inputs(self): + """ + Returns a list of input parameters that all reside on the target device. + """ + def ensure_device(parameter): + if self.is_device_array(parameter): + convert = self.as_device_array + else: + convert = self.to_device + + return convert(parameter) + + return [ensure_device(p) for p in self.inputs] + + def post_process_outputs(self, outputs): + """ + Moves the given output(s) to the host if necessary. + + Returns a single value (e.g. an array) if there was one output, or a + tuple of arrays if there were multiple. Although this feels a little + jarring, it is consistent with the behavior of GUFuncs in general. 
+ """ + if self._copy_result_to_host: + outputs = [self.to_host(output, self_output) + for output, self_output in zip(outputs, self.outputs)] + elif self.outputs[0] is not None: + outputs = self.outputs + + if len(outputs) == 1: + return outputs[0] + else: + return tuple(outputs) diff --git a/lib/python3.10/site-packages/numba/cuda/dispatcher.py b/lib/python3.10/site-packages/numba/cuda/dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..4a750a1aa4ce21935b7e305e89c5d7eee690465f --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/dispatcher.py @@ -0,0 +1,1057 @@ +import numpy as np +import os +import sys +import ctypes +import functools + +from numba.core import config, serialize, sigutils, types, typing, utils +from numba.core.caching import Cache, CacheImpl +from numba.core.compiler_lock import global_compiler_lock +from numba.core.dispatcher import Dispatcher +from numba.core.errors import NumbaPerformanceWarning +from numba.core.typing.typeof import Purpose, typeof + +from numba.cuda.api import get_current_device +from numba.cuda.args import wrap_arg +from numba.cuda.compiler import compile_cuda, CUDACompiler +from numba.cuda.cudadrv import driver +from numba.cuda.cudadrv.devices import get_context +from numba.cuda.descriptor import cuda_target +from numba.cuda.errors import (missing_launch_config_msg, + normalize_kernel_dimensions) +from numba.cuda import types as cuda_types + +from numba import cuda +from numba import _dispatcher + +from warnings import warn + +cuda_fp16_math_funcs = ['hsin', 'hcos', + 'hlog', 'hlog10', + 'hlog2', + 'hexp', 'hexp10', + 'hexp2', + 'hsqrt', 'hrsqrt', + 'hfloor', 'hceil', + 'hrcp', 'hrint', + 'htrunc', 'hdiv'] + + +class _Kernel(serialize.ReduceMixin): + ''' + CUDA Kernel specialized for a given set of argument types. When called, this + object launches the kernel on the device. 
+ ''' + + @global_compiler_lock + def __init__(self, py_func, argtypes, link=None, debug=False, + lineinfo=False, inline=False, fastmath=False, extensions=None, + max_registers=None, opt=True, device=False): + + if device: + raise RuntimeError('Cannot compile a device function as a kernel') + + super().__init__() + + # _DispatcherBase.nopython_signatures() expects this attribute to be + # present, because it assumes an overload is a CompileResult. In the + # CUDA target, _Kernel instances are stored instead, so we provide this + # attribute here to avoid duplicating nopython_signatures() in the CUDA + # target with slight modifications. + self.objectmode = False + + # The finalizer constructed by _DispatcherBase._make_finalizer also + # expects overloads to be a CompileResult. It uses the entry_point to + # remove a CompileResult from a target context. However, since we never + # insert kernels into a target context (there is no need because they + # cannot be called by other functions, only through the dispatcher) it + # suffices to pretend we have an entry point of None. + self.entry_point = None + + self.py_func = py_func + self.argtypes = argtypes + self.debug = debug + self.lineinfo = lineinfo + self.extensions = extensions or [] + + nvvm_options = { + 'fastmath': fastmath, + 'opt': 3 if opt else 0 + } + + cc = get_current_device().compute_capability + cres = compile_cuda(self.py_func, types.void, self.argtypes, + debug=self.debug, + lineinfo=lineinfo, + inline=inline, + fastmath=fastmath, + nvvm_options=nvvm_options, + cc=cc) + tgt_ctx = cres.target_context + code = self.py_func.__code__ + filename = code.co_filename + linenum = code.co_firstlineno + lib, kernel = tgt_ctx.prepare_cuda_kernel(cres.library, cres.fndesc, + debug, lineinfo, nvvm_options, + filename, linenum, + max_registers) + + if not link: + link = [] + + # A kernel needs cooperative launch if grid_sync is being used. 
+ self.cooperative = 'cudaCGGetIntrinsicHandle' in lib.get_asm_str() + # We need to link against cudadevrt if grid sync is being used. + if self.cooperative: + lib.needs_cudadevrt = True + + res = [fn for fn in cuda_fp16_math_funcs + if (f'__numba_wrapper_{fn}' in lib.get_asm_str())] + + if res: + # Path to the source containing the foreign function + basedir = os.path.dirname(os.path.abspath(__file__)) + functions_cu_path = os.path.join(basedir, + 'cpp_function_wrappers.cu') + link.append(functions_cu_path) + + for filepath in link: + lib.add_linking_file(filepath) + + # populate members + self.entry_name = kernel.name + self.signature = cres.signature + self._type_annotation = cres.type_annotation + self._codelibrary = lib + self.call_helper = cres.call_helper + + # The following are referred to by the cache implementation. Note: + # - There are no referenced environments in CUDA. + # - Kernels don't have lifted code. + # - reload_init is only for parfors. + self.target_context = tgt_ctx + self.fndesc = cres.fndesc + self.environment = cres.environment + self._referenced_environments = [] + self.lifted = [] + self.reload_init = [] + + @property + def library(self): + return self._codelibrary + + @property + def type_annotation(self): + return self._type_annotation + + def _find_referenced_environments(self): + return self._referenced_environments + + @property + def codegen(self): + return self.target_context.codegen() + + @property + def argument_types(self): + return tuple(self.signature.args) + + @classmethod + def _rebuild(cls, cooperative, name, signature, codelibrary, + debug, lineinfo, call_helper, extensions): + """ + Rebuild an instance. 
+ """ + instance = cls.__new__(cls) + # invoke parent constructor + super(cls, instance).__init__() + # populate members + instance.entry_point = None + instance.cooperative = cooperative + instance.entry_name = name + instance.signature = signature + instance._type_annotation = None + instance._codelibrary = codelibrary + instance.debug = debug + instance.lineinfo = lineinfo + instance.call_helper = call_helper + instance.extensions = extensions + return instance + + def _reduce_states(self): + """ + Reduce the instance for serialization. + Compiled definitions are serialized in PTX form. + Type annotation are discarded. + Thread, block and shared memory configuration are serialized. + Stream information is discarded. + """ + return dict(cooperative=self.cooperative, name=self.entry_name, + signature=self.signature, codelibrary=self._codelibrary, + debug=self.debug, lineinfo=self.lineinfo, + call_helper=self.call_helper, extensions=self.extensions) + + def bind(self): + """ + Force binding to current CUDA context + """ + self._codelibrary.get_cufunc() + + @property + def regs_per_thread(self): + ''' + The number of registers used by each thread for this kernel. + ''' + return self._codelibrary.get_cufunc().attrs.regs + + @property + def const_mem_size(self): + ''' + The amount of constant memory used by this kernel. + ''' + return self._codelibrary.get_cufunc().attrs.const + + @property + def shared_mem_per_block(self): + ''' + The amount of shared memory used per block for this kernel. + ''' + return self._codelibrary.get_cufunc().attrs.shared + + @property + def max_threads_per_block(self): + ''' + The maximum allowable threads per block. + ''' + return self._codelibrary.get_cufunc().attrs.maxthreads + + @property + def local_mem_per_thread(self): + ''' + The amount of local memory used per thread for this kernel. + ''' + return self._codelibrary.get_cufunc().attrs.local + + def inspect_llvm(self): + ''' + Returns the LLVM IR for this kernel. 
+ ''' + return self._codelibrary.get_llvm_str() + + def inspect_asm(self, cc): + ''' + Returns the PTX code for this kernel. + ''' + return self._codelibrary.get_asm_str(cc=cc) + + def inspect_sass_cfg(self): + ''' + Returns the CFG of the SASS for this kernel. + + Requires nvdisasm to be available on the PATH. + ''' + return self._codelibrary.get_sass_cfg() + + def inspect_sass(self): + ''' + Returns the SASS code for this kernel. + + Requires nvdisasm to be available on the PATH. + ''' + return self._codelibrary.get_sass() + + def inspect_types(self, file=None): + ''' + Produce a dump of the Python source of this function annotated with the + corresponding Numba IR and type information. The dump is written to + *file*, or *sys.stdout* if *file* is *None*. + ''' + if self._type_annotation is None: + raise ValueError("Type annotation is not available") + + if file is None: + file = sys.stdout + + print("%s %s" % (self.entry_name, self.argument_types), file=file) + print('-' * 80, file=file) + print(self._type_annotation, file=file) + print('=' * 80, file=file) + + def max_cooperative_grid_blocks(self, blockdim, dynsmemsize=0): + ''' + Calculates the maximum number of blocks that can be launched for this + kernel in a cooperative grid in the current context, for the given block + and dynamic shared memory sizes. + + :param blockdim: Block dimensions, either as a scalar for a 1D block, or + a tuple for 2D or 3D blocks. + :param dynsmemsize: Dynamic shared memory size in bytes. + :return: The maximum number of blocks in the grid. 
+ ''' + ctx = get_context() + cufunc = self._codelibrary.get_cufunc() + + if isinstance(blockdim, tuple): + blockdim = functools.reduce(lambda x, y: x * y, blockdim) + active_per_sm = ctx.get_active_blocks_per_multiprocessor(cufunc, + blockdim, + dynsmemsize) + sm_count = ctx.device.MULTIPROCESSOR_COUNT + return active_per_sm * sm_count + + def launch(self, args, griddim, blockdim, stream=0, sharedmem=0): + # Prepare kernel + cufunc = self._codelibrary.get_cufunc() + + if self.debug: + excname = cufunc.name + "__errcode__" + excmem, excsz = cufunc.module.get_global_symbol(excname) + assert excsz == ctypes.sizeof(ctypes.c_int) + excval = ctypes.c_int() + excmem.memset(0, stream=stream) + + # Prepare arguments + retr = [] # hold functors for writeback + + kernelargs = [] + for t, v in zip(self.argument_types, args): + self._prepare_args(t, v, stream, retr, kernelargs) + + if driver.USE_NV_BINDING: + zero_stream = driver.binding.CUstream(0) + else: + zero_stream = None + + stream_handle = stream and stream.handle or zero_stream + + # Invoke kernel + driver.launch_kernel(cufunc.handle, + *griddim, + *blockdim, + sharedmem, + stream_handle, + kernelargs, + cooperative=self.cooperative) + + if self.debug: + driver.device_to_host(ctypes.addressof(excval), excmem, excsz) + if excval.value != 0: + # An error occurred + def load_symbol(name): + mem, sz = cufunc.module.get_global_symbol("%s__%s__" % + (cufunc.name, + name)) + val = ctypes.c_int() + driver.device_to_host(ctypes.addressof(val), mem, sz) + return val.value + + tid = [load_symbol("tid" + i) for i in 'zyx'] + ctaid = [load_symbol("ctaid" + i) for i in 'zyx'] + code = excval.value + exccls, exc_args, loc = self.call_helper.get_exception(code) + # Prefix the exception message with the source location + if loc is None: + locinfo = '' + else: + sym, filepath, lineno = loc + filepath = os.path.abspath(filepath) + locinfo = 'In function %r, file %s, line %s, ' % (sym, + filepath, + lineno,) + # Prefix the exception 
message with the thread position + prefix = "%stid=%s ctaid=%s" % (locinfo, tid, ctaid) + if exc_args: + exc_args = ("%s: %s" % (prefix, exc_args[0]),) + \ + exc_args[1:] + else: + exc_args = prefix, + raise exccls(*exc_args) + + # retrieve auto converted arrays + for wb in retr: + wb() + + def _prepare_args(self, ty, val, stream, retr, kernelargs): + """ + Convert arguments to ctypes and append to kernelargs + """ + + # map the arguments using any extension you've registered + for extension in reversed(self.extensions): + ty, val = extension.prepare_args( + ty, + val, + stream=stream, + retr=retr) + + if isinstance(ty, types.Array): + devary = wrap_arg(val).to_device(retr, stream) + + c_intp = ctypes.c_ssize_t + + meminfo = ctypes.c_void_p(0) + parent = ctypes.c_void_p(0) + nitems = c_intp(devary.size) + itemsize = c_intp(devary.dtype.itemsize) + + ptr = driver.device_pointer(devary) + + if driver.USE_NV_BINDING: + ptr = int(ptr) + + data = ctypes.c_void_p(ptr) + + kernelargs.append(meminfo) + kernelargs.append(parent) + kernelargs.append(nitems) + kernelargs.append(itemsize) + kernelargs.append(data) + for ax in range(devary.ndim): + kernelargs.append(c_intp(devary.shape[ax])) + for ax in range(devary.ndim): + kernelargs.append(c_intp(devary.strides[ax])) + + elif isinstance(ty, types.Integer): + cval = getattr(ctypes, "c_%s" % ty)(val) + kernelargs.append(cval) + + elif ty == types.float16: + cval = ctypes.c_uint16(np.float16(val).view(np.uint16)) + kernelargs.append(cval) + + elif ty == types.float64: + cval = ctypes.c_double(val) + kernelargs.append(cval) + + elif ty == types.float32: + cval = ctypes.c_float(val) + kernelargs.append(cval) + + elif ty == types.boolean: + cval = ctypes.c_uint8(int(val)) + kernelargs.append(cval) + + elif ty == types.complex64: + kernelargs.append(ctypes.c_float(val.real)) + kernelargs.append(ctypes.c_float(val.imag)) + + elif ty == types.complex128: + kernelargs.append(ctypes.c_double(val.real)) + 
kernelargs.append(ctypes.c_double(val.imag)) + + elif isinstance(ty, (types.NPDatetime, types.NPTimedelta)): + kernelargs.append(ctypes.c_int64(val.view(np.int64))) + + elif isinstance(ty, types.Record): + devrec = wrap_arg(val).to_device(retr, stream) + ptr = devrec.device_ctypes_pointer + if driver.USE_NV_BINDING: + ptr = ctypes.c_void_p(int(ptr)) + kernelargs.append(ptr) + + elif isinstance(ty, types.BaseTuple): + assert len(ty) == len(val) + for t, v in zip(ty, val): + self._prepare_args(t, v, stream, retr, kernelargs) + + elif isinstance(ty, types.EnumMember): + try: + self._prepare_args( + ty.dtype, val.value, stream, retr, kernelargs + ) + except NotImplementedError: + raise NotImplementedError(ty, val) + + else: + raise NotImplementedError(ty, val) + + +class ForAll(object): + def __init__(self, dispatcher, ntasks, tpb, stream, sharedmem): + if ntasks < 0: + raise ValueError("Can't create ForAll with negative task count: %s" + % ntasks) + self.dispatcher = dispatcher + self.ntasks = ntasks + self.thread_per_block = tpb + self.stream = stream + self.sharedmem = sharedmem + + def __call__(self, *args): + if self.ntasks == 0: + return + + if self.dispatcher.specialized: + specialized = self.dispatcher + else: + specialized = self.dispatcher.specialize(*args) + blockdim = self._compute_thread_per_block(specialized) + griddim = (self.ntasks + blockdim - 1) // blockdim + + return specialized[griddim, blockdim, self.stream, + self.sharedmem](*args) + + def _compute_thread_per_block(self, dispatcher): + tpb = self.thread_per_block + # Prefer user-specified config + if tpb != 0: + return tpb + # Else, ask the driver to give a good config + else: + ctx = get_context() + # Dispatcher is specialized, so there's only one definition - get + # it so we can get the cufunc from the code library + kernel = next(iter(dispatcher.overloads.values())) + kwargs = dict( + func=kernel._codelibrary.get_cufunc(), + b2d_func=0, # dynamic-shared memory is constant to blksz + 
memsize=self.sharedmem, + blocksizelimit=1024, + ) + _, tpb = ctx.get_max_potential_block_size(**kwargs) + return tpb + + +class _LaunchConfiguration: + def __init__(self, dispatcher, griddim, blockdim, stream, sharedmem): + self.dispatcher = dispatcher + self.griddim = griddim + self.blockdim = blockdim + self.stream = stream + self.sharedmem = sharedmem + + if config.CUDA_LOW_OCCUPANCY_WARNINGS: + # Warn when the grid has fewer than 128 blocks. This number is + # chosen somewhat heuristically - ideally the minimum is 2 times + # the number of SMs, but the number of SMs varies between devices - + # some very small GPUs might only have 4 SMs, but an H100-SXM5 has + # 132. In general kernels should be launched with large grids + # (hundreds or thousands of blocks), so warning when fewer than 128 + # blocks are used will likely catch most beginner errors, where the + # grid tends to be very small (single-digit or low tens of blocks). + min_grid_size = 128 + grid_size = griddim[0] * griddim[1] * griddim[2] + if grid_size < min_grid_size: + msg = (f"Grid size {grid_size} will likely result in GPU " + "under-utilization due to low occupancy.") + warn(NumbaPerformanceWarning(msg)) + + def __call__(self, *args): + return self.dispatcher.call(args, self.griddim, self.blockdim, + self.stream, self.sharedmem) + + +class CUDACacheImpl(CacheImpl): + def reduce(self, kernel): + return kernel._reduce_states() + + def rebuild(self, target_context, payload): + return _Kernel._rebuild(**payload) + + def check_cachable(self, cres): + # CUDA Kernels are always cachable - the reasons for an entity not to + # be cachable are: + # + # - The presence of lifted loops, or + # - The presence of dynamic globals. + # + # neither of which apply to CUDA kernels. + return True + + +class CUDACache(Cache): + """ + Implements a cache that saves and loads CUDA kernels and compile results. 
+ """ + _impl_class = CUDACacheImpl + + def load_overload(self, sig, target_context): + # Loading an overload refreshes the context to ensure it is + # initialized. To initialize the correct (i.e. CUDA) target, we need to + # enforce that the current target is the CUDA target. + from numba.core.target_extension import target_override + with target_override('cuda'): + return super().load_overload(sig, target_context) + + +class CUDADispatcher(Dispatcher, serialize.ReduceMixin): + ''' + CUDA Dispatcher object. When configured and called, the dispatcher will + specialize itself for the given arguments (if no suitable specialized + version already exists) & compute capability, and launch on the device + associated with the current context. + + Dispatcher objects are not to be constructed by the user, but instead are + created using the :func:`numba.cuda.jit` decorator. + ''' + + # Whether to fold named arguments and default values. Default values are + # presently unsupported on CUDA, so we can leave this as False in all + # cases. + _fold_args = False + + targetdescr = cuda_target + + def __init__(self, py_func, targetoptions, pipeline_class=CUDACompiler): + super().__init__(py_func, targetoptions=targetoptions, + pipeline_class=pipeline_class) + + # The following properties are for specialization of CUDADispatchers. A + # specialized CUDADispatcher is one that is compiled for exactly one + # set of argument types, and bypasses some argument type checking for + # faster kernel launches. + + # Is this a specialized dispatcher? 
+ self._specialized = False + + # If we produced specialized dispatchers, we cache them for each set of + # argument types + self.specializations = {} + + @property + def _numba_type_(self): + return cuda_types.CUDADispatcher(self) + + def enable_caching(self): + self._cache = CUDACache(self.py_func) + + @functools.lru_cache(maxsize=128) + def configure(self, griddim, blockdim, stream=0, sharedmem=0): + griddim, blockdim = normalize_kernel_dimensions(griddim, blockdim) + return _LaunchConfiguration(self, griddim, blockdim, stream, sharedmem) + + def __getitem__(self, args): + if len(args) not in [2, 3, 4]: + raise ValueError('must specify at least the griddim and blockdim') + return self.configure(*args) + + def forall(self, ntasks, tpb=0, stream=0, sharedmem=0): + """Returns a 1D-configured dispatcher for a given number of tasks. + + This assumes that: + + - the kernel maps the Global Thread ID ``cuda.grid(1)`` to tasks on a + 1-1 basis. + - the kernel checks that the Global Thread ID is upper-bounded by + ``ntasks``, and does nothing if it is not. + + :param ntasks: The number of tasks. + :param tpb: The size of a block. An appropriate value is chosen if this + parameter is not supplied. + :param stream: The stream on which the configured dispatcher will be + launched. + :param sharedmem: The number of bytes of dynamic shared memory required + by the kernel. + :return: A configured dispatcher, ready to launch on a set of + arguments.""" + + return ForAll(self, ntasks, tpb=tpb, stream=stream, sharedmem=sharedmem) + + @property + def extensions(self): + ''' + A list of objects that must have a `prepare_args` function. When a + specialized kernel is called, each argument will be passed through + to the `prepare_args` (from the last object in this list to the + first). 
The arguments to `prepare_args` are: + + - `ty` the numba type of the argument + - `val` the argument value itself + - `stream` the CUDA stream used for the current call to the kernel + - `retr` a list of zero-arg functions that you may want to append + post-call cleanup work to. + + The `prepare_args` function must return a tuple `(ty, val)`, which + will be passed in turn to the next right-most `extension`. After all + the extensions have been called, the resulting `(ty, val)` will be + passed into Numba's default argument marshalling logic. + ''' + return self.targetoptions.get('extensions') + + def __call__(self, *args, **kwargs): + # An attempt to launch an unconfigured kernel + raise ValueError(missing_launch_config_msg) + + def call(self, args, griddim, blockdim, stream, sharedmem): + ''' + Compile if necessary and invoke this kernel with *args*. + ''' + if self.specialized: + kernel = next(iter(self.overloads.values())) + else: + kernel = _dispatcher.Dispatcher._cuda_call(self, *args) + + kernel.launch(args, griddim, blockdim, stream, sharedmem) + + def _compile_for_args(self, *args, **kws): + # Based on _DispatcherBase._compile_for_args. + assert not kws + argtypes = [self.typeof_pyval(a) for a in args] + return self.compile(tuple(argtypes)) + + def typeof_pyval(self, val): + # Based on _DispatcherBase.typeof_pyval, but differs from it to support + # the CUDA Array Interface. + try: + return typeof(val, Purpose.argument) + except ValueError: + if cuda.is_cuda_array(val): + # When typing, we don't need to synchronize on the array's + # stream - this is done when the kernel is launched. + return typeof(cuda.as_cuda_array(val, sync=False), + Purpose.argument) + else: + raise + + def specialize(self, *args): + ''' + Create a new instance of this dispatcher specialized for the given + *args*. 
+ ''' + if self.specialized: + raise RuntimeError('Dispatcher already specialized') + + cc = get_current_device().compute_capability + argtypes = tuple(self.typeof_pyval(a) for a in args) + + specialization = self.specializations.get((cc, argtypes)) + if specialization: + return specialization + + targetoptions = self.targetoptions + specialization = CUDADispatcher(self.py_func, + targetoptions=targetoptions) + specialization.compile(argtypes) + specialization.disable_compile() + specialization._specialized = True + self.specializations[cc, argtypes] = specialization + return specialization + + @property + def specialized(self): + """ + True if the Dispatcher has been specialized. + """ + return self._specialized + + def get_regs_per_thread(self, signature=None): + ''' + Returns the number of registers used by each thread in this kernel for + the device in the current context. + + :param signature: The signature of the compiled kernel to get register + usage for. This may be omitted for a specialized + kernel. + :return: The number of registers used by the compiled variant of the + kernel for the given signature and current device. + ''' + if signature is not None: + return self.overloads[signature.args].regs_per_thread + if self.specialized: + return next(iter(self.overloads.values())).regs_per_thread + else: + return {sig: overload.regs_per_thread + for sig, overload in self.overloads.items()} + + def get_const_mem_size(self, signature=None): + ''' + Returns the size in bytes of constant memory used by this kernel for + the device in the current context. + + :param signature: The signature of the compiled kernel to get constant + memory usage for. This may be omitted for a + specialized kernel. + :return: The size in bytes of constant memory allocated by the + compiled variant of the kernel for the given signature and + current device. 
+ ''' + if signature is not None: + return self.overloads[signature.args].const_mem_size + if self.specialized: + return next(iter(self.overloads.values())).const_mem_size + else: + return {sig: overload.const_mem_size + for sig, overload in self.overloads.items()} + + def get_shared_mem_per_block(self, signature=None): + ''' + Returns the size in bytes of statically allocated shared memory + for this kernel. + + :param signature: The signature of the compiled kernel to get shared + memory usage for. This may be omitted for a + specialized kernel. + :return: The amount of shared memory allocated by the compiled variant + of the kernel for the given signature and current device. + ''' + if signature is not None: + return self.overloads[signature.args].shared_mem_per_block + if self.specialized: + return next(iter(self.overloads.values())).shared_mem_per_block + else: + return {sig: overload.shared_mem_per_block + for sig, overload in self.overloads.items()} + + def get_max_threads_per_block(self, signature=None): + ''' + Returns the maximum allowable number of threads per block + for this kernel. Exceeding this threshold will result in + the kernel failing to launch. + + :param signature: The signature of the compiled kernel to get the max + threads per block for. This may be omitted for a + specialized kernel. + :return: The maximum allowable threads per block for the compiled + variant of the kernel for the given signature and current + device. + ''' + if signature is not None: + return self.overloads[signature.args].max_threads_per_block + if self.specialized: + return next(iter(self.overloads.values())).max_threads_per_block + else: + return {sig: overload.max_threads_per_block + for sig, overload in self.overloads.items()} + + def get_local_mem_per_thread(self, signature=None): + ''' + Returns the size in bytes of local memory per thread + for this kernel. + + :param signature: The signature of the compiled kernel to get local + memory usage for. 
This may be omitted for a + specialized kernel. + :return: The amount of local memory allocated by the compiled variant + of the kernel for the given signature and current device. + ''' + if signature is not None: + return self.overloads[signature.args].local_mem_per_thread + if self.specialized: + return next(iter(self.overloads.values())).local_mem_per_thread + else: + return {sig: overload.local_mem_per_thread + for sig, overload in self.overloads.items()} + + def get_call_template(self, args, kws): + # Originally copied from _DispatcherBase.get_call_template. This + # version deviates slightly from the _DispatcherBase version in order + # to force casts when calling device functions. See e.g. + # TestDeviceFunc.test_device_casting, added in PR #7496. + """ + Get a typing.ConcreteTemplate for this dispatcher and the given + *args* and *kws* types. This allows resolution of the return type. + + A (template, pysig, args, kws) tuple is returned. + """ + # Ensure an exactly-matching overload is available if we can + # compile. We proceed with the typing even if we can't compile + # because we may be able to force a cast on the caller side. + if self._can_compile: + self.compile_device(tuple(args)) + + # Create function type for typing + func_name = self.py_func.__name__ + name = "CallTemplate({0})".format(func_name) + + call_template = typing.make_concrete_template( + name, key=func_name, signatures=self.nopython_signatures) + pysig = utils.pysignature(self.py_func) + + return call_template, pysig, args, kws + + def compile_device(self, args, return_type=None): + """Compile the device function for the given argument types. + + Each signature is compiled once by caching the compiled function inside + this object. + + Returns the `CompileResult`. 
+ """ + if args not in self.overloads: + with self._compiling_counter: + + debug = self.targetoptions.get('debug') + lineinfo = self.targetoptions.get('lineinfo') + inline = self.targetoptions.get('inline') + fastmath = self.targetoptions.get('fastmath') + + nvvm_options = { + 'opt': 3 if self.targetoptions.get('opt') else 0, + 'fastmath': fastmath + } + + cc = get_current_device().compute_capability + cres = compile_cuda(self.py_func, return_type, args, + debug=debug, + lineinfo=lineinfo, + inline=inline, + fastmath=fastmath, + nvvm_options=nvvm_options, + cc=cc) + self.overloads[args] = cres + + cres.target_context.insert_user_function(cres.entry_point, + cres.fndesc, + [cres.library]) + else: + cres = self.overloads[args] + + return cres + + def add_overload(self, kernel, argtypes): + c_sig = [a._code for a in argtypes] + self._insert(c_sig, kernel, cuda=True) + self.overloads[argtypes] = kernel + + def compile(self, sig): + ''' + Compile and bind to the current context a version of this kernel + specialized for the given signature. + ''' + argtypes, return_type = sigutils.normalize_signature(sig) + assert return_type is None or return_type == types.none + + # Do we already have an in-memory compiled kernel? + if self.specialized: + return next(iter(self.overloads.values())) + else: + kernel = self.overloads.get(argtypes) + if kernel is not None: + return kernel + + # Can we load from the disk cache? 
+ kernel = self._cache.load_overload(sig, self.targetctx) + + if kernel is not None: + self._cache_hits[sig] += 1 + else: + # We need to compile a new kernel + self._cache_misses[sig] += 1 + if not self._can_compile: + raise RuntimeError("Compilation disabled") + + kernel = _Kernel(self.py_func, argtypes, **self.targetoptions) + # We call bind to force codegen, so that there is a cubin to cache + kernel.bind() + self._cache.save_overload(sig, kernel) + + self.add_overload(kernel, argtypes) + + return kernel + + def inspect_llvm(self, signature=None): + ''' + Return the LLVM IR for this kernel. + + :param signature: A tuple of argument types. + :return: The LLVM IR for the given signature, or a dict of LLVM IR + for all previously-encountered signatures. + + ''' + device = self.targetoptions.get('device') + if signature is not None: + if device: + return self.overloads[signature].library.get_llvm_str() + else: + return self.overloads[signature].inspect_llvm() + else: + if device: + return {sig: overload.library.get_llvm_str() + for sig, overload in self.overloads.items()} + else: + return {sig: overload.inspect_llvm() + for sig, overload in self.overloads.items()} + + def inspect_asm(self, signature=None): + ''' + Return this kernel's PTX assembly code for for the device in the + current context. + + :param signature: A tuple of argument types. + :return: The PTX code for the given signature, or a dict of PTX codes + for all previously-encountered signatures. 
+ ''' + cc = get_current_device().compute_capability + device = self.targetoptions.get('device') + if signature is not None: + if device: + return self.overloads[signature].library.get_asm_str(cc) + else: + return self.overloads[signature].inspect_asm(cc) + else: + if device: + return {sig: overload.library.get_asm_str(cc) + for sig, overload in self.overloads.items()} + else: + return {sig: overload.inspect_asm(cc) + for sig, overload in self.overloads.items()} + + def inspect_sass_cfg(self, signature=None): + ''' + Return this kernel's CFG for the device in the current context. + + :param signature: A tuple of argument types. + :return: The CFG for the given signature, or a dict of CFGs + for all previously-encountered signatures. + + The CFG for the device in the current context is returned. + + Requires nvdisasm to be available on the PATH. + ''' + if self.targetoptions.get('device'): + raise RuntimeError('Cannot get the CFG of a device function') + + if signature is not None: + return self.overloads[signature].inspect_sass_cfg() + else: + return {sig: defn.inspect_sass_cfg() + for sig, defn in self.overloads.items()} + + def inspect_sass(self, signature=None): + ''' + Return this kernel's SASS assembly code for for the device in the + current context. + + :param signature: A tuple of argument types. + :return: The SASS code for the given signature, or a dict of SASS codes + for all previously-encountered signatures. + + SASS for the device in the current context is returned. + + Requires nvdisasm to be available on the PATH. + ''' + if self.targetoptions.get('device'): + raise RuntimeError('Cannot inspect SASS of a device function') + + if signature is not None: + return self.overloads[signature].inspect_sass() + else: + return {sig: defn.inspect_sass() + for sig, defn in self.overloads.items()} + + def inspect_types(self, file=None): + ''' + Produce a dump of the Python source of this function annotated with the + corresponding Numba IR and type information. 
The dump is written to + *file*, or *sys.stdout* if *file* is *None*. + ''' + if file is None: + file = sys.stdout + + for _, defn in self.overloads.items(): + defn.inspect_types(file=file) + + @classmethod + def _rebuild(cls, py_func, targetoptions): + """ + Rebuild an instance. + """ + instance = cls(py_func, targetoptions) + return instance + + def _reduce_states(self): + """ + Reduce the instance for serialization. + Compiled definitions are discarded. + """ + return dict(py_func=self.py_func, + targetoptions=self.targetoptions) diff --git a/lib/python3.10/site-packages/numba/cuda/errors.py b/lib/python3.10/site-packages/numba/cuda/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..653a0db6ef2f80e50aebc14c6c576fe3570606ee --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/errors.py @@ -0,0 +1,59 @@ +import numbers +from numba.core.errors import LoweringError + + +class KernelRuntimeError(RuntimeError): + def __init__(self, msg, tid=None, ctaid=None): + self.tid = tid + self.ctaid = ctaid + self.msg = msg + t = ("An exception was raised in thread=%s block=%s\n" + "\t%s") + msg = t % (self.tid, self.ctaid, self.msg) + super(KernelRuntimeError, self).__init__(msg) + + +class CudaLoweringError(LoweringError): + pass + + +_launch_help_url = ("https://numba.readthedocs.io/en/stable/cuda/" + "kernels.html#kernel-invocation") +missing_launch_config_msg = """ +Kernel launch configuration was not specified. Use the syntax: + +kernel_function[blockspergrid, threadsperblock](arg0, arg1, ..., argn) + +See {} for help. + +""".format(_launch_help_url) + + +def normalize_kernel_dimensions(griddim, blockdim): + """ + Normalize and validate the user-supplied kernel dimensions. 
+ """ + + def check_dim(dim, name): + if not isinstance(dim, (tuple, list)): + dim = [dim] + else: + dim = list(dim) + if len(dim) > 3: + raise ValueError('%s must be a sequence of 1, 2 or 3 integers, ' + 'got %r' % (name, dim)) + for v in dim: + if not isinstance(v, numbers.Integral): + raise TypeError('%s must be a sequence of integers, got %r' + % (name, dim)) + while len(dim) < 3: + dim.append(1) + return tuple(dim) + + if None in (griddim, blockdim): + raise ValueError(missing_launch_config_msg) + + griddim = check_dim(griddim, 'griddim') + blockdim = check_dim(blockdim, 'blockdim') + + return griddim, blockdim diff --git a/lib/python3.10/site-packages/numba/cuda/extending.py b/lib/python3.10/site-packages/numba/cuda/extending.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc482aaac40c6ec995466581a5079ef280f9595 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/extending.py @@ -0,0 +1,7 @@ +""" +Added for symmetry with the core API +""" + +from numba.core.extending import intrinsic as _intrinsic + +intrinsic = _intrinsic(target='cuda') diff --git a/lib/python3.10/site-packages/numba/cuda/initialize.py b/lib/python3.10/site-packages/numba/cuda/initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..e90c95b3110c65387fdd8a7a1714109ce2d05aff --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/initialize.py @@ -0,0 +1,13 @@ +def initialize_all(): + # Import models to register them with the data model manager + import numba.cuda.models # noqa: F401 + + from numba.cuda.decorators import jit + from numba.cuda.dispatcher import CUDADispatcher + from numba.core.target_extension import (target_registry, + dispatcher_registry, + jit_registry) + + cuda_target = target_registry["cuda"] + jit_registry[cuda_target] = jit + dispatcher_registry[cuda_target] = CUDADispatcher diff --git a/lib/python3.10/site-packages/numba/cuda/intrinsic_wrapper.py b/lib/python3.10/site-packages/numba/cuda/intrinsic_wrapper.py 
new file mode 100644 index 0000000000000000000000000000000000000000..e02639f2122e2d6ca2b0c6ceecf2de63b4cf5395 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/intrinsic_wrapper.py @@ -0,0 +1,77 @@ +from .decorators import jit +import numba + + +@jit(device=True) +def all_sync(mask, predicate): + """ + If for all threads in the masked warp the predicate is true, then + a non-zero value is returned, otherwise 0 is returned. + """ + return numba.cuda.vote_sync_intrinsic(mask, 0, predicate)[1] + + +@jit(device=True) +def any_sync(mask, predicate): + """ + If for any thread in the masked warp the predicate is true, then + a non-zero value is returned, otherwise 0 is returned. + """ + return numba.cuda.vote_sync_intrinsic(mask, 1, predicate)[1] + + +@jit(device=True) +def eq_sync(mask, predicate): + """ + If for all threads in the masked warp the boolean predicate is the same, + then a non-zero value is returned, otherwise 0 is returned. + """ + return numba.cuda.vote_sync_intrinsic(mask, 2, predicate)[1] + + +@jit(device=True) +def ballot_sync(mask, predicate): + """ + Returns a mask of all threads in the warp whose predicate is true, + and are within the given mask. + """ + return numba.cuda.vote_sync_intrinsic(mask, 3, predicate)[0] + + +@jit(device=True) +def shfl_sync(mask, value, src_lane): + """ + Shuffles value across the masked warp and returns the value + from src_lane. If this is outside the warp, then the + given value is returned. + """ + return numba.cuda.shfl_sync_intrinsic(mask, 0, value, src_lane, 0x1f)[0] + + +@jit(device=True) +def shfl_up_sync(mask, value, delta): + """ + Shuffles value across the masked warp and returns the value + from (laneid - delta). If this is outside the warp, then the + given value is returned. 
+ """ + return numba.cuda.shfl_sync_intrinsic(mask, 1, value, delta, 0)[0] + + +@jit(device=True) +def shfl_down_sync(mask, value, delta): + """ + Shuffles value across the masked warp and returns the value + from (laneid + delta). If this is outside the warp, then the + given value is returned. + """ + return numba.cuda.shfl_sync_intrinsic(mask, 2, value, delta, 0x1f)[0] + + +@jit(device=True) +def shfl_xor_sync(mask, value, lane_mask): + """ + Shuffles value across the masked warp and returns the value + from (laneid ^ lane_mask). + """ + return numba.cuda.shfl_sync_intrinsic(mask, 3, value, lane_mask, 0x1f)[0] diff --git a/lib/python3.10/site-packages/numba/cuda/intrinsics.py b/lib/python3.10/site-packages/numba/cuda/intrinsics.py new file mode 100644 index 0000000000000000000000000000000000000000..3bf0b59932c93e60e716bc4550a683dc0e53a983 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/intrinsics.py @@ -0,0 +1,198 @@ +from llvmlite import ir + +from numba import cuda, types +from numba.core import cgutils +from numba.core.errors import RequireLiteralValue, NumbaValueError +from numba.core.typing import signature +from numba.core.extending import overload_attribute +from numba.cuda import nvvmutils +from numba.cuda.extending import intrinsic + + +#------------------------------------------------------------------------------- +# Grid functions + +def _type_grid_function(ndim): + val = ndim.literal_value + if val == 1: + restype = types.int64 + elif val in (2, 3): + restype = types.UniTuple(types.int64, val) + else: + raise NumbaValueError('argument can only be 1, 2, 3') + + return signature(restype, types.int32) + + +@intrinsic +def grid(typingctx, ndim): + '''grid(ndim) + + Return the absolute position of the current thread in the entire grid of + blocks. *ndim* should correspond to the number of dimensions declared when + instantiating the kernel. If *ndim* is 1, a single integer is returned. 
+ If *ndim* is 2 or 3, a tuple of the given number of integers is returned. + + Computation of the first integer is as follows:: + + cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x + + and is similar for the other two indices, but using the ``y`` and ``z`` + attributes. + ''' + + if not isinstance(ndim, types.IntegerLiteral): + raise RequireLiteralValue(ndim) + + sig = _type_grid_function(ndim) + + def codegen(context, builder, sig, args): + restype = sig.return_type + if restype == types.int64: + return nvvmutils.get_global_id(builder, dim=1) + elif isinstance(restype, types.UniTuple): + ids = nvvmutils.get_global_id(builder, dim=restype.count) + return cgutils.pack_array(builder, ids) + + return sig, codegen + + +@intrinsic +def gridsize(typingctx, ndim): + '''gridsize(ndim) + + Return the absolute size (or shape) in threads of the entire grid of + blocks. *ndim* should correspond to the number of dimensions declared when + instantiating the kernel. If *ndim* is 1, a single integer is returned. + If *ndim* is 2 or 3, a tuple of the given number of integers is returned. + + Computation of the first integer is as follows:: + + cuda.blockDim.x * cuda.gridDim.x + + and is similar for the other two indices, but using the ``y`` and ``z`` + attributes. 
+ ''' + + if not isinstance(ndim, types.IntegerLiteral): + raise RequireLiteralValue(ndim) + + sig = _type_grid_function(ndim) + + def _nthreads_for_dim(builder, dim): + i64 = ir.IntType(64) + ntid = nvvmutils.call_sreg(builder, f"ntid.{dim}") + nctaid = nvvmutils.call_sreg(builder, f"nctaid.{dim}") + return builder.mul(builder.sext(ntid, i64), builder.sext(nctaid, i64)) + + def codegen(context, builder, sig, args): + restype = sig.return_type + nx = _nthreads_for_dim(builder, 'x') + + if restype == types.int64: + return nx + elif isinstance(restype, types.UniTuple): + ny = _nthreads_for_dim(builder, 'y') + + if restype.count == 2: + return cgutils.pack_array(builder, (nx, ny)) + elif restype.count == 3: + nz = _nthreads_for_dim(builder, 'z') + return cgutils.pack_array(builder, (nx, ny, nz)) + + return sig, codegen + + +@intrinsic +def _warpsize(typingctx): + sig = signature(types.int32) + + def codegen(context, builder, sig, args): + return nvvmutils.call_sreg(builder, 'warpsize') + + return sig, codegen + + +@overload_attribute(types.Module(cuda), 'warpsize', target='cuda') +def cuda_warpsize(mod): + ''' + The size of a warp. All architectures implemented to date have a warp size + of 32. + ''' + def get(mod): + return _warpsize() + return get + + +#------------------------------------------------------------------------------- +# syncthreads + +@intrinsic +def syncthreads(typingctx): + ''' + Synchronize all threads in the same thread block. This function implements + the same pattern as barriers in traditional multi-threaded programming: this + function waits until all threads in the block call it, at which point it + returns control to all its callers. 
+ ''' + sig = signature(types.none) + + def codegen(context, builder, sig, args): + fname = 'llvm.nvvm.barrier0' + lmod = builder.module + fnty = ir.FunctionType(ir.VoidType(), ()) + sync = cgutils.get_or_insert_function(lmod, fnty, fname) + builder.call(sync, ()) + return context.get_dummy_value() + + return sig, codegen + + +def _syncthreads_predicate(typingctx, predicate, fname): + if not isinstance(predicate, types.Integer): + return None + + sig = signature(types.i4, types.i4) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType(ir.IntType(32), (ir.IntType(32),)) + sync = cgutils.get_or_insert_function(builder.module, fnty, fname) + return builder.call(sync, args) + + return sig, codegen + + +@intrinsic +def syncthreads_count(typingctx, predicate): + ''' + syncthreads_count(predicate) + + An extension to numba.cuda.syncthreads where the return value is a count + of the threads where predicate is true. + ''' + fname = 'llvm.nvvm.barrier0.popc' + return _syncthreads_predicate(typingctx, predicate, fname) + + +@intrinsic +def syncthreads_and(typingctx, predicate): + ''' + syncthreads_and(predicate) + + An extension to numba.cuda.syncthreads where 1 is returned if predicate is + true for all threads or 0 otherwise. + ''' + fname = 'llvm.nvvm.barrier0.and' + return _syncthreads_predicate(typingctx, predicate, fname) + + +@intrinsic +def syncthreads_or(typingctx, predicate): + ''' + syncthreads_or(predicate) + + An extension to numba.cuda.syncthreads where 1 is returned if predicate is + true for any thread or 0 otherwise. 
+ ''' + fname = 'llvm.nvvm.barrier0.or' + return _syncthreads_predicate(typingctx, predicate, fname) diff --git a/lib/python3.10/site-packages/numba/cuda/libdevice.py b/lib/python3.10/site-packages/numba/cuda/libdevice.py new file mode 100644 index 0000000000000000000000000000000000000000..303ade74b650b6176cbd7f6590cddd5dc9c0278a --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/libdevice.py @@ -0,0 +1,3382 @@ +def abs(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_abs.html + + :param x: Argument. + :type x: int32 + :rtype: int32 +""" + + +def acos(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acos.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def acosf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acosf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def acosh(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acosh.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def acoshf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_acoshf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def asin(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asin.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def asinf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asinf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def asinh(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asinh.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def asinhf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_asinhf.html + + :param x: Argument. 
+ :type x: float32 + :rtype: float32 +""" + + +def atan(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atan.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def atan2(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atan2.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def atan2f(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atan2f.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def atanf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atanf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def atanh(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atanh.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def atanhf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_atanhf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def brev(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_brev.html + + :param x: Argument. + :type x: int32 + :rtype: int32 +""" + + +def brevll(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_brevll.html + + :param x: Argument. + :type x: int64 + :rtype: int64 +""" + + +def byte_perm(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_byte_perm.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :param z: Argument. + :type z: int32 + :rtype: int32 +""" + + +def cbrt(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrt.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def cbrtf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cbrtf.html + + :param x: Argument. 
+ :type x: float32 + :rtype: float32 +""" + + +def ceil(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ceil.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def ceilf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ceilf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def clz(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_clz.html + + :param x: Argument. + :type x: int32 + :rtype: int32 +""" + + +def clzll(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_clzll.html + + :param x: Argument. + :type x: int64 + :rtype: int32 +""" + + +def copysign(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_copysign.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def copysignf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_copysignf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def cos(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cos.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def cosf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cosf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def cosh(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cosh.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def coshf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_coshf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def cospi(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cospi.html + + :param x: Argument. 
+ :type x: float64 + :rtype: float64 +""" + + +def cospif(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_cospif.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def dadd_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_rd.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def dadd_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_rn.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def dadd_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_ru.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def dadd_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_rz.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def ddiv_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_rd.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def ddiv_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_rn.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def ddiv_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_ru.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def ddiv_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ddiv_rz.html + + :param x: Argument. + :type x: float64 + :param y: Argument. 
+ :type y: float64 + :rtype: float64 +""" + + +def dmul_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_rd.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def dmul_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_rn.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def dmul_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_ru.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def dmul_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dmul_rz.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def double2float_rd(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_rd.html + + :param d: Argument. + :type d: float64 + :rtype: float32 +""" + + +def double2float_rn(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_rn.html + + :param d: Argument. + :type d: float64 + :rtype: float32 +""" + + +def double2float_ru(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_ru.html + + :param d: Argument. + :type d: float64 + :rtype: float32 +""" + + +def double2float_rz(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2float_rz.html + + :param d: Argument. + :type d: float64 + :rtype: float32 +""" + + +def double2hiint(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2hiint.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2int_rd(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_rd.html + + :param d: Argument. 
+ :type d: float64 + :rtype: int32 +""" + + +def double2int_rn(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_rn.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2int_ru(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_ru.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2int_rz(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2int_rz.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2ll_rd(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_rd.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2ll_rn(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_rn.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2ll_ru(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_ru.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2ll_rz(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ll_rz.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2loint(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2loint.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2uint_rd(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_rd.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2uint_rn(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_rn.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2uint_ru(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_ru.html + + :param d: Argument. 
+ :type d: float64 + :rtype: int32 +""" + + +def double2uint_rz(d): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2uint_rz.html + + :param d: Argument. + :type d: float64 + :rtype: int32 +""" + + +def double2ull_rd(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_rd.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2ull_rn(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_rn.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2ull_ru(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_ru.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double2ull_rz(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double2ull_rz.html + + :param f: Argument. + :type f: float64 + :rtype: int64 +""" + + +def double_as_longlong(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_double_as_longlong.html + + :param x: Argument. + :type x: float64 + :rtype: int64 +""" + + +def drcp_rd(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_rd.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def drcp_rn(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_rn.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def drcp_ru(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_ru.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def drcp_rz(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_drcp_rz.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def dsqrt_rd(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_rd.html + + :param x: Argument. 
+ :type x: float64 + :rtype: float64 +""" + + +def dsqrt_rn(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_rn.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def dsqrt_ru(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_ru.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def dsqrt_rz(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dsqrt_rz.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def erf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erf.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def erfc(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfc.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def erfcf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def erfcinv(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcinv.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def erfcinvf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcinvf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def erfcx(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcx.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def erfcxf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfcxf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def erff(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erff.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def erfinv(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfinv.html + + :param x: Argument. 
+ :type x: float64 + :rtype: float64 +""" + + +def erfinvf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_erfinvf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def exp(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def exp10(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp10.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def exp10f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp10f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def exp2(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp2.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def exp2f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_exp2f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def expf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_expf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def expm1(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_expm1.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def expm1f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_expm1f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fabs(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fabs.html + + :param f: Argument. + :type f: float64 + :rtype: float64 +""" + + +def fabsf(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fabsf.html + + :param f: Argument. + :type f: float32 + :rtype: float32 +""" + + +def fadd_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rd.html + + :param x: Argument. 
+ :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fadd_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rn.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fadd_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_ru.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fadd_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rz.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fast_cosf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_cosf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fast_exp10f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_exp10f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fast_expf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_expf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fast_fdividef(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_fdividef.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fast_log10f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_log10f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fast_log2f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_log2f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fast_logf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_logf.html + + :param x: Argument. 
+ :type x: float32 + :rtype: float32 +""" + + +def fast_powf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_powf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fast_sincosf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_sincosf.html + + :param x: Argument. + :type x: float32 + :rtype: UniTuple(float32 x 2) +""" + + +def fast_sinf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_sinf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fast_tanf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_tanf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fdim(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdim.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def fdimf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdimf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fdiv_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rd.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fdiv_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rn.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fdiv_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_ru.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fdiv_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rz.html + + :param x: Argument. 
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
"""


def ffs(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffs.html

    :param x: Argument.
    :type x: int32
    :rtype: int32
"""


def ffsll(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffsll.html

    :param x: Argument.
    :type x: int64
    :rtype: int32
"""


def finitef(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_finitef.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2half_rn(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2half_rn.html

    :param f: Argument.
    :type f: float32
    :rtype: int16
"""


def float2int_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rd.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2int_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rn.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2int_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_ru.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2int_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rz.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2ll_rd(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rd.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2ll_rn(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rn.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2ll_ru(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_ru.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2ll_rz(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rz.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2uint_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rd.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2uint_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rn.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2uint_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_ru.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2uint_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rz.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def float2ull_rd(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rd.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2ull_rn(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rn.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2ull_ru(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_ru.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float2ull_rz(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rz.html

    :param f: Argument.
    :type f: float32
    :rtype: int64
"""


def float_as_int(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float_as_int.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def floor(f):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_floor.html

    :param f: Argument.
+ :type f: float64 + :rtype: float64 +""" + + +def floorf(f): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_floorf.html + + :param f: Argument. + :type f: float32 + :rtype: float32 +""" + + +def fma(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :param z: Argument. + :type z: float64 + :rtype: float64 +""" + + +def fma_rd(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rd.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :param z: Argument. + :type z: float64 + :rtype: float64 +""" + + +def fma_rn(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rn.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :param z: Argument. + :type z: float64 + :rtype: float64 +""" + + +def fma_ru(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_ru.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :param z: Argument. + :type z: float64 + :rtype: float64 +""" + + +def fma_rz(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rz.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :param z: Argument. + :type z: float64 + :rtype: float64 +""" + + +def fmaf(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :param z: Argument. + :type z: float32 + :rtype: float32 +""" + + +def fmaf_rd(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rd.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :param z: Argument. 
+ :type z: float32 + :rtype: float32 +""" + + +def fmaf_rn(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rn.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :param z: Argument. + :type z: float32 + :rtype: float32 +""" + + +def fmaf_ru(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_ru.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :param z: Argument. + :type z: float32 + :rtype: float32 +""" + + +def fmaf_rz(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rz.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :param z: Argument. + :type z: float32 + :rtype: float32 +""" + + +def fmax(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmax.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def fmaxf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaxf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fmin(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmin.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def fminf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fminf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fmod(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmod.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def fmodf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmodf.html + + :param x: Argument. 
+ :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fmul_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rd.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fmul_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rn.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fmul_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_ru.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fmul_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rz.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def frcp_rd(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rd.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def frcp_rn(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rn.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def frcp_ru(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_ru.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def frcp_rz(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rz.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def frexp(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frexp.html + + :param x: Argument. + :type x: float64 + :rtype: Tuple(float64, int32) +""" + + +def frexpf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frexpf.html + + :param x: Argument. 
+ :type x: float32 + :rtype: Tuple(float32, int32) +""" + + +def frsqrt_rn(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frsqrt_rn.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fsqrt_rd(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rd.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fsqrt_rn(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rn.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fsqrt_ru(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_ru.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fsqrt_rz(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rz.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def fsub_rd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rd.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fsub_rn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rn.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fsub_ru(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_ru.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def fsub_rz(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rz.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def hadd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hadd.html + + :param x: Argument. + :type x: int32 + :param y: Argument. 
    :type y: int32
    :rtype: int32
"""


def half2float(h):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_half2float.html

    :param h: Argument.
    :type h: int16
    :rtype: float32
"""


def hiloint2double(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hiloint2double.html

    :param x: Argument.
    :type x: int32
    :param y: Argument.
    :type y: int32
    :rtype: float64
"""


def hypot(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hypot.html

    :param x: Argument.
    :type x: float64
    :param y: Argument.
    :type y: float64
    :rtype: float64
"""


def hypotf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hypotf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
"""


def ilogb(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ilogb.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
"""


def ilogbf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ilogbf.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def int2double_rn(i):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2double_rn.html

    :param i: Argument.
    :type i: int32
    :rtype: float64
"""


def int2float_rd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_rd.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
"""


def int2float_rn(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_rn.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
"""


def int2float_ru(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_ru.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
"""


def int2float_rz(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int2float_rz.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
"""


def int_as_float(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_int_as_float.html

    :param x: Argument.
    :type x: int32
    :rtype: float32
"""


def isfinited(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isfinited.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
"""


def isinfd(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isinfd.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
"""


def isinff(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isinff.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def isnand(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isnand.html

    :param x: Argument.
    :type x: float64
    :rtype: int32
"""


def isnanf(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_isnanf.html

    :param x: Argument.
    :type x: float32
    :rtype: int32
"""


def j0(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j0.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
"""


def j0f(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j0f.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
"""


def j1(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j1.html

    :param x: Argument.
    :type x: float64
    :rtype: float64
"""


def j1f(x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_j1f.html

    :param x: Argument.
    :type x: float32
    :rtype: float32
"""


def jn(n, x):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_jn.html

    :param n: Argument.
+ :type n: int32 + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def jnf(n, x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_jnf.html + + :param n: Argument. + :type n: int32 + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def ldexp(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ldexp.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: int32 + :rtype: float64 +""" + + +def ldexpf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ldexpf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: int32 + :rtype: float32 +""" + + +def lgamma(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_lgamma.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def lgammaf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_lgammaf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def ll2double_rd(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_rd.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ll2double_rn(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_rn.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ll2double_ru(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_ru.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ll2double_rz(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2double_rz.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ll2float_rd(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_rd.html + + :param l: Argument. 
+ :type l: int64 + :rtype: float32 +""" + + +def ll2float_rn(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_rn.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def ll2float_ru(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_ru.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def ll2float_rz(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ll2float_rz.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def llabs(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llabs.html + + :param x: Argument. + :type x: int64 + :rtype: int64 +""" + + +def llmax(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llmax.html + + :param x: Argument. + :type x: int64 + :param y: Argument. + :type y: int64 + :rtype: int64 +""" + + +def llmin(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llmin.html + + :param x: Argument. + :type x: int64 + :param y: Argument. + :type y: int64 + :rtype: int64 +""" + + +def llrint(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llrint.html + + :param x: Argument. + :type x: float64 + :rtype: int64 +""" + + +def llrintf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llrintf.html + + :param x: Argument. + :type x: float32 + :rtype: int64 +""" + + +def llround(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llround.html + + :param x: Argument. + :type x: float64 + :rtype: int64 +""" + + +def llroundf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_llroundf.html + + :param x: Argument. + :type x: float32 + :rtype: int64 +""" + + +def log(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log.html + + :param x: Argument. 
+ :type x: float64 + :rtype: float64 +""" + + +def log10(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log10.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def log10f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log10f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def log1p(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log1p.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def log1pf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log1pf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def log2(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log2.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def log2f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_log2f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def logb(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_logb.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def logbf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_logbf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def logf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_logf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def longlong_as_double(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_longlong_as_double.html + + :param x: Argument. + :type x: int64 + :rtype: float64 +""" + + +def max(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_max.html + + :param x: Argument. + :type x: int32 + :param y: Argument. 
+ :type y: int32 + :rtype: int32 +""" + + +def min(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_min.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def modf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_modf.html + + :param x: Argument. + :type x: float64 + :rtype: UniTuple(float64 x 2) +""" + + +def modff(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_modff.html + + :param x: Argument. + :type x: float32 + :rtype: UniTuple(float32 x 2) +""" + + +def mul24(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_mul24.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def mul64hi(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_mul64hi.html + + :param x: Argument. + :type x: int64 + :param y: Argument. + :type y: int64 + :rtype: int64 +""" + + +def mulhi(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_mulhi.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def nearbyint(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nearbyint.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def nearbyintf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nearbyintf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def nextafter(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nextafter.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def nextafterf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_nextafterf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. 
+ :type y: float32 + :rtype: float32 +""" + + +def normcdf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdf.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def normcdff(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdff.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def normcdfinv(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdfinv.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def normcdfinvf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_normcdfinvf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def popc(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_popc.html + + :param x: Argument. + :type x: int32 + :rtype: int32 +""" + + +def popcll(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_popcll.html + + :param x: Argument. + :type x: int64 + :rtype: int32 +""" + + +def pow(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_pow.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def powf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_powf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def powi(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_powi.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: int32 + :rtype: float64 +""" + + +def powif(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_powif.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: int32 + :rtype: float32 +""" + + +def rcbrt(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rcbrt.html + + :param x: Argument. 
+ :type x: float64 + :rtype: float64 +""" + + +def rcbrtf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rcbrtf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def remainder(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remainder.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: float64 +""" + + +def remainderf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remainderf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: float32 +""" + + +def remquo(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remquo.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: float64 + :rtype: Tuple(float64, int32) +""" + + +def remquof(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_remquof.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: float32 + :rtype: Tuple(float32, int32) +""" + + +def rhadd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rhadd.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def rint(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rint.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def rintf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rintf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def round(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_round.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def roundf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_roundf.html + + :param x: Argument. 
+ :type x: float32 + :rtype: float32 +""" + + +def rsqrt(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rsqrt.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def rsqrtf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_rsqrtf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def sad(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sad.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :param z: Argument. + :type z: int32 + :rtype: int32 +""" + + +def saturatef(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_saturatef.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def scalbn(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_scalbn.html + + :param x: Argument. + :type x: float64 + :param y: Argument. + :type y: int32 + :rtype: float64 +""" + + +def scalbnf(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_scalbnf.html + + :param x: Argument. + :type x: float32 + :param y: Argument. + :type y: int32 + :rtype: float32 +""" + + +def signbitd(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_signbitd.html + + :param x: Argument. + :type x: float64 + :rtype: int32 +""" + + +def signbitf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_signbitf.html + + :param x: Argument. + :type x: float32 + :rtype: int32 +""" + + +def sin(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sin.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def sincos(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincos.html + + :param x: Argument. 
+ :type x: float64 + :rtype: UniTuple(float64 x 2) +""" + + +def sincosf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincosf.html + + :param x: Argument. + :type x: float32 + :rtype: UniTuple(float32 x 2) +""" + + +def sincospi(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincospi.html + + :param x: Argument. + :type x: float64 + :rtype: UniTuple(float64 x 2) +""" + + +def sincospif(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sincospif.html + + :param x: Argument. + :type x: float32 + :rtype: UniTuple(float32 x 2) +""" + + +def sinf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def sinh(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinh.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def sinhf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinhf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def sinpi(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinpi.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def sinpif(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sinpif.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def sqrt(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sqrt.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def sqrtf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_sqrtf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def tan(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tan.html + + :param x: Argument. 
+ :type x: float64 + :rtype: float64 +""" + + +def tanf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tanf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def tanh(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tanh.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def tanhf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tanhf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def tgamma(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tgamma.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def tgammaf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_tgammaf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def trunc(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_trunc.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def truncf(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_truncf.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def uhadd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uhadd.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def uint2double_rn(i): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2double_rn.html + + :param i: Argument. + :type i: int32 + :rtype: float64 +""" + + +def uint2float_rd(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_rd.html + + :param in: Argument. + :type in: int32 + :rtype: float32 +""" + + +def uint2float_rn(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_rn.html + + :param in: Argument. 
+ :type in: int32 + :rtype: float32 +""" + + +def uint2float_ru(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_ru.html + + :param in: Argument. + :type in: int32 + :rtype: float32 +""" + + +def uint2float_rz(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_uint2float_rz.html + + :param in: Argument. + :type in: int32 + :rtype: float32 +""" + + +def ull2double_rd(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_rd.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ull2double_rn(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_rn.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ull2double_ru(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_ru.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ull2double_rz(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2double_rz.html + + :param l: Argument. + :type l: int64 + :rtype: float64 +""" + + +def ull2float_rd(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_rd.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def ull2float_rn(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_rn.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def ull2float_ru(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_ru.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def ull2float_rz(l): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ull2float_rz.html + + :param l: Argument. + :type l: int64 + :rtype: float32 +""" + + +def ullmax(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ullmax.html + + :param x: Argument. + :type x: int64 + :param y: Argument. 
+ :type y: int64 + :rtype: int64 +""" + + +def ullmin(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ullmin.html + + :param x: Argument. + :type x: int64 + :param y: Argument. + :type y: int64 + :rtype: int64 +""" + + +def umax(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umax.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def umin(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umin.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def umul24(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umul24.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def umul64hi(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umul64hi.html + + :param x: Argument. + :type x: int64 + :param y: Argument. + :type y: int64 + :rtype: int64 +""" + + +def umulhi(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_umulhi.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def urhadd(x, y): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_urhadd.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :rtype: int32 +""" + + +def usad(x, y, z): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_usad.html + + :param x: Argument. + :type x: int32 + :param y: Argument. + :type y: int32 + :param z: Argument. + :type z: int32 + :rtype: int32 +""" + + +def y0(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y0.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def y0f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y0f.html + + :param x: Argument. 
+ :type x: float32 + :rtype: float32 +""" + + +def y1(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y1.html + + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def y1f(x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_y1f.html + + :param x: Argument. + :type x: float32 + :rtype: float32 +""" + + +def yn(n, x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_yn.html + + :param n: Argument. + :type n: int32 + :param x: Argument. + :type x: float64 + :rtype: float64 +""" + + +def ynf(n, x): + """ + See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ynf.html + + :param n: Argument. + :type n: int32 + :param x: Argument. + :type x: float32 + :rtype: float32 +""" diff --git a/lib/python3.10/site-packages/numba/cuda/libdevicedecl.py b/lib/python3.10/site-packages/numba/cuda/libdevicedecl.py new file mode 100644 index 0000000000000000000000000000000000000000..e82646dca585232ec86496875d5dc3e00a940ccc --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/libdevicedecl.py @@ -0,0 +1,17 @@ +from numba.cuda import libdevice, libdevicefuncs +from numba.core.typing.templates import ConcreteTemplate, Registry + +registry = Registry() +register_global = registry.register_global + + +def libdevice_declare(func, retty, args): + class Libdevice_function(ConcreteTemplate): + cases = [libdevicefuncs.create_signature(retty, args)] + + pyfunc = getattr(libdevice, func[5:]) + register_global(pyfunc)(Libdevice_function) + + +for func, (retty, args) in libdevicefuncs.functions.items(): + libdevice_declare(func, retty, args) diff --git a/lib/python3.10/site-packages/numba/cuda/libdevicefuncs.py b/lib/python3.10/site-packages/numba/cuda/libdevicefuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..ba3bb5accaa55cb96bb369350eaf80bf356ddd89 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/libdevicefuncs.py @@ -0,0 +1,1057 @@ +from collections import namedtuple 
+from textwrap import indent + +from numba.types import float32, float64, int16, int32, int64, void, Tuple +from numba.core.typing.templates import signature + +arg = namedtuple("arg", ("name", "ty", "is_ptr")) + +functions = { + "__nv_abs": (int32, [arg(name="x", ty=int32, is_ptr=False)]), + "__nv_acos": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_acosf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_acosh": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_acoshf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_asin": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_asinf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_asinh": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_asinhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_atan": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_atan2": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_atan2f": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_atanf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_atanh": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_atanhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_brev": (int32, [arg(name="x", ty=int32, is_ptr=False)]), + "__nv_brevll": (int64, [arg(name="x", ty=int64, is_ptr=False)]), + "__nv_byte_perm": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + arg(name="z", ty=int32, is_ptr=False), + ], + ), + "__nv_cbrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_cbrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_ceil": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_ceilf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_clz": (int32, 
[arg(name="x", ty=int32, is_ptr=False)]), + "__nv_clzll": (int32, [arg(name="x", ty=int64, is_ptr=False)]), + "__nv_copysign": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_copysignf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_cos": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_cosf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_cosh": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_coshf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_cospi": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_cospif": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_dadd_rd": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dadd_rn": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dadd_ru": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dadd_rz": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_ddiv_rd": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_ddiv_rn": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_ddiv_ru": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_ddiv_rz": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dmul_rd": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dmul_rn": ( + float64, 
+ [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dmul_ru": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_dmul_rz": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_double2float_rd": ( + float32, + [arg(name="d", ty=float64, is_ptr=False)], + ), + "__nv_double2float_rn": ( + float32, + [arg(name="d", ty=float64, is_ptr=False)], + ), + "__nv_double2float_ru": ( + float32, + [arg(name="d", ty=float64, is_ptr=False)], + ), + "__nv_double2float_rz": ( + float32, + [arg(name="d", ty=float64, is_ptr=False)], + ), + "__nv_double2hiint": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2int_rd": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2int_rn": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2int_ru": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2int_rz": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2ll_rd": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double2ll_rn": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double2ll_ru": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double2ll_rz": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double2loint": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2uint_rd": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2uint_rn": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2uint_ru": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2uint_rz": (int32, [arg(name="d", ty=float64, is_ptr=False)]), + "__nv_double2ull_rd": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double2ull_rn": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double2ull_ru": (int64, [arg(name="f", ty=float64, 
is_ptr=False)]), + "__nv_double2ull_rz": (int64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_double_as_longlong": ( + int64, + [arg(name="x", ty=float64, is_ptr=False)], + ), + "__nv_drcp_rd": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_drcp_rn": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_drcp_ru": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_drcp_rz": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_dsqrt_rd": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_dsqrt_rn": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_dsqrt_ru": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_dsqrt_rz": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_erf": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_erfc": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_erfcf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_erfcinv": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_erfcinvf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_erfcx": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_erfcxf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_erff": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_erfinv": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_erfinvf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_exp": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_exp10": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_exp10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_exp2": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_exp2f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_expf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_expm1": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_expm1f": (float32, [arg(name="x", 
ty=float32, is_ptr=False)]), + "__nv_fabs": (float64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_fabsf": (float32, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_fadd_rd": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fadd_rn": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fadd_ru": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fadd_rz": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fast_cosf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_exp10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_expf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_fdividef": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fast_log10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_log2f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_logf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_powf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fast_sincosf": ( + void, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="sptr", ty=float32, is_ptr=True), + arg(name="cptr", ty=float32, is_ptr=True), + ], + ), + "__nv_fast_sinf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fast_tanf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fdim": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_fdimf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + 
"__nv_fdiv_rd": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fdiv_rn": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fdiv_ru": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fdiv_rz": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_ffs": (int32, [arg(name="x", ty=int32, is_ptr=False)]), + "__nv_ffsll": (int32, [arg(name="x", ty=int64, is_ptr=False)]), + "__nv_finitef": (int32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_float2half_rn": (int16, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2int_rd": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2int_rn": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2int_ru": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2int_rz": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2ll_rd": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2ll_rn": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2ll_ru": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2ll_rz": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2uint_rd": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2uint_rn": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2uint_ru": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2uint_rz": (int32, [arg(name="in", ty=float32, is_ptr=False)]), + "__nv_float2ull_rd": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2ull_rn": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2ull_ru": (int64, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_float2ull_rz": (int64, [arg(name="f", ty=float32, 
is_ptr=False)]), + "__nv_float_as_int": (int32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_floor": (float64, [arg(name="f", ty=float64, is_ptr=False)]), + "__nv_floorf": (float32, [arg(name="f", ty=float32, is_ptr=False)]), + "__nv_fma": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + arg(name="z", ty=float64, is_ptr=False), + ], + ), + "__nv_fma_rd": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + arg(name="z", ty=float64, is_ptr=False), + ], + ), + "__nv_fma_rn": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + arg(name="z", ty=float64, is_ptr=False), + ], + ), + "__nv_fma_ru": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + arg(name="z", ty=float64, is_ptr=False), + ], + ), + "__nv_fma_rz": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + arg(name="z", ty=float64, is_ptr=False), + ], + ), + "__nv_fmaf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + arg(name="z", ty=float32, is_ptr=False), + ], + ), + "__nv_fmaf_rd": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + arg(name="z", ty=float32, is_ptr=False), + ], + ), + "__nv_fmaf_rn": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + arg(name="z", ty=float32, is_ptr=False), + ], + ), + "__nv_fmaf_ru": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + arg(name="z", ty=float32, is_ptr=False), + ], + ), + "__nv_fmaf_rz": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + arg(name="z", ty=float32, is_ptr=False), + ], + ), + "__nv_fmax": ( + float64, + [ + 
arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_fmaxf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fmin": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_fminf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fmod": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_fmodf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fmul_rd": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fmul_rn": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fmul_ru": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fmul_rz": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_frcp_rd": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_frcp_rn": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_frcp_ru": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_frcp_rz": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_frexp": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="b", ty=int32, is_ptr=True), + ], + ), + "__nv_frexpf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="b", ty=int32, is_ptr=True), + ], + ), + "__nv_frsqrt_rn": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fsqrt_rd": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fsqrt_rn": (float32, [arg(name="x", 
ty=float32, is_ptr=False)]), + "__nv_fsqrt_ru": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fsqrt_rz": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_fsub_rd": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fsub_rn": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fsub_ru": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_fsub_rz": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_hadd": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_half2float": (float32, [arg(name="h", ty=int16, is_ptr=False)]), + "__nv_hiloint2double": ( + float64, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_hypot": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_hypotf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_ilogb": (int32, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_ilogbf": (int32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_int2double_rn": (float64, [arg(name="i", ty=int32, is_ptr=False)]), + "__nv_int2float_rd": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_int2float_rn": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_int2float_ru": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_int2float_rz": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_int_as_float": (float32, [arg(name="x", ty=int32, is_ptr=False)]), + "__nv_isfinited": (int32, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_isinfd": (int32, [arg(name="x", 
ty=float64, is_ptr=False)]), + "__nv_isinff": (int32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_isnand": (int32, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_isnanf": (int32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_j0": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_j0f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_j1": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_j1f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_jn": ( + float64, + [ + arg(name="n", ty=int32, is_ptr=False), + arg(name="x", ty=float64, is_ptr=False), + ], + ), + "__nv_jnf": ( + float32, + [ + arg(name="n", ty=int32, is_ptr=False), + arg(name="x", ty=float32, is_ptr=False), + ], + ), + "__nv_ldexp": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_ldexpf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_lgamma": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_lgammaf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_ll2double_rd": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2double_rn": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2double_ru": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2double_rz": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2float_rd": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2float_rn": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2float_ru": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ll2float_rz": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_llabs": (int64, [arg(name="x", ty=int64, is_ptr=False)]), + "__nv_llmax": ( + int64, + [ + arg(name="x", ty=int64, is_ptr=False), + arg(name="y", ty=int64, is_ptr=False), + ], + ), + "__nv_llmin": ( + int64, + [ + arg(name="x", ty=int64, 
is_ptr=False), + arg(name="y", ty=int64, is_ptr=False), + ], + ), + "__nv_llrint": (int64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_llrintf": (int64, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_llround": (int64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_llroundf": (int64, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_log": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_log10": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_log10f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_log1p": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_log1pf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_log2": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_log2f": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_logb": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_logbf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_logf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_longlong_as_double": ( + float64, + [arg(name="x", ty=int64, is_ptr=False)], + ), + "__nv_max": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_min": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_modf": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="b", ty=float64, is_ptr=True), + ], + ), + "__nv_modff": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="b", ty=float32, is_ptr=True), + ], + ), + "__nv_mul24": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_mul64hi": ( + int64, + [ + arg(name="x", ty=int64, is_ptr=False), + arg(name="y", ty=int64, is_ptr=False), + ], + ), + "__nv_mulhi": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + 
), + # __nv_nan and __nv_nanf are excluded - they return a representation of a + # quiet NaN, but the argument they take seems to be undocumented, and + # follows a strange form - it is not an output like every other pointer + # argument. If a NaN is required, one can be obtained in CUDA Python by + # other means, e.g. `math.nan`. They are left in this list for completeness + # / reference. + # "__nv_nan": (float64, [arg(name="tagp", ty=int8, is_ptr=True)]), + # "__nv_nanf": (float32, [arg(name="tagp", ty=int8, is_ptr=True)]), + "__nv_nearbyint": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_nearbyintf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_nextafter": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_nextafterf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_normcdf": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_normcdff": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_normcdfinv": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_normcdfinvf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_popc": (int32, [arg(name="x", ty=int32, is_ptr=False)]), + "__nv_popcll": (int32, [arg(name="x", ty=int64, is_ptr=False)]), + "__nv_pow": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_powf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_powi": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_powif": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_rcbrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_rcbrtf": (float32, [arg(name="x", 
ty=float32, is_ptr=False)]), + "__nv_remainder": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + ], + ), + "__nv_remainderf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + ], + ), + "__nv_remquo": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=float64, is_ptr=False), + arg(name="c", ty=int32, is_ptr=True), + ], + ), + "__nv_remquof": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=float32, is_ptr=False), + arg(name="quo", ty=int32, is_ptr=True), + ], + ), + "__nv_rhadd": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_rint": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_rintf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_round": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_roundf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_rsqrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_rsqrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_sad": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + arg(name="z", ty=int32, is_ptr=False), + ], + ), + "__nv_saturatef": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_scalbn": ( + float64, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_scalbnf": ( + float32, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_signbitd": (int32, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_signbitf": (int32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_sin": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_sincos": ( + void, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="sptr", ty=float64, 
is_ptr=True), + arg(name="cptr", ty=float64, is_ptr=True), + ], + ), + "__nv_sincosf": ( + void, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="sptr", ty=float32, is_ptr=True), + arg(name="cptr", ty=float32, is_ptr=True), + ], + ), + "__nv_sincospi": ( + void, + [ + arg(name="x", ty=float64, is_ptr=False), + arg(name="sptr", ty=float64, is_ptr=True), + arg(name="cptr", ty=float64, is_ptr=True), + ], + ), + "__nv_sincospif": ( + void, + [ + arg(name="x", ty=float32, is_ptr=False), + arg(name="sptr", ty=float32, is_ptr=True), + arg(name="cptr", ty=float32, is_ptr=True), + ], + ), + "__nv_sinf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_sinh": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_sinhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_sinpi": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_sinpif": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_sqrt": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_sqrtf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_tan": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_tanf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_tanh": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_tanhf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_tgamma": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_tgammaf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_trunc": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_truncf": (float32, [arg(name="x", ty=float32, is_ptr=False)]), + "__nv_uhadd": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_uint2double_rn": (float64, [arg(name="i", ty=int32, is_ptr=False)]), + "__nv_uint2float_rd": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_uint2float_rn": (float32, [arg(name="in", ty=int32, is_ptr=False)]), 
+ "__nv_uint2float_ru": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_uint2float_rz": (float32, [arg(name="in", ty=int32, is_ptr=False)]), + "__nv_ull2double_rd": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2double_rn": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2double_ru": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2double_rz": (float64, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2float_rd": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2float_rn": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2float_ru": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ull2float_rz": (float32, [arg(name="l", ty=int64, is_ptr=False)]), + "__nv_ullmax": ( + int64, + [ + arg(name="x", ty=int64, is_ptr=False), + arg(name="y", ty=int64, is_ptr=False), + ], + ), + "__nv_ullmin": ( + int64, + [ + arg(name="x", ty=int64, is_ptr=False), + arg(name="y", ty=int64, is_ptr=False), + ], + ), + "__nv_umax": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_umin": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_umul24": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_umul64hi": ( + int64, + [ + arg(name="x", ty=int64, is_ptr=False), + arg(name="y", ty=int64, is_ptr=False), + ], + ), + "__nv_umulhi": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_urhadd": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + ], + ), + "__nv_usad": ( + int32, + [ + arg(name="x", ty=int32, is_ptr=False), + arg(name="y", ty=int32, is_ptr=False), + arg(name="z", ty=int32, is_ptr=False), + ], + ), + "__nv_y0": (float64, [arg(name="x", ty=float64, is_ptr=False)]), + "__nv_y0f": 
def create_signature(retty, args):
    """
    Given the return type and arguments for a libdevice function, return the
    signature of the stub function used to call it from CUDA Python.

    Pointer arguments are out-parameters: their pointee types become part of
    the stub's return value instead of appearing in its argument list, and a
    C ``void`` return type is dropped entirely.
    """
    # Any pointer arguments should be part of the return type.
    return_types = [arg.ty for arg in args if arg.is_ptr]
    # If the return type is void, there is no point adding it to the list of
    # return types.
    if retty != void:
        return_types.insert(0, retty)

    # A single returned value keeps its own type; several become a tuple.
    if len(return_types) > 1:
        retty = Tuple(return_types)
    else:
        retty = return_types[0]

    argtypes = [arg.ty for arg in args if not arg.is_ptr]

    return signature(retty, *argtypes)


# The following code generates the stubs for libdevice functions.
#
# Stubs can be regenerated (e.g. if the functions dict above is modified) with:
#
#   python -c "from numba.cuda.libdevicefuncs import generate_stubs; \
#              generate_stubs()" > numba/cuda/libdevice.py

docstring_template = """
See https://docs.nvidia.com/cuda/libdevice-users-guide/{func}.html

{param_types}
:rtype: {retty}
"""

param_template = """\
:param {a.name}: Argument.
:type {a.name}: {a.ty}"""


def generate_stubs():
    """Print a stub definition (with docstring) for every entry in
    ``functions`` to stdout; used to regenerate numba/cuda/libdevice.py."""
    for name, (retty, args) in functions.items():
        # Some libdevice functions have arguments called `in`, which causes a
        # syntax error in Python, so we rename these to `x`.
        def argname(arg):
            if arg.name == "in":
                return "x"
            else:
                return arg.name

        argnames = [argname(a) for a in args if not a.is_ptr]
        argstr = ", ".join(argnames)
        signature = create_signature(retty, args)

        param_types = "\n".join(
            [param_template.format(a=a) for a in args if not a.is_ptr]
        )
        docstring = docstring_template.format(
            param_types=param_types, retty=signature.return_type, func=name
        )
        docstring = indent(docstring, "    ")
        print(f'def {name[5:]}({argstr}):\n    """{docstring}"""\n\n')


# ---- numba/cuda/libdeviceimpl.py ----

from llvmlite import ir
from numba.core import cgutils, types
from numba.core.imputils import Registry
from numba.cuda import libdevice, libdevicefuncs

registry = Registry()
lower = registry.lower


def libdevice_implement(func, retty, nbargs):
    """
    Register a lowering for a libdevice function with no pointer arguments.

    :param func: mangled libdevice name, e.g. ``"__nv_sinf"``.
    :param retty: Numba return type of the function.
    :param nbargs: sequence of ``arg`` descriptors for its parameters.
    """
    def core(context, builder, sig, args):
        lmod = builder.module
        fretty = context.get_value_type(retty)
        fargtys = [context.get_value_type(arg.ty) for arg in nbargs]
        fnty = ir.FunctionType(fretty, fargtys)
        fn = cgutils.get_or_insert_function(lmod, fnty, func)
        return builder.call(fn, args)

    # Strip the "__nv_" prefix to find the stub in numba.cuda.libdevice.
    key = getattr(libdevice, func[5:])

    # Bug fix: this previously iterated the module-global ``args`` (leaked by
    # the registration loop below) instead of the ``nbargs`` parameter; it
    # only worked because the two happened to be the same object at call time.
    argtys = [arg.ty for arg in nbargs if not arg.is_ptr]
    lower(key, *argtys)(core)


def libdevice_implement_multiple_returns(func, retty, prototype_args):
    """
    Register a lowering for a libdevice function that has pointer (out)
    arguments. Out-arguments are allocated on the stack, passed by pointer,
    and their final values are packed together with the direct return value
    (if any) into the tuple the stub returns.
    """
    sig = libdevicefuncs.create_signature(retty, prototype_args)
    nb_retty = sig.return_type

    def core(context, builder, sig, args):
        lmod = builder.module

        fargtys = []
        for arg in prototype_args:
            ty = context.get_value_type(arg.ty)
            if arg.is_ptr:
                ty = ty.as_pointer()
            fargtys.append(ty)

        fretty = context.get_value_type(retty)

        fnty = ir.FunctionType(fretty, fargtys)
        fn = cgutils.get_or_insert_function(lmod, fnty, func)

        # For returned values that are returned through a pointer, we need to
        # allocate variables on the stack and pass a pointer to them.
        actual_args = []
        virtual_args = []
        arg_idx = 0
        for arg in prototype_args:
            if arg.is_ptr:
                # Allocate space for return value and add to args
                tmp_arg = cgutils.alloca_once(builder,
                                              context.get_value_type(arg.ty))
                actual_args.append(tmp_arg)
                virtual_args.append(tmp_arg)
            else:
                actual_args.append(args[arg_idx])
                arg_idx += 1

        ret = builder.call(fn, actual_args)

        # Following the call, we need to assemble the returned values into a
        # tuple for returning back to the caller.
        tuple_args = []
        if retty != types.void:
            tuple_args.append(ret)
        for arg in virtual_args:
            tuple_args.append(builder.load(arg))

        if isinstance(nb_retty, types.UniTuple):
            return cgutils.pack_array(builder, tuple_args)
        else:
            return cgutils.pack_struct(builder, tuple_args)

    key = getattr(libdevice, func[5:])
    lower(key, *sig.args)(core)


# Register an implementation for every declared libdevice function.
for func, (retty, args) in libdevicefuncs.functions.items():
    if any(arg.is_ptr for arg in args):
        libdevice_implement_multiple_returns(func, retty, args)
    else:
        libdevice_implement(func, retty, args)
'isinff', math.isinf)] +booleans += [('isfinited', 'finitef', math.isfinite)] + +unarys = [] +unarys += [('ceil', 'ceilf', math.ceil)] +unarys += [('floor', 'floorf', math.floor)] +unarys += [('fabs', 'fabsf', math.fabs)] +unarys += [('exp', 'expf', math.exp)] +unarys += [('expm1', 'expm1f', math.expm1)] +unarys += [('erf', 'erff', math.erf)] +unarys += [('erfc', 'erfcf', math.erfc)] +unarys += [('tgamma', 'tgammaf', math.gamma)] +unarys += [('lgamma', 'lgammaf', math.lgamma)] +unarys += [('sqrt', 'sqrtf', math.sqrt)] +unarys += [('log', 'logf', math.log)] +unarys += [('log2', 'log2f', math.log2)] +unarys += [('log10', 'log10f', math.log10)] +unarys += [('log1p', 'log1pf', math.log1p)] +unarys += [('acosh', 'acoshf', math.acosh)] +unarys += [('acos', 'acosf', math.acos)] +unarys += [('cos', 'cosf', math.cos)] +unarys += [('cosh', 'coshf', math.cosh)] +unarys += [('asinh', 'asinhf', math.asinh)] +unarys += [('asin', 'asinf', math.asin)] +unarys += [('sin', 'sinf', math.sin)] +unarys += [('sinh', 'sinhf', math.sinh)] +unarys += [('atan', 'atanf', math.atan)] +unarys += [('atanh', 'atanhf', math.atanh)] +unarys += [('tan', 'tanf', math.tan)] +unarys += [('trunc', 'truncf', math.trunc)] + +unarys_fastmath = {} +unarys_fastmath['cosf'] = 'fast_cosf' +unarys_fastmath['sinf'] = 'fast_sinf' +unarys_fastmath['tanf'] = 'fast_tanf' +unarys_fastmath['expf'] = 'fast_expf' +unarys_fastmath['log2f'] = 'fast_log2f' +unarys_fastmath['log10f'] = 'fast_log10f' +unarys_fastmath['logf'] = 'fast_logf' + +binarys = [] +binarys += [('copysign', 'copysignf', math.copysign)] +binarys += [('atan2', 'atan2f', math.atan2)] +binarys += [('pow', 'powf', math.pow)] +binarys += [('fmod', 'fmodf', math.fmod)] +binarys += [('hypot', 'hypotf', math.hypot)] +binarys += [('remainder', 'remainderf', math.remainder)] + +binarys_fastmath = {} +binarys_fastmath['powf'] = 'fast_powf' + + +@lower(math.isinf, types.Integer) +@lower(math.isnan, types.Integer) +def math_isinf_isnan_int(context, builder, sig, 
@lower(operator.truediv, types.float32, types.float32)
def maybe_fast_truediv(context, builder, sig, args):
    """
    Lower float32 / float32. Under fastmath this calls libdevice's
    fast_fdividef; otherwise an ordinary fdiv is emitted, with a zero
    divisor routed through the context's error model first.
    """
    if not context.fastmath:
        with cgutils.if_zero(builder, args[1]):
            context.error_model.fp_zero_division(builder,
                                                 ("division by zero",))
        return builder.fdiv(*args)

    fast_sig = typing.signature(float32, float32, float32)
    fast_div = context.get_function(libdevice.fast_fdividef, fast_sig)
    return fast_div(builder, args)


@lower(math.isfinite, types.Integer)
def math_isfinite_int(context, builder, sig, args):
    """math.isfinite on an integer is always True."""
    return context.get_constant(types.boolean, 1)


@lower(math.sin, types.float16)
def fp16_sin_impl(context, builder, sig, args):
    """Lower math.sin(float16) via cuda.fp16.hsin."""
    def impl(x):
        return cuda.fp16.hsin(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.cos, types.float16)
def fp16_cos_impl(context, builder, sig, args):
    """Lower math.cos(float16) via cuda.fp16.hcos."""
    def impl(x):
        return cuda.fp16.hcos(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.log, types.float16)
def fp16_log_impl(context, builder, sig, args):
    """Lower math.log(float16) via cuda.fp16.hlog."""
    def impl(x):
        return cuda.fp16.hlog(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.log10, types.float16)
def fp16_log10_impl(context, builder, sig, args):
    """Lower math.log10(float16) via cuda.fp16.hlog10."""
    def impl(x):
        return cuda.fp16.hlog10(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.log2, types.float16)
def fp16_log2_impl(context, builder, sig, args):
    """Lower math.log2(float16) via cuda.fp16.hlog2."""
    def impl(x):
        return cuda.fp16.hlog2(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.exp, types.float16)
def fp16_exp_impl(context, builder, sig, args):
    """Lower math.exp(float16) via cuda.fp16.hexp."""
    def impl(x):
        return cuda.fp16.hexp(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.floor, types.float16)
def fp16_floor_impl(context, builder, sig, args):
    """Lower math.floor(float16) via cuda.fp16.hfloor."""
    def impl(x):
        return cuda.fp16.hfloor(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.ceil, types.float16)
def fp16_ceil_impl(context, builder, sig, args):
    """Lower math.ceil(float16) via cuda.fp16.hceil."""
    def impl(x):
        return cuda.fp16.hceil(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.sqrt, types.float16)
def fp16_sqrt_impl(context, builder, sig, args):
    """Lower math.sqrt(float16) via cuda.fp16.hsqrt."""
    def impl(x):
        return cuda.fp16.hsqrt(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.fabs, types.float16)
def fp16_fabs_impl(context, builder, sig, args):
    """Lower math.fabs(float16) via cuda.fp16.habs."""
    def impl(x):
        return cuda.fp16.habs(x)

    return context.compile_internal(builder, impl, sig, args)


@lower(math.trunc, types.float16)
def fp16_trunc_impl(context, builder, sig, args):
    """Lower math.trunc(float16) via cuda.fp16.htrunc."""
    def impl(x):
        return cuda.fp16.htrunc(x)

    return context.compile_internal(builder, impl, sig, args)


def impl_boolean(key, ty, libfunc):
    """Register predicate *key* for *ty*: call *libfunc* (returns int32) and
    cast the result to boolean."""
    def lower_boolean_impl(context, builder, sig, args):
        int_sig = typing.signature(types.int32, ty)
        raw = context.get_function(libfunc, int_sig)(builder, args)
        return context.cast(builder, raw, types.int32, types.boolean)

    lower(key, ty)(lower_boolean_impl)


def get_lower_unary_impl(key, ty, libfunc):
    """Return a lowering for unary *key* at *ty*, substituting the fastmath
    libdevice variant for float32 when the context enables fastmath."""
    def lower_unary_impl(context, builder, sig, args):
        chosen = libfunc
        if ty == float32 and context.fastmath:
            fast_name = unarys_fastmath.get(libfunc.__name__)
            if fast_name is not None:
                chosen = getattr(libdevice, fast_name)

        unary_sig = typing.signature(ty, ty)
        return context.get_function(chosen, unary_sig)(builder, args)

    return lower_unary_impl
def impl_unary(key, ty, libfunc):
    """Register the unary lowering of *key* for *ty* using *libfunc*."""
    lower(key, ty)(get_lower_unary_impl(key, ty, libfunc))


def impl_unary_int(key, ty, libfunc):
    """
    Register a unary lowering for 64-bit integer operands: the operand is
    converted to float64 and the float64 libdevice function is applied.
    """
    def lower_unary_int_impl(context, builder, sig, args):
        if sig.args[0] == int64:
            convert = builder.sitofp
        elif sig.args[0] == uint64:
            convert = builder.uitofp
        else:
            m = 'Only 64-bit integers are supported for generic unary int ops'
            raise TypeError(m)

        operand = convert(args[0], ir.DoubleType())
        f64_sig = typing.signature(float64, float64)
        return context.get_function(libfunc, f64_sig)(builder, [operand])

    lower(key, ty)(lower_unary_int_impl)


def get_lower_binary_impl(key, ty, libfunc):
    """Return a lowering for binary *key* at *ty*, substituting the fastmath
    libdevice variant for float32 when the context enables fastmath."""
    def lower_binary_impl(context, builder, sig, args):
        chosen = libfunc
        if ty == float32 and context.fastmath:
            fast_name = binarys_fastmath.get(libfunc.__name__)
            if fast_name is not None:
                chosen = getattr(libdevice, fast_name)

        bin_sig = typing.signature(ty, ty, ty)
        return context.get_function(chosen, bin_sig)(builder, args)

    return lower_binary_impl


def get_binary_impl_for_fn_and_ty(fn, ty):
    """Look up the libdevice-backed lowering for math function *fn* at *ty*."""
    for f64_name, f32_name, key in binarys:
        if fn != key:
            continue
        if ty == float32:
            libfunc = getattr(libdevice, f32_name)
        elif ty == float64:
            libfunc = getattr(libdevice, f64_name)

        return get_lower_binary_impl(key, ty, libfunc)

    raise RuntimeError(f"Implementation of {fn} for {ty} not found")


def impl_binary(key, ty, libfunc):
    """Register the binary lowering of *key* for *ty* using *libfunc*."""
    lower(key, ty, ty)(get_lower_binary_impl(key, ty, libfunc))


def impl_binary_int(key, ty, libfunc):
    """
    Register a binary lowering for 64-bit integer operands: both operands are
    converted to float64 and the float64 libdevice function is applied.
    """
    def lower_binary_int_impl(context, builder, sig, args):
        if sig.args[0] == int64:
            convert = builder.sitofp
        elif sig.args[0] == uint64:
            convert = builder.uitofp
        else:
            m = 'Only 64-bit integers are supported for generic binary int ops'
            raise TypeError(m)

        operands = [convert(a, ir.DoubleType()) for a in args]
        f64_sig = typing.signature(float64, float64, float64)
        return context.get_function(libfunc, f64_sig)(builder, operands)

    lower(key, ty, ty)(lower_binary_int_impl)


# Register every boolean predicate, unary and binary math function for
# float32, float64 and (via float64 conversion) 64-bit integer arguments.
for fname64, fname32, key in booleans:
    impl_boolean(key, float32, getattr(libdevice, fname32))
    impl_boolean(key, float64, getattr(libdevice, fname64))


for fname64, fname32, key in unarys:
    impl32 = getattr(libdevice, fname32)
    impl64 = getattr(libdevice, fname64)
    impl_unary(key, float32, impl32)
    impl_unary(key, float64, impl64)
    impl_unary_int(key, int64, impl64)
    impl_unary_int(key, uint64, impl64)


for fname64, fname32, key in binarys:
    impl32 = getattr(libdevice, fname32)
    impl64 = getattr(libdevice, fname64)
    impl_binary(key, float32, impl32)
    impl_binary(key, float64, impl64)
    impl_binary_int(key, int64, impl64)
    impl_binary_int(key, uint64, impl64)


def impl_pow_int(ty, libfunc):
    """Register math.pow(ty, int32) using libdevice powi/powif."""
    def lower_pow_impl_int(context, builder, sig, args):
        powi_sig = typing.signature(ty, ty, types.int32)
        return context.get_function(libfunc, powi_sig)(builder, args)

    lower(math.pow, ty, types.int32)(lower_pow_impl_int)


impl_pow_int(types.float32, libdevice.powif)
impl_pow_int(types.float64, libdevice.powi)
def impl_frexp(ty, libfunc):
    """Register math.frexp(ty) -> (ty, int32) via libdevice frexp/frexpf."""
    retty = types.Tuple((ty, types.int32))

    def lower_frexp_impl(context, builder, sig, args):
        frexp_sig = typing.signature(retty, ty)
        return context.get_function(libfunc, frexp_sig)(builder, args)

    lower(math.frexp, ty)(lower_frexp_impl)


impl_frexp(types.float32, libdevice.frexpf)
impl_frexp(types.float64, libdevice.frexp)


def impl_ldexp(ty, libfunc):
    """Register math.ldexp(ty, int32) via libdevice ldexp/ldexpf."""
    def lower_ldexp_impl(context, builder, sig, args):
        ldexp_sig = typing.signature(ty, ty, types.int32)
        return context.get_function(libfunc, ldexp_sig)(builder, args)

    lower(math.ldexp, ty, types.int32)(lower_ldexp_impl)


impl_ldexp(types.float32, libdevice.ldexpf)
impl_ldexp(types.float64, libdevice.ldexp)


def impl_tanh(ty, libfunc):
    """
    Register math.tanh(ty). For float32 under fastmath on compute
    capability >= 7.5 the hardware ``tanh.approx.f32`` instruction is used;
    otherwise the libdevice tanh/tanhf is called.
    """
    def lower_tanh_impl(context, builder, sig, args):
        def libdevice_impl():
            tanh_sig = typing.signature(ty, ty)
            return context.get_function(libfunc, tanh_sig)(builder, args)

        def approx_impl():
            fnty = ir.FunctionType(ir.FloatType(), [ir.FloatType()])
            asm = ir.InlineAsm(fnty, 'tanh.approx.f32 $0, $1;', '=f,f')
            return builder.call(asm, args)

        if ty == float32 and context.fastmath:
            cc = targetconfig.ConfigStack().top().compute_capability
            if cc >= (7, 5):
                return approx_impl()

        return libdevice_impl()

    lower(math.tanh, ty)(lower_tanh_impl)


impl_tanh(types.float32, libdevice.tanhf)
impl_tanh(types.float64, libdevice.tanh)

impl_unary_int(math.tanh, int64, libdevice.tanh)
impl_unary_int(math.tanh, uint64, libdevice.tanh)

# Complex power implementations - translations of _Py_c_pow from CPython
# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151
#
# The complex64 variant casts all constants and some variables to ensure that
# as much computation is done in single precision as possible. A small number
# of operations are still done in 64-bit, but these come from libdevice code.


def cpow_implement(fty, cty):
    """Register operator.pow / ipow / pow() for the complex type *cty*,
    computing in the float precision *fty*."""
    def core(context, builder, sig, args):
        def cpow_internal(a, b):
            # Anything to the power of (0 + 0j) is 1.
            if b.real == fty(0.0) and b.imag == fty(0.0):
                return cty(1.0) + cty(0.0j)
            # NOTE(review): CPython's _Py_c_pow tests ``a.imag`` here rather
            # than ``b.real`` -- confirm this divergence is intentional.
            elif a.real == fty(0.0) and b.real == fty(0.0):
                return cty(0.0) + cty(0.0j)

            vabs = math.hypot(a.real, a.imag)
            magnitude = math.pow(vabs, b.real)
            at = math.atan2(a.imag, a.real)
            phase = at * b.real
            if b.imag != fty(0.0):
                magnitude /= math.exp(at * b.imag)
                phase += b.imag * math.log(vabs)

            return magnitude * (cty(math.cos(phase)) +
                                cty(math.sin(phase) * cty(1.0j)))

        return context.compile_internal(builder, cpow_internal, sig, args)

    lower(operator.pow, cty, cty)(core)
    lower(operator.ipow, cty, cty)(core)
    lower(pow, cty, cty)(core)


cpow_implement(types.float32, types.complex64)
cpow_implement(types.float64, types.complex128)
# ---- numba/cuda/models.py ----

@register_model(GridGroup)
class GridGroupModel(models.PrimitiveModel):
    """A grid group is represented by a 64-bit value."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, ir.IntType(64))


@register_model(types.Float)
class FloatModel(models.PrimitiveModel):
    """Map Numba float types onto LLVM value types; float16 is modelled as
    an i16."""

    def __init__(self, dmm, fe_type):
        if fe_type == types.float16:
            be_type = ir.IntType(16)
        elif fe_type == types.float32:
            be_type = ir.FloatType()
        elif fe_type == types.float64:
            be_type = ir.DoubleType()
        else:
            raise NotImplementedError(fe_type)
        super().__init__(dmm, fe_type, be_type)


# Dispatchers are opaque at the data-model level.
register_model(CUDADispatcher)(models.OpaqueModel)


# ---- numba/cuda/nvvmutils.py ----

def declare_atomic_cas_int(lmod, isize):
    """Declare the compare-and-swap helper for an *isize*-bit integer."""
    name = '___numba_atomic_i' + str(isize) + '_cas_hack'
    ity = ir.IntType(isize)
    fnty = ir.FunctionType(ity, (ir.PointerType(ity), ity, ity))
    return cgutils.get_or_insert_function(lmod, fnty, name)


def atomic_cmpxchg(builder, lmod, isize, ptr, cmp, val):
    """Emit a monotonic cmpxchg and return the loaded (old) value."""
    pair = builder.cmpxchg(ptr, cmp, val, 'monotonic', 'monotonic')
    return builder.extract_value(pair, 0)


def _declare_binary_fp(lmod, fname, llty):
    """Declare (or fetch) ``llty fname(llty*, llty)`` in *lmod*."""
    fnty = ir.FunctionType(llty, (ir.PointerType(llty), llty))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def _declare_binary_int(lmod, fname, bits):
    """Declare (or fetch) ``iN fname(iN*, iN)`` in *lmod*."""
    ity = ir.IntType(bits)
    fnty = ir.FunctionType(ity, (ir.PointerType(ity), ity))
    return cgutils.get_or_insert_function(lmod, fnty, fname)


def declare_atomic_add_float32(lmod):
    """Declare the NVVM float32 atomic add intrinsic."""
    return _declare_binary_fp(lmod, 'llvm.nvvm.atomic.load.add.f32.p0f32',
                              ir.FloatType())


def declare_atomic_add_float64(lmod):
    """Declare float64 atomic add: the NVVM intrinsic on compute capability
    >= 6.0, the numba helper function otherwise."""
    flags = targetconfig.ConfigStack().top()
    if flags.compute_capability >= (6, 0):
        fname = 'llvm.nvvm.atomic.load.add.f64.p0f64'
    else:
        fname = '___numba_atomic_double_add'
    return _declare_binary_fp(lmod, fname, ir.DoubleType())


def declare_atomic_sub_float32(lmod):
    """Declare the numba float32 atomic subtract helper."""
    return _declare_binary_fp(lmod, '___numba_atomic_float_sub',
                              ir.FloatType())


def declare_atomic_sub_float64(lmod):
    """Declare the numba float64 atomic subtract helper."""
    return _declare_binary_fp(lmod, '___numba_atomic_double_sub',
                              ir.DoubleType())


def declare_atomic_inc_int32(lmod):
    """Declare the NVVM 32-bit atomic increment intrinsic."""
    return _declare_binary_int(lmod, 'llvm.nvvm.atomic.load.inc.32.p0i32', 32)


def declare_atomic_inc_int64(lmod):
    """Declare the numba 64-bit atomic increment helper."""
    return _declare_binary_int(lmod, '___numba_atomic_u64_inc', 64)


def declare_atomic_dec_int32(lmod):
    """Declare the NVVM 32-bit atomic decrement intrinsic."""
    return _declare_binary_int(lmod, 'llvm.nvvm.atomic.load.dec.32.p0i32', 32)


def declare_atomic_dec_int64(lmod):
    """Declare the numba 64-bit atomic decrement helper."""
    return _declare_binary_int(lmod, '___numba_atomic_u64_dec', 64)


def declare_atomic_max_float32(lmod):
    """Declare the numba float32 atomic max helper."""
    return _declare_binary_fp(lmod, '___numba_atomic_float_max',
                              ir.FloatType())
def declare_cudaCGGetIntrinsicHandle(lmod):
    """Declare ``cudaCGGetIntrinsicHandle``: (i32 scope) -> i64 handle."""
    sig = ir.FunctionType(ir.IntType(64), (ir.IntType(32),))
    return cgutils.get_or_insert_function(lmod, sig,
                                          'cudaCGGetIntrinsicHandle')
def declare_string(builder, value):
    """Emit *value* as a NUL-terminated UTF-8 global in constant memory.

    Returns an i8* pointer in the generic address space, suitable for
    passing to vprintf-style functions.
    """
    lmod = builder.basic_block.function.module
    # NUL-terminate so the bytes are usable as a C string.
    cval = cgutils.make_bytearray(value.encode("utf-8") + b"\x00")
    gl = cgutils.add_global_variable(lmod, cval.type, name="_str",
                                     addrspace=nvvm.ADDRSPACE_CONSTANT)
    gl.linkage = 'internal'
    gl.global_constant = True
    gl.initializer = cval

    # Cast out of the constant address space so callers can use the
    # pointer without caring where the data lives.
    return builder.addrspacecast(gl, ir.PointerType(ir.IntType(8)), 'generic')
def get_global_id(builder, dim):
    """Compute the global thread id for the first *dim* axes.

    Returns a single i64 value when ``dim == 1``, otherwise a list with
    one i64 value per requested axis.
    """
    sreg = SRegBuilder(builder)
    ids = [sreg.getdim(axis) for axis in 'xyz'[:dim]]
    return ids[0] if dim == 1 else ids
+ """ + raise NotImplementedError("printing unimplemented for values of type %s" + % (ty,)) + + +@print_item.register(types.Integer) +@print_item.register(types.IntegerLiteral) +def int_print_impl(ty, context, builder, val): + if ty in types.unsigned_domain: + rawfmt = "%llu" + dsttype = types.uint64 + else: + rawfmt = "%lld" + dsttype = types.int64 + lld = context.cast(builder, val, ty, dsttype) + return rawfmt, [lld] + + +@print_item.register(types.Float) +def real_print_impl(ty, context, builder, val): + lld = context.cast(builder, val, ty, types.float64) + return "%f", [lld] + + +@print_item.register(types.StringLiteral) +def const_print_impl(ty, context, builder, sigval): + pyval = ty.literal_value + assert isinstance(pyval, str) # Ensured by lowering + rawfmt = "%s" + val = context.insert_string_const_addrspace(builder, pyval) + return rawfmt, [val] + + +@lower(print, types.VarArg(types.Any)) +def print_varargs(context, builder, sig, args): + """This function is a generic 'print' wrapper for arbitrary types. + It dispatches to the appropriate 'print' implementations above + depending on the detected real types in the signature.""" + + vprint = nvvmutils.declare_vprint(builder.module) + + formats = [] + values = [] + + for i, (argtype, argval) in enumerate(zip(sig.args, args)): + argfmt, argvals = print_item(argtype, context, builder, argval) + formats.append(argfmt) + values.extend(argvals) + + rawfmt = " ".join(formats) + "\n" + if len(args) > 32: + msg = ('CUDA print() cannot print more than 32 items. 
' + 'The raw format string will be emitted by the kernel instead.') + warn(msg, NumbaWarning) + + rawfmt = rawfmt.replace('%', '%%') + fmt = context.insert_string_const_addrspace(builder, rawfmt) + array = cgutils.make_anonymous_struct(builder, values) + arrayptr = cgutils.alloca_once_value(builder, array) + + vprint = nvvmutils.declare_vprint(builder.module) + builder.call(vprint, (fmt, builder.bitcast(arrayptr, voidptr))) + + return context.get_dummy_value() diff --git a/lib/python3.10/site-packages/numba/cuda/random.py b/lib/python3.10/site-packages/numba/cuda/random.py new file mode 100644 index 0000000000000000000000000000000000000000..460c7fc212077742e728e5c5c981ef2c1b1e6af0 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/random.py @@ -0,0 +1,292 @@ +import math + +from numba import (config, cuda, float32, float64, uint32, int64, uint64, + from_dtype, jit) + +import numpy as np + +# This implementation is based upon the xoroshiro128+ and splitmix64 algorithms +# described at: +# +# http://xoroshiro.di.unimi.it/ +# +# and originally implemented by David Blackman and Sebastiano Vigna. +# +# The implementations below are based on the C source code: +# +# * http://xoroshiro.di.unimi.it/xoroshiro128plus.c +# * http://xoroshiro.di.unimi.it/splitmix64.c +# +# Splitmix64 is used to generate the initial state of the xoroshiro128+ +# generator to ensure that small seeds don't result in predictable output. + +# **WARNING**: There is a lot of verbose casting in this file to ensure that +# NumPy casting conventions (which cast uint64 [op] int32 to float64) don't +# turn integers into floats when using these functions in the CUDA simulator. +# +# There are also no function type signatures to ensure that compilation is +# deferred so that import is quick, and Sphinx autodoc works. We are also +# using the CPU @jit decorator everywhere to create functions that work as +# both CPU and CUDA device functions. 
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def init_xoroshiro128p_state(states, index, seed):
    '''Use SplitMix64 to generate an xoroshiro128p state from 64-bit seed.

    This ensures that manually set small seeds don't result in a predictable
    initial sequence from the random number generator.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: uint64
    :param index: offset in states to update
    :type seed: int64
    :param seed: seed value to use when initializing state
    '''
    # Explicit casts keep NumPy's mixed-type promotion from turning the
    # integer arithmetic below into floats under the CUDA simulator.
    index = int64(index)
    seed = uint64(seed)

    # SplitMix64 finalizer: one additive step followed by two
    # xor-shift-multiply rounds using the reference constants.
    z = seed + uint64(0x9E3779B97F4A7C15)
    z = (z ^ (z >> uint32(30))) * uint64(0xBF58476D1CE4E5B9)
    z = (z ^ (z >> uint32(27))) * uint64(0x94D049BB133111EB)
    z = z ^ (z >> uint32(31))

    # Both 64-bit halves of the 128-bit state are seeded with the same
    # mixed value.
    states[index]['s0'] = z
    states[index]['s1'] = z
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_jump(states, index):
    '''Advance the RNG in ``states[index]`` by 2**64 steps.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    '''
    index = int64(index)

    # Jump polynomial constants from the reference xoroshiro128plus.c.
    jump = (uint64(0xbeac0467eba5facb), uint64(0xd86b048b86aa9922))

    s0 = uint64(0)
    s1 = uint64(0)

    # XOR-accumulate the state at every position where the jump polynomial
    # has a set bit; the generator is stepped once per bit regardless.
    for i in range(2):
        for b in range(64):
            if jump[i] & (uint64(1) << uint32(b)):
                s0 ^= states[index]['s0']
                s1 ^= states[index]['s1']
            xoroshiro128p_next(states, index)

    states[index]['s0'] = s0
    states[index]['s1'] = s1
@jit(forceobj=_forceobj, looplift=_looplift, nopython=_nopython)
def xoroshiro128p_normal_float64(states, index):
    '''Return a normally distributed float64 and advance ``states[index]``.

    The return value is drawn from a Gaussian of mean=0 and sigma=1 using the
    Box-Muller transform.  This advances the RNG sequence by two steps.

    :type states: 1D array, dtype=xoroshiro128p_dtype
    :param states: array of RNG states
    :type index: int64
    :param index: offset in states to update
    :rtype: float64
    '''
    index = int64(index)

    # NOTE(review): the uniforms are drawn with the *float32* helper even
    # in this float64 variant, so each draw carries only float32 worth of
    # input precision — confirm this is intended before changing it.
    u1 = xoroshiro128p_uniform_float32(states, index)
    u2 = xoroshiro128p_uniform_float32(states, index)

    z0 = math.sqrt(-float64(2.0) * math.log(u1)) * math.cos(TWO_PI_FLOAT64 * u2)
    # discarding second normal value
    # z1 = math.sqrt(-float64(2.0) * math.log(u1))
    #      * math.sin(TWO_PI_FLOAT64 * u2)
    return z0
def create_xoroshiro128p_states(n, seed, subsequence_start=0, stream=0):
    '''Returns a new device array initialized for n random number generators.

    This initializes the RNG states so that each state in the array
    corresponds to a subsequence separated by 2**64 steps from the others in
    the main sequence. Therefore, as long as no CUDA thread requests more
    than 2**64 random numbers, all of the RNG states produced by this
    function are guaranteed to be independent.

    The subsequence_start parameter can be used to advance the first RNG
    state by a multiple of 2**64 steps.

    :type n: int
    :param n: number of RNG states to create
    :type seed: uint64
    :param seed: starting seed for list of generators
    :type subsequence_start: uint64
    :param subsequence_start: advance the first RNG state by
        subsequence_start multiples of 2**64 steps
    :type stream: CUDA stream
    :param stream: stream to run initialization kernel on
    '''
    states = cuda.device_array(n, dtype=xoroshiro128p_dtype, stream=stream)
    init_xoroshiro128p_states(states, seed, subsequence_start, stream)
    return states
def stub_function(fn):
    """Wrap *fn* so that any host-side call raises ``NotImplementedError``.

    The wrapper stands in for device-only functions that have no meaning
    outside the context of a CUDA kernel.
    """
    @functools.wraps(fn)
    def host_call_error(*args, **kwargs):
        message = "%s cannot be called from host code" % fn
        raise NotImplementedError(message)
    return host_call_error
class shared(Stub):
    '''
    Shared memory namespace
    '''
    _description_ = ''

    @stub_function
    def array(shape, dtype):
        '''
        Allocate a shared array of the given *shape* and *type*. *shape* is
        either an integer or a tuple of integers representing the array's
        dimensions. *type* is a :ref:`Numba type <numba-types>` of the
        elements needing to be stored in the array.

        The returned array-like object can be read and written to like any
        normal device array (e.g. through indexing).
        '''
class const(Stub):
    '''
    Constant memory namespace
    '''

    @stub_function
    def array_like(ndarray):
        '''
        Create a const array from *ndarray*. The resulting const array will
        have the same shape, type, and values as *ndarray*.
        '''
class clz(Stub):
    """
    clz(x)

    Returns the number of leading zeros in x.
    """
+ """ + + +#------------------------------------------------------------------------------- +# single / double precision arithmetic + +class fma(Stub): + """ + fma(a, b, c) + + Perform the fused multiply-add operation. + """ + + +class cbrt(Stub): + """" + cbrt(a) + + Perform the cube root operation. + """ + + +#------------------------------------------------------------------------------- +# atomic + +class atomic(Stub): + """Namespace for atomic operations + """ + _description_ = '' + + class add(Stub): + """add(ary, idx, val) + + Perform atomic ``ary[idx] += val``. Supported on int32, float32, and + float64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class sub(Stub): + """sub(ary, idx, val) + + Perform atomic ``ary[idx] -= val``. Supported on int32, float32, and + float64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class and_(Stub): + """and_(ary, idx, val) + + Perform atomic ``ary[idx] &= val``. Supported on int32, int64, uint32 + and uint64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class or_(Stub): + """or_(ary, idx, val) + + Perform atomic ``ary[idx] |= val``. Supported on int32, int64, uint32 + and uint64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class xor(Stub): + """xor(ary, idx, val) + + Perform atomic ``ary[idx] ^= val``. Supported on int32, int64, uint32 + and uint64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class inc(Stub): + """inc(ary, idx, val) + + Perform atomic ``ary[idx] += 1`` up to val, then reset to 0. Supported + on uint32, and uint64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. 
+ """ + + class dec(Stub): + """dec(ary, idx, val) + + Performs:: + + ary[idx] = (value if (array[idx] == 0) or + (array[idx] > value) else array[idx] - 1) + + Supported on uint32, and uint64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class exch(Stub): + """exch(ary, idx, val) + + Perform atomic ``ary[idx] = val``. Supported on int32, int64, uint32 and + uint64 operands only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class max(Stub): + """max(ary, idx, val) + + Perform atomic ``ary[idx] = max(ary[idx], val)``. + + Supported on int32, int64, uint32, uint64, float32, float64 operands + only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class min(Stub): + """min(ary, idx, val) + + Perform atomic ``ary[idx] = min(ary[idx], val)``. + + Supported on int32, int64, uint32, uint64, float32, float64 operands + only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class nanmax(Stub): + """nanmax(ary, idx, val) + + Perform atomic ``ary[idx] = max(ary[idx], val)``. + + NOTE: NaN is treated as a missing value such that: + nanmax(NaN, n) == n, nanmax(n, NaN) == n + + Supported on int32, int64, uint32, uint64, float32, float64 operands + only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class nanmin(Stub): + """nanmin(ary, idx, val) + + Perform atomic ``ary[idx] = min(ary[idx], val)``. + + NOTE: NaN is treated as a missing value, such that: + nanmin(NaN, n) == n, nanmin(n, NaN) == n + + Supported on int32, int64, uint32, uint64, float32, float64 operands + only. + + Returns the old value at the index location as if it is loaded + atomically. + """ + + class compare_and_swap(Stub): + """compare_and_swap(ary, old, val) + + Conditionally assign ``val`` to the first element of an 1D array ``ary`` + if the current value matches ``old``. 
class nanosleep(Stub):
    '''
    nanosleep(ns)

    Suspends the thread for a sleep duration approximately close to the delay
    `ns`, specified in nanoseconds.
    '''
    # NOTE(review): hardware clamps the maximum sleep duration (~1 ms on
    # current GPUs) — confirm against the PTX ISA `nanosleep` description.
    _description_ = ''
    class hcos(Stub):
        """hcos(a)

        Calculate cosine in round to nearest even mode. Supported on fp16
        operands only.

        Returns the cosine result.

        """
+ + Returns the ceil result. + + """ + + class hsqrt(Stub): + """hsqrt(a) + + Calculate the square root of the input argument in round to nearest + mode. Supported on fp16 operands only. + + Returns the square root result. + + """ + + class hrsqrt(Stub): + """hrsqrt(a) + + Calculate the reciprocal square root of the input argument in round + to nearest even mode. Supported on fp16 operands only. + + Returns the reciprocal square root result. + + """ + + class hrcp(Stub): + """hrcp(a) + + Calculate the reciprocal of the input argument in round to nearest + even mode. Supported on fp16 operands only. + + Returns the reciprocal result. + + """ + + class hrint(Stub): + """hrint(a) + + Round the input argument to nearest integer value. Supported on fp16 + operands only. + + Returns the rounded result. + + """ + + class htrunc(Stub): + """htrunc(a) + + Truncate the input argument to its integer portion. Supported + on fp16 operands only. + + Returns the truncated result. + + """ + + class heq(Stub): + """heq(a, b) + + Perform fp16 comparison, (a == b). Supported + on fp16 operands only. + + Returns True if a and b are equal and False otherwise. + + """ + + class hne(Stub): + """hne(a, b) + + Perform fp16 comparison, (a != b). Supported + on fp16 operands only. + + Returns True if a and b are not equal and False otherwise. + + """ + + class hge(Stub): + """hge(a, b) + + Perform fp16 comparison, (a >= b). Supported + on fp16 operands only. + + Returns True if a is >= b and False otherwise. + + """ + + class hgt(Stub): + """hgt(a, b) + + Perform fp16 comparison, (a > b). Supported + on fp16 operands only. + + Returns True if a is > b and False otherwise. + + """ + + class hle(Stub): + """hle(a, b) + + Perform fp16 comparison, (a <= b). Supported + on fp16 operands only. + + Returns True if a is <= b and False otherwise. + + """ + + class hlt(Stub): + """hlt(a, b) + + Perform fp16 comparison, (a < b). Supported + on fp16 operands only. 
+ + Returns True if a is < b and False otherwise. + + """ + + class hmax(Stub): + """hmax(a, b) + + Perform fp16 maximum operation, max(a,b) Supported + on fp16 operands only. + + Returns a if a is greater than b, returns b otherwise. + + """ + + class hmin(Stub): + """hmin(a, b) + + Perform fp16 minimum operation, min(a,b). Supported + on fp16 operands only. + + Returns a if a is less than b, returns b otherwise. + + """ + + +#------------------------------------------------------------------------------- +# vector types + +def make_vector_type_stubs(): + """Make user facing objects for vector types""" + vector_type_stubs = [] + vector_type_prefix = ( + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "float32", + "float64" + ) + vector_type_element_counts = (1, 2, 3, 4) + vector_type_attribute_names = ("x", "y", "z", "w") + + for prefix, nelem in itertools.product( + vector_type_prefix, vector_type_element_counts + ): + type_name = f"{prefix}x{nelem}" + attr_names = vector_type_attribute_names[:nelem] + + vector_type_stub = type( + type_name, (Stub,), + { + **{attr: lambda self: None for attr in attr_names}, + **{ + "_description_": f"<{type_name}>", + "__signature__": Signature(parameters=[ + Parameter( + name=attr_name, kind=Parameter.POSITIONAL_ONLY + ) for attr_name in attr_names[:nelem] + ]), + "__doc__": f"A stub for {type_name} to be used in " + "CUDA kernels." + }, + **{"aliases": []} + } + ) + vector_type_stubs.append(vector_type_stub) + return vector_type_stubs + + +def map_vector_type_stubs_to_alias(vector_type_stubs): + """For each of the stubs, create its aliases. 
+ + For example: float64x3 -> double3 + """ + # C-compatible type mapping, see: + # https://numpy.org/devdocs/reference/arrays.scalars.html#integer-types + base_type_to_alias = { + "char": f"int{np.dtype(np.byte).itemsize * 8}", + "short": f"int{np.dtype(np.short).itemsize * 8}", + "int": f"int{np.dtype(np.intc).itemsize * 8}", + "long": f"int{np.dtype(np.int_).itemsize * 8}", + "longlong": f"int{np.dtype(np.longlong).itemsize * 8}", + "uchar": f"uint{np.dtype(np.ubyte).itemsize * 8}", + "ushort": f"uint{np.dtype(np.ushort).itemsize * 8}", + "uint": f"uint{np.dtype(np.uintc).itemsize * 8}", + "ulong": f"uint{np.dtype(np.uint).itemsize * 8}", + "ulonglong": f"uint{np.dtype(np.ulonglong).itemsize * 8}", + "float": f"float{np.dtype(np.single).itemsize * 8}", + "double": f"float{np.dtype(np.double).itemsize * 8}" + } + + base_type_to_vector_type = defaultdict(list) + for stub in vector_type_stubs: + base_type_to_vector_type[stub.__name__[:-2]].append(stub) + + for alias, base_type in base_type_to_alias.items(): + vector_type_stubs = base_type_to_vector_type[base_type] + for stub in vector_type_stubs: + nelem = stub.__name__[-1] + stub.aliases.append(f"{alias}{nelem}") + + +_vector_type_stubs = make_vector_type_stubs() +map_vector_type_stubs_to_alias(_vector_type_stubs) diff --git a/lib/python3.10/site-packages/numba/cuda/target.py b/lib/python3.10/site-packages/numba/cuda/target.py new file mode 100644 index 0000000000000000000000000000000000000000..6402ff3574abf3a4e17a0515c80e3ab703754ca9 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/target.py @@ -0,0 +1,440 @@ +import re +from functools import cached_property +import llvmlite.binding as ll +from llvmlite import ir + +from numba.core import (cgutils, config, debuginfo, itanium_mangler, types, + typing, utils) +from numba.core.dispatcher import Dispatcher +from numba.core.base import BaseContext +from numba.core.callconv import BaseCallConv, MinimalCallConv +from numba.core.typing import cmathdecl +from 
numba.core import datamodel + +from .cudadrv import nvvm +from numba.cuda import codegen, nvvmutils, ufuncs +from numba.cuda.models import cuda_data_manager + +# ----------------------------------------------------------------------------- +# Typing + + +class CUDATypingContext(typing.BaseContext): + def load_additional_registries(self): + from . import cudadecl, cudamath, libdevicedecl, vector_types + from numba.core.typing import enumdecl, cffi_utils + + self.install_registry(cudadecl.registry) + self.install_registry(cffi_utils.registry) + self.install_registry(cudamath.registry) + self.install_registry(cmathdecl.registry) + self.install_registry(libdevicedecl.registry) + self.install_registry(enumdecl.registry) + self.install_registry(vector_types.typing_registry) + + def resolve_value_type(self, val): + # treat other dispatcher object as another device function + from numba.cuda.dispatcher import CUDADispatcher + if (isinstance(val, Dispatcher) and not + isinstance(val, CUDADispatcher)): + try: + # use cached device function + val = val.__dispatcher + except AttributeError: + if not val._can_compile: + raise ValueError('using cpu function on device ' + 'but its compilation is disabled') + targetoptions = val.targetoptions.copy() + targetoptions['device'] = True + targetoptions['debug'] = targetoptions.get('debug', False) + targetoptions['opt'] = targetoptions.get('opt', True) + disp = CUDADispatcher(val.py_func, targetoptions) + # cache the device function for future use and to avoid + # duplicated copy of the same function. 
+ val.__dispatcher = disp + val = disp + + # continue with parent logic + return super(CUDATypingContext, self).resolve_value_type(val) + +# ----------------------------------------------------------------------------- +# Implementation + + +VALID_CHARS = re.compile(r'[^a-z0-9]', re.I) + + +class CUDATargetContext(BaseContext): + implement_powi_as_math_call = True + strict_alignment = True + + def __init__(self, typingctx, target='cuda'): + super().__init__(typingctx, target) + self.data_model_manager = cuda_data_manager.chain( + datamodel.default_manager + ) + + @property + def DIBuilder(self): + return debuginfo.DIBuilder + + @property + def enable_boundscheck(self): + # Unconditionally disabled + return False + + # Overrides + def create_module(self, name): + return self._internal_codegen._create_empty_module(name) + + def init(self): + self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit") + self._target_data = None + + def load_additional_registries(self): + # side effect of import needed for numba.cpython.*, the builtins + # registry is updated at import time. + from numba.cpython import numbers, tupleobj, slicing # noqa: F401 + from numba.cpython import rangeobj, iterators, enumimpl # noqa: F401 + from numba.cpython import unicode, charseq # noqa: F401 + from numba.cpython import cmathimpl + from numba.misc import cffiimpl + from numba.np import arrayobj # noqa: F401 + from numba.np import npdatetime # noqa: F401 + from . 
import ( + cudaimpl, printimpl, libdeviceimpl, mathimpl, vector_types + ) + # fix for #8940 + from numba.np.unsafe import ndarray # noqa F401 + + self.install_registry(cudaimpl.registry) + self.install_registry(cffiimpl.registry) + self.install_registry(printimpl.registry) + self.install_registry(libdeviceimpl.registry) + self.install_registry(cmathimpl.registry) + self.install_registry(mathimpl.registry) + self.install_registry(vector_types.impl_registry) + + def codegen(self): + return self._internal_codegen + + @property + def target_data(self): + if self._target_data is None: + self._target_data = ll.create_target_data(nvvm.NVVM().data_layout) + return self._target_data + + @cached_property + def nonconst_module_attrs(self): + """ + Some CUDA intrinsics are at the module level, but cannot be treated as + constants, because they are loaded from a special register in the PTX. + These include threadIdx, blockDim, etc. + """ + from numba import cuda + nonconsts = ('threadIdx', 'blockDim', 'blockIdx', 'gridDim', 'laneid', + 'warpsize') + nonconsts_with_mod = tuple([(types.Module(cuda), nc) + for nc in nonconsts]) + return nonconsts_with_mod + + @cached_property + def call_conv(self): + return CUDACallConv(self) + + def mangler(self, name, argtypes, *, abi_tags=(), uid=None): + return itanium_mangler.mangle(name, argtypes, abi_tags=abi_tags, + uid=uid) + + def prepare_cuda_kernel(self, codelib, fndesc, debug, lineinfo, + nvvm_options, filename, linenum, + max_registers=None): + """ + Adapt a code library ``codelib`` with the numba compiled CUDA kernel + with name ``fname`` and arguments ``argtypes`` for NVVM. + A new library is created with a wrapper function that can be used as + the kernel entry point for the given kernel. + + Returns the new code library and the wrapper function. + + Parameters: + + codelib: The CodeLibrary containing the device function to wrap + in a kernel call. + fndesc: The FunctionDescriptor of the source function. 
+ debug: Whether to compile with debug. + lineinfo: Whether to emit line info. + nvvm_options: Dict of NVVM options used when compiling the new library. + filename: The source filename that the function is contained in. + linenum: The source line that the function is on. + max_registers: The max_registers argument for the code library. + """ + kernel_name = itanium_mangler.prepend_namespace( + fndesc.llvm_func_name, ns='cudapy', + ) + library = self.codegen().create_library(f'{codelib.name}_kernel_', + entry_name=kernel_name, + nvvm_options=nvvm_options, + max_registers=max_registers) + library.add_linking_library(codelib) + wrapper = self.generate_kernel_wrapper(library, fndesc, kernel_name, + debug, lineinfo, filename, + linenum) + return library, wrapper + + def generate_kernel_wrapper(self, library, fndesc, kernel_name, debug, + lineinfo, filename, linenum): + """ + Generate the kernel wrapper in the given ``library``. + The function being wrapped is described by ``fndesc``. + The wrapper function is returned. 
+ """ + + argtypes = fndesc.argtypes + arginfo = self.get_arg_packer(argtypes) + argtys = list(arginfo.argument_types) + wrapfnty = ir.FunctionType(ir.VoidType(), argtys) + wrapper_module = self.create_module("cuda.kernel.wrapper") + fnty = ir.FunctionType(ir.IntType(32), + [self.call_conv.get_return_type(types.pyobject)] + + argtys) + func = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name) + + prefixed = itanium_mangler.prepend_namespace(func.name, ns='cudapy') + wrapfn = ir.Function(wrapper_module, wrapfnty, prefixed) + builder = ir.IRBuilder(wrapfn.append_basic_block('')) + + if debug or lineinfo: + directives_only = lineinfo and not debug + debuginfo = self.DIBuilder(module=wrapper_module, + filepath=filename, + cgctx=self, + directives_only=directives_only) + debuginfo.mark_subprogram( + wrapfn, kernel_name, fndesc.args, argtypes, linenum, + ) + debuginfo.mark_location(builder, linenum) + + # Define error handling variable + def define_error_gv(postfix): + name = wrapfn.name + postfix + gv = cgutils.add_global_variable(wrapper_module, ir.IntType(32), + name) + gv.initializer = ir.Constant(gv.type.pointee, None) + return gv + + gv_exc = define_error_gv("__errcode__") + gv_tid = [] + gv_ctaid = [] + for i in 'xyz': + gv_tid.append(define_error_gv("__tid%s__" % i)) + gv_ctaid.append(define_error_gv("__ctaid%s__" % i)) + + callargs = arginfo.from_arguments(builder, wrapfn.args) + status, _ = self.call_conv.call_function( + builder, func, types.void, argtypes, callargs) + + if debug: + # Check error status + with cgutils.if_likely(builder, status.is_ok): + builder.ret_void() + + with builder.if_then(builder.not_(status.is_python_exc)): + # User exception raised + old = ir.Constant(gv_exc.type.pointee, None) + + # Use atomic cmpxchg to prevent rewriting the error status + # Only the first error is recorded + + xchg = builder.cmpxchg(gv_exc, old, status.code, + 'monotonic', 'monotonic') + changed = builder.extract_value(xchg, 1) + + # If the xchange is 
successful, save the thread ID. + sreg = nvvmutils.SRegBuilder(builder) + with builder.if_then(changed): + for dim, ptr, in zip("xyz", gv_tid): + val = sreg.tid(dim) + builder.store(val, ptr) + + for dim, ptr, in zip("xyz", gv_ctaid): + val = sreg.ctaid(dim) + builder.store(val, ptr) + + builder.ret_void() + + nvvm.set_cuda_kernel(wrapfn) + library.add_ir_module(wrapper_module) + if debug or lineinfo: + debuginfo.finalize() + library.finalize() + + if config.DUMP_LLVM: + utils.dump_llvm(fndesc, wrapper_module) + + return library.get_function(wrapfn.name) + + def make_constant_array(self, builder, aryty, arr): + """ + Unlike the parent version. This returns a a pointer in the constant + addrspace. + """ + + lmod = builder.module + + constvals = [ + self.get_constant(types.byte, i) + for i in iter(arr.tobytes(order='A')) + ] + constaryty = ir.ArrayType(ir.IntType(8), len(constvals)) + constary = ir.Constant(constaryty, constvals) + + addrspace = nvvm.ADDRSPACE_CONSTANT + gv = cgutils.add_global_variable(lmod, constary.type, "_cudapy_cmem", + addrspace=addrspace) + gv.linkage = 'internal' + gv.global_constant = True + gv.initializer = constary + + # Preserve the underlying alignment + lldtype = self.get_data_type(aryty.dtype) + align = self.get_abi_sizeof(lldtype) + gv.align = 2 ** (align - 1).bit_length() + + # Convert to generic address-space + ptrty = ir.PointerType(ir.IntType(8)) + genptr = builder.addrspacecast(gv, ptrty, 'generic') + + # Create array object + ary = self.make_array(aryty)(self, builder) + kshape = [self.get_constant(types.intp, s) for s in arr.shape] + kstrides = [self.get_constant(types.intp, s) for s in arr.strides] + self.populate_array(ary, data=builder.bitcast(genptr, ary.data.type), + shape=kshape, + strides=kstrides, + itemsize=ary.itemsize, parent=ary.parent, + meminfo=None) + + return ary._getvalue() + + def insert_const_string(self, mod, string): + """ + Unlike the parent version. This returns a a pointer in the constant + addrspace. 
+ """ + text = cgutils.make_bytearray(string.encode("utf-8") + b"\x00") + name = '$'.join(["__conststring__", + itanium_mangler.mangle_identifier(string)]) + # Try to reuse existing global + gv = mod.globals.get(name) + if gv is None: + # Not defined yet + gv = cgutils.add_global_variable(mod, text.type, name, + addrspace=nvvm.ADDRSPACE_CONSTANT) + gv.linkage = 'internal' + gv.global_constant = True + gv.initializer = text + + # Cast to a i8* pointer + charty = gv.type.pointee.element + return gv.bitcast(charty.as_pointer(nvvm.ADDRSPACE_CONSTANT)) + + def insert_string_const_addrspace(self, builder, string): + """ + Insert a constant string in the constant addresspace and return a + generic i8 pointer to the data. + + This function attempts to deduplicate. + """ + lmod = builder.module + gv = self.insert_const_string(lmod, string) + charptrty = ir.PointerType(ir.IntType(8)) + return builder.addrspacecast(gv, charptrty, 'generic') + + def optimize_function(self, func): + """Run O1 function passes + """ + pass + ## XXX skipped for now + # fpm = lp.FunctionPassManager.new(func.module) + # + # lp.PassManagerBuilder.new().populate(fpm) + # + # fpm.initialize() + # fpm.run(func) + # fpm.finalize() + + def get_ufunc_info(self, ufunc_key): + return ufuncs.get_ufunc_info(ufunc_key) + + +class CUDACallConv(MinimalCallConv): + pass + + +class CUDACABICallConv(BaseCallConv): + """ + Calling convention aimed at matching the CUDA C/C++ ABI. The implemented + function signature is: + + () + + Exceptions are unsupported in this convention. + """ + + def _make_call_helper(self, builder): + # Call helpers are used to help report exceptions back to Python, so + # none is required here. 
+ return None + + def return_value(self, builder, retval): + return builder.ret(retval) + + def return_user_exc(self, builder, exc, exc_args=None, loc=None, + func_name=None): + msg = "Python exceptions are unsupported in the CUDA C/C++ ABI" + raise NotImplementedError(msg) + + def return_status_propagate(self, builder, status): + msg = "Return status is unsupported in the CUDA C/C++ ABI" + raise NotImplementedError(msg) + + def get_function_type(self, restype, argtypes): + """ + Get the LLVM IR Function type for *restype* and *argtypes*. + """ + arginfo = self._get_arg_packer(argtypes) + argtypes = list(arginfo.argument_types) + fnty = ir.FunctionType(self.get_return_type(restype), argtypes) + return fnty + + def decorate_function(self, fn, args, fe_argtypes, noalias=False): + """ + Set names and attributes of function arguments. + """ + assert not noalias + arginfo = self._get_arg_packer(fe_argtypes) + arginfo.assign_names(self.get_arguments(fn), + ['arg.' + a for a in args]) + + def get_arguments(self, func): + """ + Get the Python-level arguments of LLVM *func*. + """ + return func.args + + def call_function(self, builder, callee, resty, argtys, args): + """ + Call the Numba-compiled *callee*. + """ + arginfo = self._get_arg_packer(argtys) + realargs = arginfo.as_arguments(builder, args) + code = builder.call(callee, realargs) + # No status required as we don't support exceptions or a distinct None + # value in a C ABI. 
+ status = None + out = self.context.get_returned_value(builder, resty, code) + return status, out + + def get_return_type(self, ty): + return self.context.data_model_manager[ty].get_return_type() diff --git a/lib/python3.10/site-packages/numba/cuda/testing.py b/lib/python3.10/site-packages/numba/cuda/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..ae62d3368662c0f8ff43ee403845b66d29410c77 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/testing.py @@ -0,0 +1,202 @@ +import os +import platform +import shutil + +from numba.tests.support import SerialMixin +from numba.cuda.cuda_paths import get_conda_ctk +from numba.cuda.cudadrv import driver, devices, libs +from numba.core import config +from numba.tests.support import TestCase +from pathlib import Path +import unittest + +numba_cuda_dir = Path(__file__).parent +test_data_dir = numba_cuda_dir / 'tests' / 'data' + + +class CUDATestCase(SerialMixin, TestCase): + """ + For tests that use a CUDA device. Test methods in a CUDATestCase must not + be run out of module order, because the ContextResettingTestCase may reset + the context and destroy resources used by a normal CUDATestCase if any of + its tests are run between tests from a CUDATestCase. + """ + + def setUp(self): + self._low_occupancy_warnings = config.CUDA_LOW_OCCUPANCY_WARNINGS + self._warn_on_implicit_copy = config.CUDA_WARN_ON_IMPLICIT_COPY + + # Disable warnings about low gpu utilization in the test suite + config.CUDA_LOW_OCCUPANCY_WARNINGS = 0 + # Disable warnings about host arrays in the test suite + config.CUDA_WARN_ON_IMPLICIT_COPY = 0 + + def tearDown(self): + config.CUDA_LOW_OCCUPANCY_WARNINGS = self._low_occupancy_warnings + config.CUDA_WARN_ON_IMPLICIT_COPY = self._warn_on_implicit_copy + + def skip_if_lto(self, reason): + # Some linkers need the compute capability to be specified, so we + # always specify it here. 
+ cc = devices.get_context().device.compute_capability + linker = driver.Linker.new(cc=cc) + if linker.lto: + self.skipTest(reason) + + +class ContextResettingTestCase(CUDATestCase): + """ + For tests where the context needs to be reset after each test. Typically + these inspect or modify parts of the context that would usually be expected + to be internal implementation details (such as the state of allocations and + deallocations, etc.). + """ + + def tearDown(self): + super().tearDown() + from numba.cuda.cudadrv.devices import reset + reset() + + +def ensure_supported_ccs_initialized(): + from numba.cuda import is_available as cuda_is_available + from numba.cuda.cudadrv import nvvm + + if cuda_is_available(): + # Ensure that cudart.so is loaded and the list of supported compute + # capabilities in the nvvm module is populated before a fork. This is + # needed because some compilation tests don't require a CUDA context, + # but do use NVVM, and it is required that libcudart.so should be + # loaded before a fork (note that the requirement is not explicitly + # documented). 
+ nvvm.get_supported_ccs() + + +def skip_on_cudasim(reason): + """Skip this test if running on the CUDA simulator""" + return unittest.skipIf(config.ENABLE_CUDASIM, reason) + + +def skip_unless_cudasim(reason): + """Skip this test if running on CUDA hardware""" + return unittest.skipUnless(config.ENABLE_CUDASIM, reason) + + +def skip_unless_conda_cudatoolkit(reason): + """Skip test if the CUDA toolkit was not installed by Conda""" + return unittest.skipUnless(get_conda_ctk() is not None, reason) + + +def skip_if_external_memmgr(reason): + """Skip test if an EMM Plugin is in use""" + return unittest.skipIf(config.CUDA_MEMORY_MANAGER != 'default', reason) + + +def skip_under_cuda_memcheck(reason): + return unittest.skipIf(os.environ.get('CUDA_MEMCHECK') is not None, reason) + + +def skip_without_nvdisasm(reason): + nvdisasm_path = shutil.which('nvdisasm') + return unittest.skipIf(nvdisasm_path is None, reason) + + +def skip_with_nvdisasm(reason): + nvdisasm_path = shutil.which('nvdisasm') + return unittest.skipIf(nvdisasm_path is not None, reason) + + +def skip_on_arm(reason): + cpu = platform.processor() + is_arm = cpu.startswith('arm') or cpu.startswith('aarch') + return unittest.skipIf(is_arm, reason) + + +def skip_if_cuda_includes_missing(fn): + # Skip when cuda.h is not available - generally this should indicate + # whether the CUDA includes are available or not + cuda_h = os.path.join(config.CUDA_INCLUDE_PATH, 'cuda.h') + cuda_h_file = (os.path.exists(cuda_h) and os.path.isfile(cuda_h)) + reason = 'CUDA include dir not available on this system' + return unittest.skipUnless(cuda_h_file, reason)(fn) + + +def skip_if_mvc_enabled(reason): + """Skip a test if Minor Version Compatibility is enabled""" + return unittest.skipIf(config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY, + reason) + + +def skip_if_mvc_libraries_unavailable(fn): + libs_available = False + try: + import cubinlinker # noqa: F401 + import ptxcompiler # noqa: F401 + libs_available = True + except 
ImportError: + pass + + return unittest.skipUnless(libs_available, + "Requires cubinlinker and ptxcompiler")(fn) + + +def cc_X_or_above(major, minor): + if not config.ENABLE_CUDASIM: + cc = devices.get_context().device.compute_capability + return cc >= (major, minor) + else: + return True + + +def skip_unless_cc_50(fn): + return unittest.skipUnless(cc_X_or_above(5, 0), "requires cc >= 5.0")(fn) + + +def skip_unless_cc_53(fn): + return unittest.skipUnless(cc_X_or_above(5, 3), "requires cc >= 5.3")(fn) + + +def skip_unless_cc_60(fn): + return unittest.skipUnless(cc_X_or_above(6, 0), "requires cc >= 6.0")(fn) + + +def skip_unless_cc_75(fn): + return unittest.skipUnless(cc_X_or_above(7, 5), "requires cc >= 7.5")(fn) + + +def xfail_unless_cudasim(fn): + if config.ENABLE_CUDASIM: + return fn + else: + return unittest.expectedFailure(fn) + + +def skip_with_cuda_python(reason): + return unittest.skipIf(driver.USE_NV_BINDING, reason) + + +def cudadevrt_missing(): + if config.ENABLE_CUDASIM: + return False + try: + path = libs.get_cudalib('cudadevrt', static=True) + libs.check_static_lib(path) + except FileNotFoundError: + return True + return False + + +def skip_if_cudadevrt_missing(fn): + return unittest.skipIf(cudadevrt_missing(), 'cudadevrt missing')(fn) + + +class ForeignArray(object): + """ + Class for emulating an array coming from another library through the CUDA + Array interface. This just hides a DeviceNDArray so that it doesn't look + like a DeviceNDArray. 
+ """ + + def __init__(self, arr): + self._arr = arr + self.__cuda_array_interface__ = arr.__cuda_array_interface__ diff --git a/lib/python3.10/site-packages/numba/cuda/tests/__init__.py b/lib/python3.10/site-packages/numba/cuda/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6171b01f83cd2d9569baf0cdd33686c1d17d687 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/__init__.py @@ -0,0 +1,24 @@ +from numba.cuda.testing import ensure_supported_ccs_initialized +from numba.testing import unittest +from numba.testing import load_testsuite +from numba import cuda +from os.path import dirname, join + + +def load_tests(loader, tests, pattern): + suite = unittest.TestSuite() + this_dir = dirname(__file__) + ensure_supported_ccs_initialized() + suite.addTests(load_testsuite(loader, join(this_dir, 'nocuda'))) + if cuda.is_available(): + suite.addTests(load_testsuite(loader, join(this_dir, 'cudasim'))) + gpus = cuda.list_devices() + if gpus and gpus[0].compute_capability >= (2, 0): + suite.addTests(load_testsuite(loader, join(this_dir, 'cudadrv'))) + suite.addTests(load_testsuite(loader, join(this_dir, 'cudapy'))) + suite.addTests(load_testsuite(loader, join(this_dir, 'doc_examples'))) + else: + print("skipped CUDA tests because GPU CC < 2.0") + else: + print("skipped CUDA tests") + return suite diff --git a/lib/python3.10/site-packages/numba/cuda/tests/data/__init__.py b/lib/python3.10/site-packages/numba/cuda/tests/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/numba/cuda/tests/data/cuda_include.cu b/lib/python3.10/site-packages/numba/cuda/tests/data/cuda_include.cu new file mode 100644 index 0000000000000000000000000000000000000000..69a0efd9a1f53037770384faa1ad049dcac5ab09 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/data/cuda_include.cu @@ -0,0 +1,5 @@ +// Not all CUDA includes 
are safe to include in device code compiled by NVRTC, +// because it does not have paths to all system include directories. Headers +// such as cuda_device_runtime_api.h are safe to use in NVRTC without adding +// additional includes. +#include diff --git a/lib/python3.10/site-packages/numba/cuda/tests/data/error.cu b/lib/python3.10/site-packages/numba/cuda/tests/data/error.cu new file mode 100644 index 0000000000000000000000000000000000000000..402f3138dce7a5c70c3677912d0e1c033150e3c8 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/data/error.cu @@ -0,0 +1,7 @@ +extern "C" __device__ +int bar(int* out, int a) { + // Explicitly placed to generate an error + SYNTAX ERROR + *out = a * 2; + return 0; +} diff --git a/lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.cu b/lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.cu new file mode 100644 index 0000000000000000000000000000000000000000..4d245366c64a9b7a0308d422d3c8fdcb779b1269 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.cu @@ -0,0 +1,23 @@ +// Compile with: +// +// nvcc -gencode arch=compute_50,code=compute_50 -rdc true -ptx jitlink.cu +// +// using the oldest supported toolkit version (10.2 at the time of writing). + +extern "C" __device__ +int bar(int *out, int a) +{ + *out = a * 2; + return 0; +} + + +// The out argument is necessary due to Numba's CUDA calling convention, which +// always reserves the first parameter for a pointer to a returned value, even +// if there is no return value. 
+extern "C" __device__ +int array_mutator(void *out, int *a) +{ + a[0] = a[1]; + return 0; +} diff --git a/lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.ptx b/lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.ptx new file mode 100644 index 0000000000000000000000000000000000000000..dde0cc214aac1c6561534a483b60b7baa78629d2 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/data/jitlink.ptx @@ -0,0 +1,51 @@ +// +// Generated by NVIDIA NVVM Compiler +// +// Compiler Build ID: CL-27506705 +// Cuda compilation tools, release 10.2, V10.2.89 +// Based on LLVM 3.4svn +// + +.version 6.5 +.target sm_50 +.address_size 64 + + // .globl bar + +.visible .func (.param .b32 func_retval0) bar( + .param .b64 bar_param_0, + .param .b32 bar_param_1 +) +{ + .reg .b32 %r<4>; + .reg .b64 %rd<2>; + + + ld.param.u64 %rd1, [bar_param_0]; + ld.param.u32 %r1, [bar_param_1]; + shl.b32 %r2, %r1, 1; + st.u32 [%rd1], %r2; + mov.u32 %r3, 0; + st.param.b32 [func_retval0+0], %r3; + ret; +} + + // .globl array_mutator +.visible .func (.param .b32 func_retval0) array_mutator( + .param .b64 array_mutator_param_0, + .param .b64 array_mutator_param_1 +) +{ + .reg .b32 %r<3>; + .reg .b64 %rd<2>; + + + ld.param.u64 %rd1, [array_mutator_param_1]; + ld.u32 %r1, [%rd1+4]; + st.u32 [%rd1], %r1; + mov.u32 %r2, 0; + st.param.b32 [func_retval0+0], %r2; + ret; +} + + diff --git a/lib/python3.10/site-packages/numba/cuda/tests/data/warn.cu b/lib/python3.10/site-packages/numba/cuda/tests/data/warn.cu new file mode 100644 index 0000000000000000000000000000000000000000..4f31e951d97513c12234ef7fc2f38f29e5077e42 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/data/warn.cu @@ -0,0 +1,7 @@ +extern "C" __device__ +int bar(int* out, int a) { + // Explicitly placed to generate a warning for testing the NVRTC program log + int unused; + *out = a * 2; + return 0; +} diff --git a/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py 
b/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..b879a12d27729868c8646435cedaae18311c855e --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py @@ -0,0 +1,76 @@ +import unittest + +from numba.cuda.testing import CUDATestCase, skip_on_cudasim +from numba.tests.support import captured_stdout +import numpy as np + + +@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level") +class TestCpuGpuCompat(CUDATestCase): + """ + Test compatibility of CPU and GPU functions + """ + + def setUp(self): + # Prevent output from this test showing up when running the test suite + self._captured_stdout = captured_stdout() + self._captured_stdout.__enter__() + super().setUp() + + def tearDown(self): + # No exception type, value, or traceback + self._captured_stdout.__exit__(None, None, None) + super().tearDown() + + def test_ex_cpu_gpu_compat(self): + # ex_cpu_gpu_compat.import.begin + from math import pi + + import numba + from numba import cuda + # ex_cpu_gpu_compat.import.end + + # ex_cpu_gpu_compat.allocate.begin + X = cuda.to_device([1, 10, 234]) + Y = cuda.to_device([2, 2, 4014]) + Z = cuda.to_device([3, 14, 2211]) + results = cuda.to_device([0.0, 0.0, 0.0]) + # ex_cpu_gpu_compat.allocate.end + + # ex_cpu_gpu_compat.define.begin + @numba.jit + def business_logic(x, y, z): + return 4 * z * (2 * x - (4 * y) / 2 * pi) + # ex_cpu_gpu_compat.define.end + + # ex_cpu_gpu_compat.cpurun.begin + print(business_logic(1, 2, 3)) # -126.79644737231007 + # ex_cpu_gpu_compat.cpurun.end + + # ex_cpu_gpu_compat.usegpu.begin + @cuda.jit + def f(res, xarr, yarr, zarr): + tid = cuda.grid(1) + if tid < len(xarr): + # The function decorated with numba.jit may be directly reused + res[tid] = business_logic(xarr[tid], yarr[tid], zarr[tid]) + # ex_cpu_gpu_compat.usegpu.end + + # ex_cpu_gpu_compat.launch.begin + f.forall(len(X))(results, X, Y, 
Z) + print(results) + # [-126.79644737231007, 416.28324559588634, -218912930.2987788] + # ex_cpu_gpu_compat.launch.end + + expect = [ + business_logic(x, y, z) for x, y, z in zip(X, Y, Z) + ] + + np.testing.assert_equal( + expect, + results.copy_to_host() + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_montecarlo.py b/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_montecarlo.py new file mode 100644 index 0000000000000000000000000000000000000000..92627084f468b4944c059dbdff90812334a54551 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_montecarlo.py @@ -0,0 +1,109 @@ +import unittest + +from numba.cuda.testing import CUDATestCase, skip_on_cudasim +from numba.tests.support import captured_stdout + + +@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level") +class TestMonteCarlo(CUDATestCase): + """ + Test monte-carlo integration + """ + + def setUp(self): + # Prevent output from this test showing up when running the test suite + self._captured_stdout = captured_stdout() + self._captured_stdout.__enter__() + super().setUp() + + def tearDown(self): + # No exception type, value, or traceback + self._captured_stdout.__exit__(None, None, None) + super().tearDown() + + def test_ex_montecarlo(self): + # ex_montecarlo.import.begin + import numba + import numpy as np + from numba import cuda + from numba.cuda.random import ( + create_xoroshiro128p_states, + xoroshiro128p_uniform_float32, + ) + # ex_montecarlo.import.end + + # ex_montecarlo.define.begin + # number of samples, higher will lead to a more accurate answer + nsamps = 1000000 + # ex_montecarlo.define.end + + # ex_montecarlo.kernel.begin + @cuda.jit + def mc_integrator_kernel(out, rng_states, lower_lim, upper_lim): + """ + kernel to draw random samples and evaluate the function to + be integrated at those sample values + """ + size = len(out) + + gid = cuda.grid(1) + if 
gid < size: + # draw a sample between 0 and 1 on this thread + samp = xoroshiro128p_uniform_float32(rng_states, gid) + + # normalize this sample to the limit range + samp = samp * (upper_lim - lower_lim) + lower_lim + + # evaluate the function to be + # integrated at the normalized + # value of the sample + y = func(samp) + out[gid] = y + # ex_montecarlo.kernel.end + + # ex_montecarlo.callfunc.begin + @cuda.reduce + def sum_reduce(a, b): + return a + b + + def mc_integrate(lower_lim, upper_lim, nsamps): + """ + approximate the definite integral of `func` from + `lower_lim` to `upper_lim` + """ + out = cuda.to_device(np.zeros(nsamps, dtype="float32")) + rng_states = create_xoroshiro128p_states(nsamps, seed=42) + + # jit the function for use in CUDA kernels + + mc_integrator_kernel.forall(nsamps)( + out, rng_states, lower_lim, upper_lim + ) + # normalization factor to convert + # to the average: (b - a)/(N - 1) + factor = (upper_lim - lower_lim) / (nsamps - 1) + + return sum_reduce(out) * factor + # ex_montecarlo.callfunc.end + + # ex_montecarlo.launch.begin + # define a function to integrate + @numba.jit + def func(x): + return 1.0 / x + + mc_integrate(1, 2, nsamps) # array(0.6929643, dtype=float32) + mc_integrate(2, 3, nsamps) # array(0.4054021, dtype=float32) + # ex_montecarlo.launch.end + + # values computed independently using maple + np.testing.assert_allclose( + mc_integrate(1, 2, nsamps), 0.69315, atol=0.001 + ) + np.testing.assert_allclose( + mc_integrate(2, 3, nsamps), 0.4055, atol=0.001 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_ufunc.py b/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f56b07e92189057dbc76a8b40a53b8daf7b678 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/doc_examples/test_ufunc.py @@ -0,0 +1,50 @@ +import unittest + +from numba.cuda.testing 
import CUDATestCase, skip_on_cudasim +from numba.tests.support import captured_stdout + + +@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level") +class TestUFunc(CUDATestCase): + """ + Test calling a UFunc + """ + + def setUp(self): + # Prevent output from this test showing + # up when running the test suite + self._captured_stdout = captured_stdout() + self._captured_stdout.__enter__() + super().setUp() + + def tearDown(self): + # No exception type, value, or traceback + self._captured_stdout.__exit__(None, None, None) + super().tearDown() + + def test_ex_cuda_ufunc_call(self): + # ex_cuda_ufunc.begin + import numpy as np + from numba import cuda + + # A kernel calling a ufunc (sin, in this case) + @cuda.jit + def f(r, x): + # Compute sin(x) with result written to r + np.sin(x, r) + + # Declare input and output arrays + x = np.arange(10, dtype=np.float32) - 5 + r = np.zeros_like(x) + + # Launch kernel that calls the ufunc + f[1, 1](r, x) + + # A quick sanity check demonstrating equality of the sine computed by + # the sin ufunc inside the kernel, and NumPy's sin ufunc + np.testing.assert_allclose(r, np.sin(x)) + # ex_cuda_ufunc.end + + +if __name__ == "__main__": + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/nocuda/__init__.py b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e7d31af3b99e121a9ae04bc855a6c80cc4594d --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/__init__.py @@ -0,0 +1,8 @@ +from numba.cuda.testing import ensure_supported_ccs_initialized +from numba.testing import load_testsuite +import os + + +def load_tests(loader, tests, pattern): + ensure_supported_ccs_initialized() + return load_testsuite(loader, os.path.dirname(__file__)) diff --git a/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_dummyarray.py b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_dummyarray.py new 
file mode 100644 index 0000000000000000000000000000000000000000..e4ad7d0fd6638f5f5e84b00cf60430b505bf3fee --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_dummyarray.py @@ -0,0 +1,359 @@ +import unittest +import itertools +import numpy as np +from numba.cuda.cudadrv.dummyarray import Array +from numba.cuda.testing import skip_on_cudasim + + +@skip_on_cudasim("Tests internals of the CUDA driver device array") +class TestSlicing(unittest.TestCase): + + def assertSameContig(self, arr, nparr): + attrs = 'C_CONTIGUOUS', 'F_CONTIGUOUS' + for attr in attrs: + if arr.flags[attr] != nparr.flags[attr]: + if arr.size == 0 and nparr.size == 0: + # numpy <=1.7 bug that some empty array are contiguous and + # some are not + pass + else: + self.fail("contiguous flag mismatch:\ngot=%s\nexpect=%s" % + (arr.flags, nparr.flags)) + + #### 1D + + def test_slice0_1d(self): + nparr = np.empty(4) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + self.assertSameContig(arr, nparr) + xx = -2, -1, 0, 1, 2 + for x in xx: + expect = nparr[x:] + got = arr[x:] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_slice1_1d(self): + nparr = np.empty(4) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, -1, 0, 1, 2 + for x in xx: + expect = nparr[:x] + got = arr[:x] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_slice2_1d(self): + nparr = np.empty(4) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, -1, 0, 1, 2 + for x, y in itertools.product(xx, xx): + expect = nparr[x:y] + got = arr[x:y] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + #### 2D + + def test_slice0_2d(self): + nparr = np.empty((4, 
5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, 0, 1, 2 + for x in xx: + expect = nparr[x:] + got = arr[x:] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + for x, y in itertools.product(xx, xx): + expect = nparr[x:, y:] + got = arr[x:, y:] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_slice1_2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, 0, 2 + for x in xx: + expect = nparr[:x] + got = arr[:x] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + self.assertSameContig(got, expect) + + for x, y in itertools.product(xx, xx): + expect = nparr[:x, :y] + got = arr[:x, :y] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + self.assertSameContig(got, expect) + + def test_slice2_2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, 0, 2 + for s, t, u, v in itertools.product(xx, xx, xx, xx): + expect = nparr[s:t, u:v] + got = arr[s:t, u:v] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + for x, y in itertools.product(xx, xx): + expect = nparr[s:t, u:v] + got = arr[s:t, u:v] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + #### Strided + + def test_strided_1d(self): + nparr = np.empty(4) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, -1, 1, 2 + for x in xx: + expect = nparr[::x] + got = arr[::x] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, 
expect.strides) + + def test_strided_2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, -1, 1, 2 + for a, b in itertools.product(xx, xx): + expect = nparr[::a, ::b] + got = arr[::a, ::b] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_strided_3d(self): + nparr = np.empty((4, 5, 6)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + xx = -2, -1, 1, 2 + for a, b, c in itertools.product(xx, xx, xx): + expect = nparr[::a, ::b, ::c] + got = arr[::a, ::b, ::c] + self.assertSameContig(got, expect) + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_issue_2766(self): + z = np.empty((1, 2, 3)) + z = np.transpose(z, axes=(2, 0, 1)) + arr = Array.from_desc(0, z.shape, z.strides, z.itemsize) + self.assertEqual(z.flags['C_CONTIGUOUS'], arr.flags['C_CONTIGUOUS']) + self.assertEqual(z.flags['F_CONTIGUOUS'], arr.flags['F_CONTIGUOUS']) + + +@skip_on_cudasim("Tests internals of the CUDA driver device array") +class TestReshape(unittest.TestCase): + def test_reshape_2d2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(5, 4) + got = arr.reshape(5, 4)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_2d1d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(5 * 4) + got = arr.reshape(5 * 4)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_3d3d(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(5, 3, 4) + got = arr.reshape(5, 3, 4)[0] + 
self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_3d2d(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(3 * 4, 5) + got = arr.reshape(3 * 4, 5)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_3d1d(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(3 * 4 * 5) + got = arr.reshape(3 * 4 * 5)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_infer2d2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(-1, 4) + got = arr.reshape(-1, 4)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_infer2d1d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(-1) + got = arr.reshape(-1)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_infer3d3d(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(5, -1, 4) + got = arr.reshape(5, -1, 4)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_infer3d2d(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(3, -1) + got = arr.reshape(3, -1)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_infer3d1d(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, 
nparr.shape, nparr.strides, + nparr.dtype.itemsize) + expect = nparr.reshape(-1) + got = arr.reshape(-1)[0] + self.assertEqual(got.shape, expect.shape) + self.assertEqual(got.strides, expect.strides) + + def test_reshape_infer_two_unknowns(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + + with self.assertRaises(ValueError) as raises: + arr.reshape(-1, -1, 3) + self.assertIn('can only specify one unknown dimension', + str(raises.exception)) + + def test_reshape_infer_invalid_shape(self): + nparr = np.empty((3, 4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + + with self.assertRaises(ValueError) as raises: + arr.reshape(-1, 7) + + expected_message = 'cannot infer valid shape for unknown dimension' + self.assertIn(expected_message, str(raises.exception)) + + +@skip_on_cudasim("Tests internals of the CUDA driver device array") +class TestSqueeze(unittest.TestCase): + def test_squeeze(self): + nparr = np.empty((1, 2, 1, 4, 1, 3)) + arr = Array.from_desc( + 0, nparr.shape, nparr.strides, nparr.dtype.itemsize + ) + + def _assert_equal_shape_strides(arr1, arr2): + self.assertEqual(arr1.shape, arr2.shape) + self.assertEqual(arr1.strides, arr2.strides) + _assert_equal_shape_strides(arr, nparr) + _assert_equal_shape_strides(arr.squeeze()[0], nparr.squeeze()) + for axis in (0, 2, 4, (0, 2), (0, 4), (2, 4), (0, 2, 4)): + _assert_equal_shape_strides( + arr.squeeze(axis=axis)[0], nparr.squeeze(axis=axis) + ) + + def test_squeeze_invalid_axis(self): + nparr = np.empty((1, 2, 1, 4, 1, 3)) + arr = Array.from_desc( + 0, nparr.shape, nparr.strides, nparr.dtype.itemsize + ) + with self.assertRaises(ValueError): + arr.squeeze(axis=1) + with self.assertRaises(ValueError): + arr.squeeze(axis=(2, 3)) + + +@skip_on_cudasim("Tests internals of the CUDA driver device array") +class TestExtent(unittest.TestCase): + def test_extent_1d(self): + nparr = np.empty(4) + arr = 
Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + s, e = arr.extent + self.assertEqual(e - s, nparr.size * nparr.dtype.itemsize) + + def test_extent_2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + s, e = arr.extent + self.assertEqual(e - s, nparr.size * nparr.dtype.itemsize) + + def test_extent_iter_1d(self): + nparr = np.empty(4) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + [ext] = list(arr.iter_contiguous_extent()) + self.assertEqual(ext, arr.extent) + + def test_extent_iter_2d(self): + nparr = np.empty((4, 5)) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + [ext] = list(arr.iter_contiguous_extent()) + self.assertEqual(ext, arr.extent) + + self.assertEqual(len(list(arr[::2].iter_contiguous_extent())), 2) + + +@skip_on_cudasim("Tests internals of the CUDA driver device array") +class TestIterate(unittest.TestCase): + def test_for_loop(self): + # for #4201 + N = 5 + nparr = np.empty(N) + arr = Array.from_desc(0, nparr.shape, nparr.strides, + nparr.dtype.itemsize) + + x = 0 # just a placeholder + # this loop should not raise AssertionError + for val in arr: + x = val # noqa: F841 + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_function_resolution.py b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_function_resolution.py new file mode 100644 index 0000000000000000000000000000000000000000..1153707bbc2701194f0f85dbd6451ba8355522b8 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_function_resolution.py @@ -0,0 +1,36 @@ +from numba.cuda.testing import unittest, skip_on_cudasim +import operator +from numba.core import types, typing +from numba.cuda.cudadrv import nvvm + + +@unittest.skipIf(not nvvm.is_available(), "No libNVVM") +@skip_on_cudasim("Skip on simulator due to use of cuda_target") +class 
TestFunctionResolution(unittest.TestCase): + def test_fp16_binary_operators(self): + from numba.cuda.descriptor import cuda_target + ops = (operator.add, operator.iadd, operator.sub, operator.isub, + operator.mul, operator.imul) + for op in ops: + fp16 = types.float16 + typingctx = cuda_target.typing_context + typingctx.refresh() + fnty = typingctx.resolve_value_type(op) + out = typingctx.resolve_function_type(fnty, (fp16, fp16), {}) + self.assertEqual(out, typing.signature(fp16, fp16, fp16), + msg=str(out)) + + def test_fp16_unary_operators(self): + from numba.cuda.descriptor import cuda_target + ops = (operator.neg, abs) + for op in ops: + fp16 = types.float16 + typingctx = cuda_target.typing_context + typingctx.refresh() + fnty = typingctx.resolve_value_type(op) + out = typingctx.resolve_function_type(fnty, (fp16,), {}) + self.assertEqual(out, typing.signature(fp16, fp16), msg=str(out)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_import.py b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_import.py new file mode 100644 index 0000000000000000000000000000000000000000..73126cd6ed10d8b6668fbe4cd3fe45bf8d42f4bf --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_import.py @@ -0,0 +1,49 @@ +from numba.tests.support import run_in_subprocess +import unittest + + +class TestImport(unittest.TestCase): + def test_no_impl_import(self): + """ + Tests that importing cuda doesn't trigger the import of modules + containing lowering implementation that would likely install things in + the builtins registry and have side effects impacting other targets. 
+ """ + + banlist = ( + 'numba.cpython.slicing', + 'numba.cpython.tupleobj', + 'numba.cpython.enumimpl', + 'numba.cpython.hashing', + 'numba.cpython.heapq', + 'numba.cpython.iterators', + 'numba.cpython.numbers', + 'numba.cpython.rangeobj', + 'numba.cpython.cmathimpl', + 'numba.cpython.mathimpl', + 'numba.cpython.printimpl', + 'numba.cpython.randomimpl', + 'numba.core.optional', + 'numba.misc.gdb_hook', + 'numba.misc.literal', + 'numba.misc.cffiimpl', + 'numba.np.linalg', + 'numba.np.polynomial', + 'numba.np.arraymath', + 'numba.np.npdatetime', + 'numba.np.npyimpl', + 'numba.typed.typeddict', + 'numba.typed.typedlist', + 'numba.experimental.jitclass.base', + ) + + code = "import sys; from numba import cuda; print(list(sys.modules))" + + out, _ = run_in_subprocess(code) + modlist = set(eval(out.strip())) + unexpected = set(banlist) & set(modlist) + self.assertFalse(unexpected, "some modules unexpectedly imported") + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_library_lookup.py b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_library_lookup.py new file mode 100644 index 0000000000000000000000000000000000000000..acf67082920c7296f2b199523ec7a1f94c4cf9a3 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_library_lookup.py @@ -0,0 +1,238 @@ +import sys +import os +import multiprocessing as mp +import warnings + +from numba.core.config import IS_WIN32, IS_OSX +from numba.core.errors import NumbaWarning +from numba.cuda.cudadrv import nvvm +from numba.cuda.testing import ( + unittest, + skip_on_cudasim, + SerialMixin, + skip_unless_conda_cudatoolkit, +) +from numba.cuda.cuda_paths import ( + _get_libdevice_path_decision, + _get_nvvm_path_decision, + _get_cudalib_dir_path_decision, + get_system_ctk, +) + + +has_cuda = nvvm.is_available() +has_mp_get_context = hasattr(mp, 'get_context') + + +class LibraryLookupBase(SerialMixin, unittest.TestCase): + def 
setUp(self): + ctx = mp.get_context('spawn') + + qrecv = ctx.Queue() + qsend = ctx.Queue() + self.qsend = qsend + self.qrecv = qrecv + self.child_process = ctx.Process( + target=check_lib_lookup, + args=(qrecv, qsend), + daemon=True, + ) + self.child_process.start() + + def tearDown(self): + self.qsend.put(self.do_terminate) + self.child_process.join(3) + # Ensure the process is terminated + self.assertIsNotNone(self.child_process) + + def remote_do(self, action): + self.qsend.put(action) + out = self.qrecv.get() + self.assertNotIsInstance(out, BaseException) + return out + + @staticmethod + def do_terminate(): + return False, None + + +def remove_env(name): + try: + del os.environ[name] + except KeyError: + return False + else: + return True + + +def check_lib_lookup(qout, qin): + status = True + while status: + try: + action = qin.get() + except Exception as e: + qout.put(e) + status = False + else: + try: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", NumbaWarning) + status, result = action() + qout.put(result + (w,)) + except Exception as e: + qout.put(e) + status = False + + +@skip_on_cudasim('Library detection unsupported in the simulator') +@unittest.skipUnless(has_mp_get_context, 'mp.get_context not available') +@skip_unless_conda_cudatoolkit('test assumes conda installed cudatoolkit') +class TestLibDeviceLookUp(LibraryLookupBase): + def test_libdevice_path_decision(self): + # Check that the default is using conda environment + by, info, warns = self.remote_do(self.do_clear_envs) + if has_cuda: + self.assertEqual(by, 'Conda environment') + else: + self.assertEqual(by, "") + self.assertIsNone(info) + self.assertFalse(warns) + # Check that CUDA_HOME works by removing conda-env + by, info, warns = self.remote_do(self.do_set_cuda_home) + self.assertEqual(by, 'CUDA_HOME') + self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'libdevice')) + self.assertFalse(warns) + + if get_system_ctk() is None: + # Fake remove conda 
environment so no cudatoolkit is available + by, info, warns = self.remote_do(self.do_clear_envs) + self.assertEqual(by, '') + self.assertIsNone(info) + self.assertFalse(warns) + else: + # Use system available cudatoolkit + by, info, warns = self.remote_do(self.do_clear_envs) + self.assertEqual(by, 'System') + self.assertFalse(warns) + + @staticmethod + def do_clear_envs(): + remove_env('CUDA_HOME') + remove_env('CUDA_PATH') + return True, _get_libdevice_path_decision() + + @staticmethod + def do_set_cuda_home(): + os.environ['CUDA_HOME'] = os.path.join('mycudahome') + _fake_non_conda_env() + return True, _get_libdevice_path_decision() + + +@skip_on_cudasim('Library detection unsupported in the simulator') +@unittest.skipUnless(has_mp_get_context, 'mp.get_context not available') +@skip_unless_conda_cudatoolkit('test assumes conda installed cudatoolkit') +class TestNvvmLookUp(LibraryLookupBase): + def test_nvvm_path_decision(self): + # Check that the default is using conda environment + by, info, warns = self.remote_do(self.do_clear_envs) + if has_cuda: + self.assertEqual(by, 'Conda environment') + else: + self.assertEqual(by, "") + self.assertIsNone(info) + self.assertFalse(warns) + # Check that CUDA_HOME works by removing conda-env + by, info, warns = self.remote_do(self.do_set_cuda_home) + self.assertEqual(by, 'CUDA_HOME') + self.assertFalse(warns) + if IS_WIN32: + self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'bin')) + elif IS_OSX: + self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'lib')) + else: + self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'lib64')) + + if get_system_ctk() is None: + # Fake remove conda environment so no cudatoolkit is available + by, info, warns = self.remote_do(self.do_clear_envs) + self.assertEqual(by, '') + self.assertIsNone(info) + self.assertFalse(warns) + else: + # Use system available cudatoolkit + by, info, warns = self.remote_do(self.do_clear_envs) + self.assertEqual(by, 'System') + 
self.assertFalse(warns) + + @staticmethod + def do_clear_envs(): + remove_env('CUDA_HOME') + remove_env('CUDA_PATH') + return True, _get_nvvm_path_decision() + + @staticmethod + def do_set_cuda_home(): + os.environ['CUDA_HOME'] = os.path.join('mycudahome') + _fake_non_conda_env() + return True, _get_nvvm_path_decision() + + +@skip_on_cudasim('Library detection unsupported in the simulator') +@unittest.skipUnless(has_mp_get_context, 'mp.get_context not available') +@skip_unless_conda_cudatoolkit('test assumes conda installed cudatoolkit') +class TestCudaLibLookUp(LibraryLookupBase): + def test_cudalib_path_decision(self): + # Check that the default is using conda environment + by, info, warns = self.remote_do(self.do_clear_envs) + if has_cuda: + self.assertEqual(by, 'Conda environment') + else: + self.assertEqual(by, "") + self.assertIsNone(info) + self.assertFalse(warns) + + # Check that CUDA_HOME works by removing conda-env + self.remote_do(self.do_clear_envs) + by, info, warns = self.remote_do(self.do_set_cuda_home) + self.assertEqual(by, 'CUDA_HOME') + self.assertFalse(warns) + if IS_WIN32: + self.assertEqual(info, os.path.join('mycudahome', 'bin')) + elif IS_OSX: + self.assertEqual(info, os.path.join('mycudahome', 'lib')) + else: + self.assertEqual(info, os.path.join('mycudahome', 'lib64')) + if get_system_ctk() is None: + # Fake remove conda environment so no cudatoolkit is available + by, info, warns = self.remote_do(self.do_clear_envs) + self.assertEqual(by, "") + self.assertIsNone(info) + self.assertFalse(warns) + else: + # Use system available cudatoolkit + by, info, warns = self.remote_do(self.do_clear_envs) + self.assertEqual(by, 'System') + self.assertFalse(warns) + + @staticmethod + def do_clear_envs(): + remove_env('CUDA_HOME') + remove_env('CUDA_PATH') + return True, _get_cudalib_dir_path_decision() + + @staticmethod + def do_set_cuda_home(): + os.environ['CUDA_HOME'] = os.path.join('mycudahome') + _fake_non_conda_env() + return True, 
_get_cudalib_dir_path_decision() + + +def _fake_non_conda_env(): + """ + Monkeypatch sys.prefix to hide the fact we are in a conda-env + """ + sys.prefix = '' + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_nvvm.py b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_nvvm.py new file mode 100644 index 0000000000000000000000000000000000000000..742aa1017115d7ee1841a33a3fea0cd32236e673 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/nocuda/test_nvvm.py @@ -0,0 +1,54 @@ +from numba.cuda.cudadrv import nvvm +from numba.cuda.testing import skip_on_cudasim +from numba.core import utils + +from llvmlite import ir +from llvmlite import binding as llvm + +import unittest + + +original = "call void @llvm.memset.p0i8.i64(" \ + "i8* align 4 %arg.x.41, i8 0, i64 %0, i1 false)" + +missing_align = "call void @llvm.memset.p0i8.i64(" \ + "i8* %arg.x.41, i8 0, i64 %0, i1 false)" + + +@skip_on_cudasim('libNVVM not supported in simulator') +@unittest.skipIf(utils.MACHINE_BITS == 32, "CUDA not support for 32-bit") +@unittest.skipIf(not nvvm.is_available(), "No libNVVM") +class TestNvvmWithoutCuda(unittest.TestCase): + def test_nvvm_accepts_encoding(self): + # Test that NVVM will accept a constant containing all possible 8-bit + # characters. Taken from the test case added in llvmlite PR #53: + # + # https://github.com/numba/llvmlite/pull/53 + # + # This test case is included in Numba to ensure that the encoding used + # by llvmlite (e.g. utf-8, latin1, etc.) 
does not result in an input to + # NVVM that it cannot parse correctly + + # Create a module with a constant containing all 8-bit characters + c = ir.Constant(ir.ArrayType(ir.IntType(8), 256), + bytearray(range(256))) + m = ir.Module() + m.triple = 'nvptx64-nvidia-cuda' + nvvm.add_ir_version(m) + gv = ir.GlobalVariable(m, c.type, "myconstant") + gv.global_constant = True + gv.initializer = c + m.data_layout = nvvm.NVVM().data_layout + + # Parse with LLVM then dump the parsed module into NVVM + parsed = llvm.parse_assembly(str(m)) + ptx = nvvm.compile_ir(str(parsed)) + + # Ensure all characters appear in the generated constant array. + elements = ", ".join([str(i) for i in range(256)]) + myconstant = f"myconstant[256] = {{{elements}}}".encode('utf-8') + self.assertIn(myconstant, ptx) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/types.py b/lib/python3.10/site-packages/numba/cuda/types.py new file mode 100644 index 0000000000000000000000000000000000000000..531dcb2ccb22abcfea9f103df4ddb616728b4ca5 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/types.py @@ -0,0 +1,37 @@ +from numba.core import types + + +class Dim3(types.Type): + """ + A 3-tuple (x, y, z) representing the position of a block or thread. + """ + def __init__(self): + super().__init__(name='Dim3') + + +class GridGroup(types.Type): + """ + The grid of all threads in a cooperative kernel launch. 
+ """ + def __init__(self): + super().__init__(name='GridGroup') + + +dim3 = Dim3() +grid_group = GridGroup() + + +class CUDADispatcher(types.Dispatcher): + """The type of CUDA dispatchers""" + # This type exists (instead of using types.Dispatcher as the type of CUDA + # dispatchers) so that we can have an alternative lowering for them to the + # lowering of CPU dispatchers - the CPU target lowers all dispatchers as a + # constant address, but we need to lower to a dummy value because it's not + # generally valid to use the address of CUDA kernels and functions. + # + # Notes: it may be a bug in the CPU target that it lowers all dispatchers to + # a constant address - it should perhaps only lower dispatchers acting as + # first-class functions to a constant address. Even if that bug is fixed, it + # is still probably a good idea to have a separate type for CUDA + # dispatchers, and this type might get other differentiation from the CPU + # dispatcher type in future. diff --git a/lib/python3.10/site-packages/numba/cuda/ufuncs.py b/lib/python3.10/site-packages/numba/cuda/ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..f552370480e3df5d19095351b4cea8b3fc4069d6 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/ufuncs.py @@ -0,0 +1,662 @@ +"""Contains information on how to translate different ufuncs for the CUDA +target. It is a database of different ufuncs and how each of its loops maps to +a function that implements the inner kernel of that ufunc (the inner kernel +being the per-element function). + +Use get_ufunc_info() to get the information related to a ufunc. 
+""" + +import math +import numpy as np +from functools import lru_cache +from numba.core import typing + + +def get_ufunc_info(ufunc_key): + return ufunc_db()[ufunc_key] + + +@lru_cache +def ufunc_db(): + # Imports here are at function scope to avoid circular imports + from numba.cpython import cmathimpl, mathimpl, numbers + from numba.np import npyfuncs + from numba.np.numpy_support import numpy_version + from numba.cuda.mathimpl import (get_unary_impl_for_fn_and_ty, + get_binary_impl_for_fn_and_ty) + + def np_unary_impl(fn, context, builder, sig, args): + npyfuncs._check_arity_and_homogeneity(sig, args, 1) + impl = get_unary_impl_for_fn_and_ty(fn, sig.args[0]) + return impl(context, builder, sig, args) + + def np_binary_impl(fn, context, builder, sig, args): + npyfuncs._check_arity_and_homogeneity(sig, args, 2) + impl = get_binary_impl_for_fn_and_ty(fn, sig.args[0]) + return impl(context, builder, sig, args) + + def np_real_log_impl(context, builder, sig, args): + return np_unary_impl(math.log, context, builder, sig, args) + + def np_real_log2_impl(context, builder, sig, args): + return np_unary_impl(math.log2, context, builder, sig, args) + + def np_real_log10_impl(context, builder, sig, args): + return np_unary_impl(math.log10, context, builder, sig, args) + + def np_real_sin_impl(context, builder, sig, args): + return np_unary_impl(math.sin, context, builder, sig, args) + + def np_real_cos_impl(context, builder, sig, args): + return np_unary_impl(math.cos, context, builder, sig, args) + + def np_real_tan_impl(context, builder, sig, args): + return np_unary_impl(math.tan, context, builder, sig, args) + + def np_real_asin_impl(context, builder, sig, args): + return np_unary_impl(math.asin, context, builder, sig, args) + + def np_real_acos_impl(context, builder, sig, args): + return np_unary_impl(math.acos, context, builder, sig, args) + + def np_real_atan_impl(context, builder, sig, args): + return np_unary_impl(math.atan, context, builder, sig, args) + + def 
np_real_atan2_impl(context, builder, sig, args): + return np_binary_impl(math.atan2, context, builder, sig, args) + + def np_real_hypot_impl(context, builder, sig, args): + return np_binary_impl(math.hypot, context, builder, sig, args) + + def np_real_sinh_impl(context, builder, sig, args): + return np_unary_impl(math.sinh, context, builder, sig, args) + + def np_complex_sinh_impl(context, builder, sig, args): + # npymath does not provide a complex sinh. The code in funcs.inc.src + # is translated here... + npyfuncs._check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + fty = ty.underlying_float + fsig1 = typing.signature(*[fty] * 2) + x = context.make_complex(builder, ty, args[0]) + out = context.make_complex(builder, ty) + xr = x.real + xi = x.imag + + sxi = np_real_sin_impl(context, builder, fsig1, [xi]) + shxr = np_real_sinh_impl(context, builder, fsig1, [xr]) + cxi = np_real_cos_impl(context, builder, fsig1, [xi]) + chxr = np_real_cosh_impl(context, builder, fsig1, [xr]) + + out.real = builder.fmul(cxi, shxr) + out.imag = builder.fmul(sxi, chxr) + + return out._getvalue() + + def np_real_cosh_impl(context, builder, sig, args): + return np_unary_impl(math.cosh, context, builder, sig, args) + + def np_complex_cosh_impl(context, builder, sig, args): + # npymath does not provide a complex cosh. The code in funcs.inc.src + # is translated here... 
+ npyfuncs._check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + fty = ty.underlying_float + fsig1 = typing.signature(*[fty] * 2) + x = context.make_complex(builder, ty, args[0]) + out = context.make_complex(builder, ty) + xr = x.real + xi = x.imag + + cxi = np_real_cos_impl(context, builder, fsig1, [xi]) + chxr = np_real_cosh_impl(context, builder, fsig1, [xr]) + sxi = np_real_sin_impl(context, builder, fsig1, [xi]) + shxr = np_real_sinh_impl(context, builder, fsig1, [xr]) + + out.real = builder.fmul(cxi, chxr) + out.imag = builder.fmul(sxi, shxr) + + return out._getvalue() + + def np_real_tanh_impl(context, builder, sig, args): + return np_unary_impl(math.tanh, context, builder, sig, args) + + def np_complex_tanh_impl(context, builder, sig, args): + # npymath does not provide complex tan functions. The code + # in funcs.inc.src for tanh is translated here... + npyfuncs._check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + fty = ty.underlying_float + fsig1 = typing.signature(*[fty] * 2) + ONE = context.get_constant(fty, 1.0) + x = context.make_complex(builder, ty, args[0]) + out = context.make_complex(builder, ty) + + xr = x.real + xi = x.imag + si = np_real_sin_impl(context, builder, fsig1, [xi]) + ci = np_real_cos_impl(context, builder, fsig1, [xi]) + shr = np_real_sinh_impl(context, builder, fsig1, [xr]) + chr_ = np_real_cosh_impl(context, builder, fsig1, [xr]) + rs = builder.fmul(ci, shr) + is_ = builder.fmul(si, chr_) + rc = builder.fmul(ci, chr_) + # Note: opposite sign for `ic` from code in funcs.inc.src + ic = builder.fmul(si, shr) + sqr_rc = builder.fmul(rc, rc) + sqr_ic = builder.fmul(ic, ic) + d = builder.fadd(sqr_rc, sqr_ic) + inv_d = builder.fdiv(ONE, d) + rs_rc = builder.fmul(rs, rc) + is_ic = builder.fmul(is_, ic) + is_rc = builder.fmul(is_, rc) + rs_ic = builder.fmul(rs, ic) + numr = builder.fadd(rs_rc, is_ic) + numi = builder.fsub(is_rc, rs_ic) + out.real = builder.fmul(numr, inv_d) + out.imag = builder.fmul(numi, inv_d) + 
+ return out._getvalue() + + def np_real_asinh_impl(context, builder, sig, args): + return np_unary_impl(math.asinh, context, builder, sig, args) + + def np_real_acosh_impl(context, builder, sig, args): + return np_unary_impl(math.acosh, context, builder, sig, args) + + def np_real_atanh_impl(context, builder, sig, args): + return np_unary_impl(math.atanh, context, builder, sig, args) + + db = {} + + db[np.sin] = { + 'f->f': np_real_sin_impl, + 'd->d': np_real_sin_impl, + 'F->F': npyfuncs.np_complex_sin_impl, + 'D->D': npyfuncs.np_complex_sin_impl, + } + + db[np.cos] = { + 'f->f': np_real_cos_impl, + 'd->d': np_real_cos_impl, + 'F->F': npyfuncs.np_complex_cos_impl, + 'D->D': npyfuncs.np_complex_cos_impl, + } + + db[np.tan] = { + 'f->f': np_real_tan_impl, + 'd->d': np_real_tan_impl, + 'F->F': cmathimpl.tan_impl, + 'D->D': cmathimpl.tan_impl, + } + + db[np.arcsin] = { + 'f->f': np_real_asin_impl, + 'd->d': np_real_asin_impl, + 'F->F': cmathimpl.asin_impl, + 'D->D': cmathimpl.asin_impl, + } + + db[np.arccos] = { + 'f->f': np_real_acos_impl, + 'd->d': np_real_acos_impl, + 'F->F': cmathimpl.acos_impl, + 'D->D': cmathimpl.acos_impl, + } + + db[np.arctan] = { + 'f->f': np_real_atan_impl, + 'd->d': np_real_atan_impl, + 'F->F': cmathimpl.atan_impl, + 'D->D': cmathimpl.atan_impl, + } + + db[np.arctan2] = { + 'ff->f': np_real_atan2_impl, + 'dd->d': np_real_atan2_impl, + } + + db[np.hypot] = { + 'ff->f': np_real_hypot_impl, + 'dd->d': np_real_hypot_impl, + } + + db[np.sinh] = { + 'f->f': np_real_sinh_impl, + 'd->d': np_real_sinh_impl, + 'F->F': np_complex_sinh_impl, + 'D->D': np_complex_sinh_impl, + } + + db[np.cosh] = { + 'f->f': np_real_cosh_impl, + 'd->d': np_real_cosh_impl, + 'F->F': np_complex_cosh_impl, + 'D->D': np_complex_cosh_impl, + } + + db[np.tanh] = { + 'f->f': np_real_tanh_impl, + 'd->d': np_real_tanh_impl, + 'F->F': np_complex_tanh_impl, + 'D->D': np_complex_tanh_impl, + } + + db[np.arcsinh] = { + 'f->f': np_real_asinh_impl, + 'd->d': np_real_asinh_impl, + 
'F->F': cmathimpl.asinh_impl, + 'D->D': cmathimpl.asinh_impl, + } + + db[np.arccosh] = { + 'f->f': np_real_acosh_impl, + 'd->d': np_real_acosh_impl, + 'F->F': npyfuncs.np_complex_acosh_impl, + 'D->D': npyfuncs.np_complex_acosh_impl, + } + + db[np.arctanh] = { + 'f->f': np_real_atanh_impl, + 'd->d': np_real_atanh_impl, + 'F->F': cmathimpl.atanh_impl, + 'D->D': cmathimpl.atanh_impl, + } + + db[np.deg2rad] = { + 'f->f': mathimpl.radians_float_impl, + 'd->d': mathimpl.radians_float_impl, + } + + db[np.radians] = db[np.deg2rad] + + db[np.rad2deg] = { + 'f->f': mathimpl.degrees_float_impl, + 'd->d': mathimpl.degrees_float_impl, + } + + db[np.degrees] = db[np.rad2deg] + + db[np.greater] = { + '??->?': numbers.int_ugt_impl, + 'bb->?': numbers.int_sgt_impl, + 'BB->?': numbers.int_ugt_impl, + 'hh->?': numbers.int_sgt_impl, + 'HH->?': numbers.int_ugt_impl, + 'ii->?': numbers.int_sgt_impl, + 'II->?': numbers.int_ugt_impl, + 'll->?': numbers.int_sgt_impl, + 'LL->?': numbers.int_ugt_impl, + 'qq->?': numbers.int_sgt_impl, + 'QQ->?': numbers.int_ugt_impl, + 'ff->?': numbers.real_gt_impl, + 'dd->?': numbers.real_gt_impl, + 'FF->?': npyfuncs.np_complex_gt_impl, + 'DD->?': npyfuncs.np_complex_gt_impl, + } + if numpy_version >= (1, 25): + db[np.greater].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('>'), + 'Qq->?': numbers.int_unsigned_signed_cmp('>')}) + + db[np.greater_equal] = { + '??->?': numbers.int_uge_impl, + 'bb->?': numbers.int_sge_impl, + 'BB->?': numbers.int_uge_impl, + 'hh->?': numbers.int_sge_impl, + 'HH->?': numbers.int_uge_impl, + 'ii->?': numbers.int_sge_impl, + 'II->?': numbers.int_uge_impl, + 'll->?': numbers.int_sge_impl, + 'LL->?': numbers.int_uge_impl, + 'qq->?': numbers.int_sge_impl, + 'QQ->?': numbers.int_uge_impl, + 'ff->?': numbers.real_ge_impl, + 'dd->?': numbers.real_ge_impl, + 'FF->?': npyfuncs.np_complex_ge_impl, + 'DD->?': npyfuncs.np_complex_ge_impl, + } + if numpy_version >= (1, 25): + db[np.greater_equal].update({ + 'qQ->?': 
numbers.int_signed_unsigned_cmp('>='), + 'Qq->?': numbers.int_unsigned_signed_cmp('>=')}) + + db[np.less] = { + '??->?': numbers.int_ult_impl, + 'bb->?': numbers.int_slt_impl, + 'BB->?': numbers.int_ult_impl, + 'hh->?': numbers.int_slt_impl, + 'HH->?': numbers.int_ult_impl, + 'ii->?': numbers.int_slt_impl, + 'II->?': numbers.int_ult_impl, + 'll->?': numbers.int_slt_impl, + 'LL->?': numbers.int_ult_impl, + 'qq->?': numbers.int_slt_impl, + 'QQ->?': numbers.int_ult_impl, + 'ff->?': numbers.real_lt_impl, + 'dd->?': numbers.real_lt_impl, + 'FF->?': npyfuncs.np_complex_lt_impl, + 'DD->?': npyfuncs.np_complex_lt_impl, + } + if numpy_version >= (1, 25): + db[np.less].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('<'), + 'Qq->?': numbers.int_unsigned_signed_cmp('<')}) + + db[np.less_equal] = { + '??->?': numbers.int_ule_impl, + 'bb->?': numbers.int_sle_impl, + 'BB->?': numbers.int_ule_impl, + 'hh->?': numbers.int_sle_impl, + 'HH->?': numbers.int_ule_impl, + 'ii->?': numbers.int_sle_impl, + 'II->?': numbers.int_ule_impl, + 'll->?': numbers.int_sle_impl, + 'LL->?': numbers.int_ule_impl, + 'qq->?': numbers.int_sle_impl, + 'QQ->?': numbers.int_ule_impl, + 'ff->?': numbers.real_le_impl, + 'dd->?': numbers.real_le_impl, + 'FF->?': npyfuncs.np_complex_le_impl, + 'DD->?': npyfuncs.np_complex_le_impl, + } + if numpy_version >= (1, 25): + db[np.less_equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('<='), + 'Qq->?': numbers.int_unsigned_signed_cmp('<=')}) + + db[np.not_equal] = { + '??->?': numbers.int_ne_impl, + 'bb->?': numbers.int_ne_impl, + 'BB->?': numbers.int_ne_impl, + 'hh->?': numbers.int_ne_impl, + 'HH->?': numbers.int_ne_impl, + 'ii->?': numbers.int_ne_impl, + 'II->?': numbers.int_ne_impl, + 'll->?': numbers.int_ne_impl, + 'LL->?': numbers.int_ne_impl, + 'qq->?': numbers.int_ne_impl, + 'QQ->?': numbers.int_ne_impl, + 'ff->?': numbers.real_ne_impl, + 'dd->?': numbers.real_ne_impl, + 'FF->?': npyfuncs.np_complex_ne_impl, + 'DD->?': npyfuncs.np_complex_ne_impl, 
+ } + if numpy_version >= (1, 25): + db[np.not_equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('!='), + 'Qq->?': numbers.int_unsigned_signed_cmp('!=')}) + + db[np.equal] = { + '??->?': numbers.int_eq_impl, + 'bb->?': numbers.int_eq_impl, + 'BB->?': numbers.int_eq_impl, + 'hh->?': numbers.int_eq_impl, + 'HH->?': numbers.int_eq_impl, + 'ii->?': numbers.int_eq_impl, + 'II->?': numbers.int_eq_impl, + 'll->?': numbers.int_eq_impl, + 'LL->?': numbers.int_eq_impl, + 'qq->?': numbers.int_eq_impl, + 'QQ->?': numbers.int_eq_impl, + 'ff->?': numbers.real_eq_impl, + 'dd->?': numbers.real_eq_impl, + 'FF->?': npyfuncs.np_complex_eq_impl, + 'DD->?': npyfuncs.np_complex_eq_impl, + } + if numpy_version >= (1, 25): + db[np.equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('=='), + 'Qq->?': numbers.int_unsigned_signed_cmp('==')}) + + db[np.logical_and] = { + '??->?': npyfuncs.np_logical_and_impl, + 'bb->?': npyfuncs.np_logical_and_impl, + 'BB->?': npyfuncs.np_logical_and_impl, + 'hh->?': npyfuncs.np_logical_and_impl, + 'HH->?': npyfuncs.np_logical_and_impl, + 'ii->?': npyfuncs.np_logical_and_impl, + 'II->?': npyfuncs.np_logical_and_impl, + 'll->?': npyfuncs.np_logical_and_impl, + 'LL->?': npyfuncs.np_logical_and_impl, + 'qq->?': npyfuncs.np_logical_and_impl, + 'QQ->?': npyfuncs.np_logical_and_impl, + 'ff->?': npyfuncs.np_logical_and_impl, + 'dd->?': npyfuncs.np_logical_and_impl, + 'FF->?': npyfuncs.np_complex_logical_and_impl, + 'DD->?': npyfuncs.np_complex_logical_and_impl, + } + + db[np.logical_or] = { + '??->?': npyfuncs.np_logical_or_impl, + 'bb->?': npyfuncs.np_logical_or_impl, + 'BB->?': npyfuncs.np_logical_or_impl, + 'hh->?': npyfuncs.np_logical_or_impl, + 'HH->?': npyfuncs.np_logical_or_impl, + 'ii->?': npyfuncs.np_logical_or_impl, + 'II->?': npyfuncs.np_logical_or_impl, + 'll->?': npyfuncs.np_logical_or_impl, + 'LL->?': npyfuncs.np_logical_or_impl, + 'qq->?': npyfuncs.np_logical_or_impl, + 'QQ->?': npyfuncs.np_logical_or_impl, + 'ff->?': 
npyfuncs.np_logical_or_impl, + 'dd->?': npyfuncs.np_logical_or_impl, + 'FF->?': npyfuncs.np_complex_logical_or_impl, + 'DD->?': npyfuncs.np_complex_logical_or_impl, + } + + db[np.logical_xor] = { + '??->?': npyfuncs.np_logical_xor_impl, + 'bb->?': npyfuncs.np_logical_xor_impl, + 'BB->?': npyfuncs.np_logical_xor_impl, + 'hh->?': npyfuncs.np_logical_xor_impl, + 'HH->?': npyfuncs.np_logical_xor_impl, + 'ii->?': npyfuncs.np_logical_xor_impl, + 'II->?': npyfuncs.np_logical_xor_impl, + 'll->?': npyfuncs.np_logical_xor_impl, + 'LL->?': npyfuncs.np_logical_xor_impl, + 'qq->?': npyfuncs.np_logical_xor_impl, + 'QQ->?': npyfuncs.np_logical_xor_impl, + 'ff->?': npyfuncs.np_logical_xor_impl, + 'dd->?': npyfuncs.np_logical_xor_impl, + 'FF->?': npyfuncs.np_complex_logical_xor_impl, + 'DD->?': npyfuncs.np_complex_logical_xor_impl, + } + + db[np.logical_not] = { + '?->?': npyfuncs.np_logical_not_impl, + 'b->?': npyfuncs.np_logical_not_impl, + 'B->?': npyfuncs.np_logical_not_impl, + 'h->?': npyfuncs.np_logical_not_impl, + 'H->?': npyfuncs.np_logical_not_impl, + 'i->?': npyfuncs.np_logical_not_impl, + 'I->?': npyfuncs.np_logical_not_impl, + 'l->?': npyfuncs.np_logical_not_impl, + 'L->?': npyfuncs.np_logical_not_impl, + 'q->?': npyfuncs.np_logical_not_impl, + 'Q->?': npyfuncs.np_logical_not_impl, + 'f->?': npyfuncs.np_logical_not_impl, + 'd->?': npyfuncs.np_logical_not_impl, + 'F->?': npyfuncs.np_complex_logical_not_impl, + 'D->?': npyfuncs.np_complex_logical_not_impl, + } + + db[np.maximum] = { + '??->?': npyfuncs.np_logical_or_impl, + 'bb->b': npyfuncs.np_int_smax_impl, + 'BB->B': npyfuncs.np_int_umax_impl, + 'hh->h': npyfuncs.np_int_smax_impl, + 'HH->H': npyfuncs.np_int_umax_impl, + 'ii->i': npyfuncs.np_int_smax_impl, + 'II->I': npyfuncs.np_int_umax_impl, + 'll->l': npyfuncs.np_int_smax_impl, + 'LL->L': npyfuncs.np_int_umax_impl, + 'qq->q': npyfuncs.np_int_smax_impl, + 'QQ->Q': npyfuncs.np_int_umax_impl, + 'ff->f': npyfuncs.np_real_maximum_impl, + 'dd->d': 
npyfuncs.np_real_maximum_impl, + 'FF->F': npyfuncs.np_complex_maximum_impl, + 'DD->D': npyfuncs.np_complex_maximum_impl, + } + + db[np.minimum] = { + '??->?': npyfuncs.np_logical_and_impl, + 'bb->b': npyfuncs.np_int_smin_impl, + 'BB->B': npyfuncs.np_int_umin_impl, + 'hh->h': npyfuncs.np_int_smin_impl, + 'HH->H': npyfuncs.np_int_umin_impl, + 'ii->i': npyfuncs.np_int_smin_impl, + 'II->I': npyfuncs.np_int_umin_impl, + 'll->l': npyfuncs.np_int_smin_impl, + 'LL->L': npyfuncs.np_int_umin_impl, + 'qq->q': npyfuncs.np_int_smin_impl, + 'QQ->Q': npyfuncs.np_int_umin_impl, + 'ff->f': npyfuncs.np_real_minimum_impl, + 'dd->d': npyfuncs.np_real_minimum_impl, + 'FF->F': npyfuncs.np_complex_minimum_impl, + 'DD->D': npyfuncs.np_complex_minimum_impl, + } + + db[np.fmax] = { + '??->?': npyfuncs.np_logical_or_impl, + 'bb->b': npyfuncs.np_int_smax_impl, + 'BB->B': npyfuncs.np_int_umax_impl, + 'hh->h': npyfuncs.np_int_smax_impl, + 'HH->H': npyfuncs.np_int_umax_impl, + 'ii->i': npyfuncs.np_int_smax_impl, + 'II->I': npyfuncs.np_int_umax_impl, + 'll->l': npyfuncs.np_int_smax_impl, + 'LL->L': npyfuncs.np_int_umax_impl, + 'qq->q': npyfuncs.np_int_smax_impl, + 'QQ->Q': npyfuncs.np_int_umax_impl, + 'ff->f': npyfuncs.np_real_fmax_impl, + 'dd->d': npyfuncs.np_real_fmax_impl, + 'FF->F': npyfuncs.np_complex_fmax_impl, + 'DD->D': npyfuncs.np_complex_fmax_impl, + } + + db[np.fmin] = { + '??->?': npyfuncs.np_logical_and_impl, + 'bb->b': npyfuncs.np_int_smin_impl, + 'BB->B': npyfuncs.np_int_umin_impl, + 'hh->h': npyfuncs.np_int_smin_impl, + 'HH->H': npyfuncs.np_int_umin_impl, + 'ii->i': npyfuncs.np_int_smin_impl, + 'II->I': npyfuncs.np_int_umin_impl, + 'll->l': npyfuncs.np_int_smin_impl, + 'LL->L': npyfuncs.np_int_umin_impl, + 'qq->q': npyfuncs.np_int_smin_impl, + 'QQ->Q': npyfuncs.np_int_umin_impl, + 'ff->f': npyfuncs.np_real_fmin_impl, + 'dd->d': npyfuncs.np_real_fmin_impl, + 'FF->F': npyfuncs.np_complex_fmin_impl, + 'DD->D': npyfuncs.np_complex_fmin_impl, + } + + db[np.bitwise_and] = { + '??->?': 
numbers.int_and_impl, + 'bb->b': numbers.int_and_impl, + 'BB->B': numbers.int_and_impl, + 'hh->h': numbers.int_and_impl, + 'HH->H': numbers.int_and_impl, + 'ii->i': numbers.int_and_impl, + 'II->I': numbers.int_and_impl, + 'll->l': numbers.int_and_impl, + 'LL->L': numbers.int_and_impl, + 'qq->q': numbers.int_and_impl, + 'QQ->Q': numbers.int_and_impl, + } + + db[np.bitwise_or] = { + '??->?': numbers.int_or_impl, + 'bb->b': numbers.int_or_impl, + 'BB->B': numbers.int_or_impl, + 'hh->h': numbers.int_or_impl, + 'HH->H': numbers.int_or_impl, + 'ii->i': numbers.int_or_impl, + 'II->I': numbers.int_or_impl, + 'll->l': numbers.int_or_impl, + 'LL->L': numbers.int_or_impl, + 'qq->q': numbers.int_or_impl, + 'QQ->Q': numbers.int_or_impl, + } + + db[np.bitwise_xor] = { + '??->?': numbers.int_xor_impl, + 'bb->b': numbers.int_xor_impl, + 'BB->B': numbers.int_xor_impl, + 'hh->h': numbers.int_xor_impl, + 'HH->H': numbers.int_xor_impl, + 'ii->i': numbers.int_xor_impl, + 'II->I': numbers.int_xor_impl, + 'll->l': numbers.int_xor_impl, + 'LL->L': numbers.int_xor_impl, + 'qq->q': numbers.int_xor_impl, + 'QQ->Q': numbers.int_xor_impl, + } + + db[np.invert] = { + '?->?': numbers.int_invert_impl, + 'b->b': numbers.int_invert_impl, + 'B->B': numbers.int_invert_impl, + 'h->h': numbers.int_invert_impl, + 'H->H': numbers.int_invert_impl, + 'i->i': numbers.int_invert_impl, + 'I->I': numbers.int_invert_impl, + 'l->l': numbers.int_invert_impl, + 'L->L': numbers.int_invert_impl, + 'q->q': numbers.int_invert_impl, + 'Q->Q': numbers.int_invert_impl, + } + + db[np.left_shift] = { + 'bb->b': numbers.int_shl_impl, + 'BB->B': numbers.int_shl_impl, + 'hh->h': numbers.int_shl_impl, + 'HH->H': numbers.int_shl_impl, + 'ii->i': numbers.int_shl_impl, + 'II->I': numbers.int_shl_impl, + 'll->l': numbers.int_shl_impl, + 'LL->L': numbers.int_shl_impl, + 'qq->q': numbers.int_shl_impl, + 'QQ->Q': numbers.int_shl_impl, + } + + db[np.right_shift] = { + 'bb->b': numbers.int_shr_impl, + 'BB->B': numbers.int_shr_impl, + 
'hh->h': numbers.int_shr_impl, + 'HH->H': numbers.int_shr_impl, + 'ii->i': numbers.int_shr_impl, + 'II->I': numbers.int_shr_impl, + 'll->l': numbers.int_shr_impl, + 'LL->L': numbers.int_shr_impl, + 'qq->q': numbers.int_shr_impl, + 'QQ->Q': numbers.int_shr_impl, + } + + db[np.log] = { + 'f->f': np_real_log_impl, + 'd->d': np_real_log_impl, + 'F->F': npyfuncs.np_complex_log_impl, + 'D->D': npyfuncs.np_complex_log_impl, + } + + db[np.log2] = { + 'f->f': np_real_log2_impl, + 'd->d': np_real_log2_impl, + 'F->F': npyfuncs.np_complex_log2_impl, + 'D->D': npyfuncs.np_complex_log2_impl, + } + + db[np.log10] = { + 'f->f': np_real_log10_impl, + 'd->d': np_real_log10_impl, + 'F->F': npyfuncs.np_complex_log10_impl, + 'D->D': npyfuncs.np_complex_log10_impl, + } + + return db diff --git a/lib/python3.10/site-packages/numba/cuda/vector_types.py b/lib/python3.10/site-packages/numba/cuda/vector_types.py new file mode 100644 index 0000000000000000000000000000000000000000..5174e2b20d389be8e0b7a58534808a56ac742c35 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/vector_types.py @@ -0,0 +1,209 @@ +# CUDA built-in Vector Types +# https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#built-in-vector-types + +from typing import List, Tuple, Dict + +from numba import types +from numba.core import cgutils +from numba.core.extending import make_attribute_wrapper, models, register_model +from numba.core.imputils import Registry as ImplRegistry +from numba.core.typing.templates import ConcreteTemplate +from numba.core.typing.templates import Registry as TypingRegistry +from numba.core.typing.templates import signature +from numba.cuda import stubs +from numba.cuda.errors import CudaLoweringError + +typing_registry = TypingRegistry() +impl_registry = ImplRegistry() + +register = typing_registry.register +register_attr = typing_registry.register_attr +register_global = typing_registry.register_global +lower = impl_registry.lower + + +class VectorType(types.Type): + def 
class VectorType(types.Type):
    """Numba type for a CUDA built-in vector type (e.g. ``float2``).

    A vector type is a struct of ``num_elements`` scalars of ``base_type``,
    addressable through the attributes in ``attr_names`` ("x", "y", ...).
    """

    def __init__(self, name, base_type, attr_names, user_facing_object):
        self._base_type = base_type
        self._attr_names = attr_names
        self._user_facing_object = user_facing_object
        super().__init__(name=name)

    @property
    def base_type(self):
        # The scalar type of each element.
        return self._base_type

    @property
    def attr_names(self):
        # Per-element attribute names, e.g. ("x", "y", "z", "w").
        return self._attr_names

    @property
    def num_elements(self):
        return len(self._attr_names)

    @property
    def user_facing_object(self):
        # The stub handle referenced in user CUDA kernels.
        return self._user_facing_object


def make_vector_type(
    name: str,
    base_type: types.Type,
    attr_names: Tuple[str, ...],
    user_facing_object
) -> types.Type:
    """Create a vector type.

    Parameters
    ----------
    name: str
        The name of the type.
    base_type: numba.types.Type
        The primitive type for each element in the vector.
    attr_names: tuple of str
        Name for each attribute.
    user_facing_object: object
        The handle to be used in cuda kernel.
    """

    class _VectorType(VectorType):
        """Internal instantiation of VectorType."""

        pass

    class VectorTypeModel(models.StructModel):
        # Data model: a struct with one member per attribute.
        def __init__(self, dmm, fe_type):
            members = [(attr_name, base_type) for attr_name in attr_names]
            super().__init__(dmm, fe_type, members)

    vector_type = _VectorType(name, base_type, attr_names, user_facing_object)
    register_model(_VectorType)(VectorTypeModel)
    # Expose each struct member as a read-only attribute in jitted code.
    for attr_name in attr_names:
        make_attribute_wrapper(_VectorType, attr_name, attr_name)

    return vector_type


def enable_vector_type_ctor(
    vector_type: VectorType, overloads: List[List[types.Type]]
):
    """Create typing and lowering for vector type constructor.

    Parameters
    ----------
    vector_type: VectorType
        The type whose constructor to type and lower.
    overloads: List of argument types
        A list containing different overloads of the constructor. Each base
        type in the argument list should either be a primitive type or a
        VectorType.
    """
    ctor = vector_type.user_facing_object

    @register
    class CtorTemplate(ConcreteTemplate):
        key = ctor
        cases = [signature(vector_type, *arglist) for arglist in overloads]

    register_global(ctor, types.Function(CtorTemplate))

    # Lowering

    def make_lowering(fml_arg_list):
        """Meta function to create a lowering for the constructor. Flattens
        the arguments by converting vector_type into load instructions for
        each of its attributes. Such as float2 -> float2.x, float2.y.
        """

        def lowering(context, builder, sig, actual_args):
            # A list of elements to assign from
            source_list = []
            # Convert the list of argument types to a list of load IRs.
            for argidx, fml_arg in enumerate(fml_arg_list):
                if isinstance(fml_arg, VectorType):
                    pxy = cgutils.create_struct_proxy(fml_arg)(
                        context, builder, actual_args[argidx]
                    )
                    source_list += [
                        getattr(pxy, attr) for attr in fml_arg.attr_names
                    ]
                else:
                    # assumed primitive type
                    source_list.append(actual_args[argidx])

            if len(source_list) != vector_type.num_elements:
                # BUG FIX: the second string literal was missing its "f"
                # prefix, so the message previously showed the literal text
                # "{vector_type.num_elements}" instead of the actual count.
                raise CudaLoweringError(
                    f"Unmatched number of source elements ({len(source_list)}) "
                    f"and target elements ({vector_type.num_elements})."
                )

            out = cgutils.create_struct_proxy(vector_type)(context, builder)

            for attr_name, source in zip(vector_type.attr_names, source_list):
                setattr(out, attr_name, source)
            return out._getvalue()

        return lowering

    for arglist in overloads:
        lowering = make_lowering(arglist)
        lower(ctor, *arglist)(lowering)


# Registry of all constructed vector types keyed by type name (e.g.
# "float2"); populated by _initialize() below.
vector_types: Dict[str, VectorType] = {}


def build_constructor_overloads(base_type, vty_name, num_elements, arglists, l):
    """
    For a given vector type, build a list of overloads for its constructor.

    Recursively appends to ``arglists`` every argument-type list whose
    element counts sum to ``num_elements``; ``l`` holds the in-progress
    argument list of the current recursion branch.
    """

    # TODO: speed up with memoization
    if num_elements == 0:
        arglists.append(l[:])

    # range(1, 1) is empty, so recursion terminates at num_elements == 0.
    for i in range(1, num_elements + 1):
        if i == 1:
            # For 1-element component, it can construct with either a
            # primitive type or other 1-element component.
            l.append(base_type)
            build_constructor_overloads(
                base_type, vty_name, num_elements - i, arglists, l
            )
            l.pop(-1)

            l.append(vector_types[f"{vty_name[:-1]}1"])
            build_constructor_overloads(
                base_type, vty_name, num_elements - i, arglists, l
            )
            l.pop(-1)
        else:
            l.append(vector_types[f"{vty_name[:-1]}{i}"])
            build_constructor_overloads(
                base_type, vty_name, num_elements - i, arglists, l
            )
            l.pop(-1)


def _initialize():
    """
    Construct the vector types, populate the `vector_types` dictionary, and
    enable the constructors.
    """
    vector_type_attribute_names = ("x", "y", "z", "w")
    for stub in stubs._vector_type_stubs:
        type_name = stub.__name__
        # Stub names end with the element count, e.g. "float2" -> base
        # "float", 2 elements.
        base_type = getattr(types, type_name[:-2])
        num_elements = int(type_name[-1])
        attributes = vector_type_attribute_names[:num_elements]
        vector_type = make_vector_type(type_name, base_type, attributes, stub)
        vector_types[type_name] = vector_type

    for vty in vector_types.values():
        arglists, l = [], []
        build_constructor_overloads(
            vty.base_type, vty.name, vty.num_elements, arglists, l
        )
        enable_vector_type_ctor(vty, arglists)


_initialize()
+ """ + + def __init__(self, types_to_retty_kernels, pyfunc): + self.functions = types_to_retty_kernels + self.__name__ = pyfunc.__name__ + + def __call__(self, *args, **kws): + """ + *args: numpy arrays or DeviceArrayBase (created by cuda.to_device). + Cannot mix the two types in one call. + + **kws: + stream -- cuda stream; when defined, asynchronous mode is used. + out -- output array. Can be a numpy array or DeviceArrayBase + depending on the input arguments. Type must match + the input arguments. + """ + return CUDAUFuncMechanism.call(self.functions, args, kws) + + def reduce(self, arg, stream=0): + assert len(list(self.functions.keys())[0]) == 2, "must be a binary " \ + "ufunc" + assert arg.ndim == 1, "must use 1d array" + + n = arg.shape[0] + gpu_mems = [] + + if n == 0: + raise TypeError("Reduction on an empty array.") + elif n == 1: # nothing to do + return arg[0] + + # always use a stream + stream = stream or cuda.stream() + with stream.auto_synchronize(): + # transfer memory to device if necessary + if cuda.cudadrv.devicearray.is_cuda_ndarray(arg): + mem = arg + else: + mem = cuda.to_device(arg, stream) + # do reduction + out = self.__reduce(mem, gpu_mems, stream) + # use a small buffer to store the result element + buf = np_array((1,), dtype=arg.dtype) + out.copy_to_host(buf, stream=stream) + + return buf[0] + + def __reduce(self, mem, gpu_mems, stream): + n = mem.shape[0] + if n % 2 != 0: # odd? + fatcut, thincut = mem.split(n - 1) + # prevent freeing during async mode + gpu_mems.append(fatcut) + gpu_mems.append(thincut) + # execute the kernel + out = self.__reduce(fatcut, gpu_mems, stream) + gpu_mems.append(out) + return self(out, thincut, out=out, stream=stream) + else: # even? 
+ left, right = mem.split(n // 2) + # prevent freeing during async mode + gpu_mems.append(left) + gpu_mems.append(right) + # execute the kernel + self(left, right, out=left, stream=stream) + if n // 2 > 1: + return self.__reduce(left, gpu_mems, stream) + else: + return left + + +class _CUDAGUFuncCallSteps(GUFuncCallSteps): + __slots__ = [ + '_stream', + ] + + def __init__(self, nin, nout, args, kwargs): + super().__init__(nin, nout, args, kwargs) + self._stream = kwargs.get('stream', 0) + + def is_device_array(self, obj): + return cuda.is_cuda_array(obj) + + def as_device_array(self, obj): + # We don't want to call as_cuda_array on objects that are already Numba + # device arrays, because this results in exporting the array as a + # Producer then importing it as a Consumer, which causes a + # synchronization on the array's stream (if it has one) by default. + # When we have a Numba device array, we can simply return it. + if cuda.cudadrv.devicearray.is_cuda_ndarray(obj): + return obj + return cuda.as_cuda_array(obj) + + def to_device(self, hostary): + return cuda.to_device(hostary, stream=self._stream) + + def to_host(self, devary, hostary): + out = devary.copy_to_host(hostary, stream=self._stream) + return out + + def allocate_device_array(self, shape, dtype): + return cuda.device_array(shape=shape, dtype=dtype, stream=self._stream) + + def launch_kernel(self, kernel, nelem, args): + kernel.forall(nelem, stream=self._stream)(*args) + + +class CUDAGeneralizedUFunc(GeneralizedUFunc): + def __init__(self, kernelmap, engine, pyfunc): + self.__name__ = pyfunc.__name__ + super().__init__(kernelmap, engine) + + @property + def _call_steps(self): + return _CUDAGUFuncCallSteps + + def _broadcast_scalar_input(self, ary, shape): + return cuda.cudadrv.devicearray.DeviceNDArray(shape=shape, + strides=(0,), + dtype=ary.dtype, + gpu_data=ary.gpu_data) + + def _broadcast_add_axis(self, ary, newshape): + newax = len(newshape) - len(ary.shape) + # Add 0 strides for missing 
dimension + newstrides = (0,) * newax + ary.strides + return cuda.cudadrv.devicearray.DeviceNDArray(shape=newshape, + strides=newstrides, + dtype=ary.dtype, + gpu_data=ary.gpu_data) + + +class CUDAUFuncMechanism(UFuncMechanism): + """ + Provide CUDA specialization + """ + DEFAULT_STREAM = 0 + + def launch(self, func, count, stream, args): + func.forall(count, stream=stream)(*args) + + def is_device_array(self, obj): + return cuda.is_cuda_array(obj) + + def as_device_array(self, obj): + # We don't want to call as_cuda_array on objects that are already Numba + # device arrays, because this results in exporting the array as a + # Producer then importing it as a Consumer, which causes a + # synchronization on the array's stream (if it has one) by default. + # When we have a Numba device array, we can simply return it. + if cuda.cudadrv.devicearray.is_cuda_ndarray(obj): + return obj + return cuda.as_cuda_array(obj) + + def to_device(self, hostary, stream): + return cuda.to_device(hostary, stream=stream) + + def to_host(self, devary, stream): + return devary.copy_to_host(stream=stream) + + def allocate_device_array(self, shape, dtype, stream): + return cuda.device_array(shape=shape, dtype=dtype, stream=stream) + + def broadcast_device(self, ary, shape): + ax_differs = [ax for ax in range(len(shape)) + if ax >= ary.ndim + or ary.shape[ax] != shape[ax]] + + missingdim = len(shape) - len(ary.shape) + strides = [0] * missingdim + list(ary.strides) + + for ax in ax_differs: + strides[ax] = 0 + + return cuda.cudadrv.devicearray.DeviceNDArray(shape=shape, + strides=strides, + dtype=ary.dtype, + gpu_data=ary.gpu_data) + + +vectorizer_stager_source = ''' +def __vectorized_{name}({args}, __out__): + __tid__ = __cuda__.grid(1) + if __tid__ < __out__.shape[0]: + __out__[__tid__] = __core__({argitems}) +''' + + +class CUDAVectorize(deviceufunc.DeviceVectorize): + def _compile_core(self, sig): + cudevfn = cuda.jit(sig, device=True, inline=True)(self.pyfunc) + return cudevfn, 
cudevfn.overloads[sig.args].signature.return_type + + def _get_globals(self, corefn): + glbl = self.pyfunc.__globals__.copy() + glbl.update({'__cuda__': cuda, + '__core__': corefn}) + return glbl + + def _compile_kernel(self, fnobj, sig): + return cuda.jit(fnobj) + + def build_ufunc(self): + return CUDAUFuncDispatcher(self.kernelmap, self.pyfunc) + + @property + def _kernel_template(self): + return vectorizer_stager_source + + +# ------------------------------------------------------------------------------ +# Generalized CUDA ufuncs + +_gufunc_stager_source = ''' +def __gufunc_{name}({args}): + __tid__ = __cuda__.grid(1) + if __tid__ < {checkedarg}: + __core__({argitems}) +''' + + +class CUDAGUFuncVectorize(deviceufunc.DeviceGUFuncVectorize): + def build_ufunc(self): + engine = deviceufunc.GUFuncEngine(self.inputsig, self.outputsig) + return CUDAGeneralizedUFunc(kernelmap=self.kernelmap, + engine=engine, + pyfunc=self.pyfunc) + + def _compile_kernel(self, fnobj, sig): + return cuda.jit(sig)(fnobj) + + @property + def _kernel_template(self): + return _gufunc_stager_source + + def _get_globals(self, sig): + corefn = cuda.jit(sig, device=True)(self.pyfunc) + glbls = self.py_func.__globals__.copy() + glbls.update({'__cuda__': cuda, + '__core__': corefn}) + return glbls diff --git a/lib/python3.10/site-packages/numba/tests/gdb/__init__.py b/lib/python3.10/site-packages/numba/tests/gdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2958441fe7a098eeb66fe183b62bd4ff0edbbe2 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/__init__.py @@ -0,0 +1,10 @@ +from os.path import dirname +import unittest +from unittest.suite import TestSuite + +from numba.testing import load_testsuite + +def load_tests(loader, tests, pattern): + suite = TestSuite() + suite.addTests(load_testsuite(loader, dirname(__file__))) + return suite diff --git a/lib/python3.10/site-packages/numba/tests/gdb/test_array_arg.py 
b/lib/python3.10/site-packages/numba/tests/gdb/test_array_arg.py new file mode 100644 index 0000000000000000000000000000000000000000..cb48dae2c536a7b1e2568244bf2058ce934238f0 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/test_array_arg.py @@ -0,0 +1,51 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit, types +import numpy as np +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@needs_subprocess +class Test(TestCase): + + def test(self): + @njit(debug=True) + def foo(x): + z = np.ones_like(x) # break here + return x, z + + tmp = np.ones(5) + foo(tmp) + + driver = GdbMIDriver(__file__) + driver.set_breakpoint(line=15) + driver.run() + driver.check_hit_breakpoint(1) + driver.stack_list_arguments(2) + llvm_intp = f"i{types.intp.bitwidth}" + expect = ( + '[frame={level="0",args=[{name="x",type="array(float64, 1d, C) ' + f'({{i8*, i8*, {llvm_intp}, {llvm_intp}, double*, ' + f'[1 x {llvm_intp}], [1 x {llvm_intp}]}})"}}]}}]' + ) + driver.assert_output(expect) + driver.stack_list_variables(1) + # 'z' should be zero-init + expect = ('{name="z",value="{meminfo = 0x0, parent = 0x0, nitems = 0, ' + 'itemsize = 0, data = 0x0, shape = {0}, strides = {0}}"}') + driver.assert_output(expect) + driver.set_breakpoint(line=16) + driver.cont() + driver.check_hit_breakpoint(2) + driver.stack_list_variables(1) + # 'z' should be populated + expect = (r'^.*\{name="z",value="\{meminfo = 0x[0-9a-f]+ .*, ' + r'parent = 0x0, nitems = 5, itemsize = 8, ' + r'data = 0x[0-9a-f]+, shape = \{5\}, strides = \{8\}\}.*$') + driver.assert_regex_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/gdb/test_basic.py b/lib/python3.10/site-packages/numba/tests/gdb/test_basic.py new file mode 100644 index 
0000000000000000000000000000000000000000..2a28d6f7468221098ac56e41cb221f704b27a0be --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/test_basic.py @@ -0,0 +1,39 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit, types +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@needs_subprocess +class Test(TestCase): + + def test(self): + @njit(debug=True) + def foo(x): + z = 7 + x # break here + return x, z + + foo(120) + + sz = types.intp.bitwidth + driver = GdbMIDriver(__file__) + driver.set_breakpoint(line=14) + driver.run() + driver.check_hit_breakpoint(1) + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="int%s",' + 'value="120"}]}]' % sz) + driver.assert_output(expect) + driver.stack_list_variables(1) + expect = '[{name="x",arg="1",value="120"},{name="z",value="0"}]' + driver.assert_output(expect) + driver.next() + driver.stack_list_variables(1) + expect = '[{name="x",arg="1",value="120"},{name="z",value="127"}]' + driver.assert_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol.py b/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol.py new file mode 100644 index 0000000000000000000000000000000000000000..7743aafc3bebc87db5257452c34ada5b1cf5b682 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol.py @@ -0,0 +1,34 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit, types +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@njit(debug=True) +def foo(x): + z = 7 + x + return x, z + + +@needs_subprocess +class Test(TestCase): + + def test(self): + foo(120) + sz = types.intp.bitwidth + driver = GdbMIDriver(__file__) 
+ driver.set_breakpoint(symbol="__main__::foo") + driver.run() # will hit cpython symbol match + driver.check_hit_breakpoint(number=1) + driver.cont() # will hit njit symbol match + driver.check_hit_breakpoint(number=1, line=10) # Ensure line number + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="int%s",' + 'value="120"}]}]' % sz) + driver.assert_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol_version.py b/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol_version.py new file mode 100644 index 0000000000000000000000000000000000000000..72f09b19ab11f11b3d6bfb68cefec554a6e9f29e --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol_version.py @@ -0,0 +1,65 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +def foo_factory(n): + @njit(debug=True) + def foo(x): + z = 7 + n + return x, z + + return foo + + +foo1, foo2, foo3 = [foo_factory(x) for x in range(3)] + + +@njit(debug=True) +def call_foo(): + a = foo1(10) + b = foo2(20) + c = foo3(30) + return a, b, c + + +@needs_subprocess +class Test(TestCase): + + def test(self): + call_foo() + driver = GdbMIDriver(__file__) + # A specific foo, the first one, it has uid=2 + vsym = "__main__::foo_factory::_3clocals_3e::foo[abi:v2]" + driver.set_breakpoint(symbol=vsym) + driver.run() + driver.check_hit_breakpoint(number=1) + driver.assert_regex_output(r'^.*foo\[abi:v2\].*line="11"') + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="Literal[int](10)",' + 'value="10"}]}]') + driver.assert_output(expect) + # Now break on any foo + driver.set_breakpoint(symbol="foo") + driver.cont() + driver.check_hit_breakpoint(number=2) + 
driver.assert_regex_output(r'^.*foo\[abi:v3\].*line="11"') + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="Literal[int](20)",' + 'value="20"}]}]') + driver.assert_output(expect) + # and again, hit the third foo + driver.cont() + driver.check_hit_breakpoint(number=2) + driver.assert_regex_output(r'^.*foo\[abi:v4\].*line="11"') + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="Literal[int](30)",' + 'value="30"}]}]') + driver.assert_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/gdb/test_conditional_breakpoint.py b/lib/python3.10/site-packages/numba/tests/gdb/test_conditional_breakpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..867a53eb16b82cb0fbd5e03a8e833afd3640bfcf --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/test_conditional_breakpoint.py @@ -0,0 +1,45 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@needs_subprocess +class Test(TestCase): + + def test(self): + + @njit(debug=True) + def foo(x, y): + c = x + y # break-here + return c + + @njit(debug=True) + def call_foo(a): + acc = 0 + for i in range(10): + acc += foo(i, a) + return acc + + call_foo(10) + + driver = GdbMIDriver(__file__) + driver.set_breakpoint(line=15, condition='x == 4') + driver.run() + driver.check_hit_breakpoint(1) + driver.stack_list_arguments(1) + expect = ('[frame={level="0",args=[{name="x",value="4"},' + '{name="y",value="10"}]}]') + driver.assert_output(expect) + driver.set_breakpoint(line=22, condition='i == 8') + driver.cont() + driver.check_hit_breakpoint(2) + driver.stack_list_variables(1) + # i should be 8 + driver.assert_output('{name="i",value="8"}') + driver.quit() + + +if __name__ == 
'__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/gdb/test_pretty_print.py b/lib/python3.10/site-packages/numba/tests/gdb/test_pretty_print.py new file mode 100644 index 0000000000000000000000000000000000000000..b0be5dbe8e4fcb78e2c1edafc679a4ebe3b1bbee --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/gdb/test_pretty_print.py @@ -0,0 +1,69 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit +import numpy as np +from numba.tests.gdb_support import GdbMIDriver, needs_gdb_py3 +from numba.tests.support import TestCase, needs_subprocess +from numba.misc.numba_gdbinfo import collect_gdbinfo +import unittest +import re + + +@needs_gdb_py3 +@needs_subprocess +class Test(TestCase): + + def test(self): + rdt_a = np.dtype([("x", np.int16), ("y", np.float64)], align=True) + + @njit(debug=True) + def foo(): + a = 1.234 + b = (1, 2, 3) + c = ('a', b, 4) + d = np.arange(5.) + e = np.array([[1, 3j], [2, 4j]]) + f = "Some string" + " L-Padded string".lstrip() + g = 11 + 22j + h = np.arange(24).reshape((4, 6))[::2, ::3] + i = np.zeros(2, dtype=rdt_a) + return a, b, c, d, e, f, g, h, i + + foo() + + extension = collect_gdbinfo().extension_loc + driver = GdbMIDriver(__file__, init_cmds=['-x', extension], debug=False) + driver.set_breakpoint(line=29) + driver.run() + driver.check_hit_breakpoint(1) + + # Ideally the function would be run to get the string repr of locals + # but not everything appears in DWARF e.g. string literals. Further, + # str on NumPy arrays seems to vary a bit in output. Therefore a custom + # match is used. 
+ + driver.stack_list_variables(1) + output = driver._captured.after.decode('UTF-8') + done_str = output.splitlines()[0] + pat = r'^\^done,variables=\[\{(.*)\}\]$' + lcls_strs = re.match(pat, done_str).groups()[0].split('},{') + lcls = {k: v for k, v in [re.match(r'name="(.*)",value="(.*)"', + x).groups() for x in lcls_strs]} + expected = dict() + expected['a'] = r'1\.234' + expected['b'] = r'\(1, 2, 3\)' + expected['c'] = r'\(0x0, \(1, 2, 3\), 4\)' + expected['d'] = r'\\n\[0. 1. 2. 3. 4.\]' + expected['e'] = r'\\n\[\[1.\+0.j 0.\+3.j\]\\n \[2.\+0.j 0.\+4.j\]\]' + expected['f'] = "'Some stringL-Padded string'" + expected['g'] = r"11\+22j" + expected['h'] = r'\\n\[\[ 0 3\]\\n \[12 15\]\]' + expected['i'] = r'\\n\[\(0, 0.\) \(0, 0.\)\]' + + for k, v in expected.items(): + self.assertRegex(lcls[k], v) + + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/__init__.py b/lib/python3.10/site-packages/numba/tests/npyufunc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2958441fe7a098eeb66fe183b62bd4ff0edbbe2 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/__init__.py @@ -0,0 +1,10 @@ +from os.path import dirname +import unittest +from unittest.suite import TestSuite + +from numba.testing import load_testsuite + +def load_tests(loader, tests, pattern): + suite = TestSuite() + suite.addTests(load_testsuite(loader, dirname(__file__))) + return suite diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/cache_usecases.py b/lib/python3.10/site-packages/numba/tests/npyufunc/cache_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..4d250756d404b1e87115e4a191768925e5c2bc21 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/cache_usecases.py @@ -0,0 +1,76 @@ +import numba as nb + + +# +# UFunc +# + +def direct_ufunc_cache_usecase(**kwargs): + @nb.vectorize(["intp(intp)", "float64(float64)"], 
cache=True, **kwargs) + def ufunc(inp): + return inp * 2 + + return ufunc + + +def indirect_ufunc_cache_usecase(**kwargs): + @nb.njit(cache=True) + def indirect_ufunc_core(inp): + return inp * 3 + + @nb.vectorize(["intp(intp)", "float64(float64)", "complex64(complex64)"], + **kwargs) + def ufunc(inp): + return indirect_ufunc_core(inp) + + return ufunc + + +# +# DUFunc +# + +def direct_dufunc_cache_usecase(**kwargs): + @nb.vectorize(cache=True, **kwargs) + def ufunc(inp): + return inp * 2 + + return ufunc + + +def indirect_dufunc_cache_usecase(**kwargs): + @nb.njit(cache=True) + def indirect_ufunc_core(inp): + return inp * 3 + + @nb.vectorize(**kwargs) + def ufunc(inp): + return indirect_ufunc_core(inp) + + return ufunc + + +# +# GUFunc +# + +def direct_gufunc_cache_usecase(**kwargs): + @nb.guvectorize(["(intp, intp[:])", "(float64, float64[:])"], + "()->()", cache=True, **kwargs) + def gufunc(inp, out): + out[0] = inp * 2 + + return gufunc + + +def indirect_gufunc_cache_usecase(**kwargs): + @nb.njit(cache=True) + def core(x): + return x * 3 + + @nb.guvectorize(["(intp, intp[:])", "(float64, float64[:])", + "(complex64, complex64[:])"], "()->()", **kwargs) + def gufunc(inp, out): + out[0] = core(inp) + + return gufunc diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_caching.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_caching.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb47f113211721bdeea9ea2327f811833706e9b --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_caching.py @@ -0,0 +1,228 @@ +import sys +import os.path +import re +import subprocess + +import numpy as np + +from numba.tests.support import capture_cache_log +from numba.tests.test_caching import BaseCacheTest +from numba.core import config +import unittest + + +class UfuncCacheTest(BaseCacheTest): + """ + Since the cache stats is not exposed by ufunc, we test by looking at the + cache debug log. 
+ """ + _numba_parallel_test_ = False + + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "cache_usecases.py") + modname = "ufunc_caching_test_fodder" + + regex_data_saved = re.compile(r'\[cache\] data saved to') + regex_index_saved = re.compile(r'\[cache\] index saved to') + + regex_data_loaded = re.compile(r'\[cache\] data loaded from') + regex_index_loaded = re.compile(r'\[cache\] index loaded from') + + def check_cache_saved(self, cachelog, count): + """ + Check number of cache-save were issued + """ + data_saved = self.regex_data_saved.findall(cachelog) + index_saved = self.regex_index_saved.findall(cachelog) + self.assertEqual(len(data_saved), count) + self.assertEqual(len(index_saved), count) + + def check_cache_loaded(self, cachelog, count): + """ + Check number of cache-load were issued + """ + data_loaded = self.regex_data_loaded.findall(cachelog) + index_loaded = self.regex_index_loaded.findall(cachelog) + self.assertEqual(len(data_loaded), count) + self.assertEqual(len(index_loaded), count) + + def check_ufunc_cache(self, usecase_name, n_overloads, **kwargs): + """ + Check number of cache load/save. + There should be one per overloaded version. 
+ """ + mod = self.import_module() + usecase = getattr(mod, usecase_name) + # New cache entry saved + with capture_cache_log() as out: + new_ufunc = usecase(**kwargs) + cachelog = out.getvalue() + self.check_cache_saved(cachelog, count=n_overloads) + + # Use cached version + with capture_cache_log() as out: + cached_ufunc = usecase(**kwargs) + cachelog = out.getvalue() + self.check_cache_loaded(cachelog, count=n_overloads) + + return new_ufunc, cached_ufunc + + +class TestUfuncCacheTest(UfuncCacheTest): + + def test_direct_ufunc_cache(self, **kwargs): + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "direct_ufunc_cache_usecase", n_overloads=2, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_direct_ufunc_cache_objmode(self): + self.test_direct_ufunc_cache(forceobj=True) + + def test_direct_ufunc_cache_parallel(self): + self.test_direct_ufunc_cache(target='parallel') + + def test_indirect_ufunc_cache(self, **kwargs): + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "indirect_ufunc_cache_usecase", n_overloads=3, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_indirect_ufunc_cache_parallel(self): + self.test_indirect_ufunc_cache(target='parallel') + + +class TestDUfuncCacheTest(UfuncCacheTest): + # Note: DUFunc doesn't support parallel target yet + + def check_dufunc_usecase(self, usecase_name): + mod = self.import_module() + usecase = getattr(mod, usecase_name) + # Create dufunc + with capture_cache_log() as out: + ufunc = usecase() + self.check_cache_saved(out.getvalue(), count=0) + # Compile & cache + with 
capture_cache_log() as out: + ufunc(np.arange(10)) + self.check_cache_saved(out.getvalue(), count=1) + self.check_cache_loaded(out.getvalue(), count=0) + # Use cached + with capture_cache_log() as out: + ufunc = usecase() + ufunc(np.arange(10)) + self.check_cache_loaded(out.getvalue(), count=1) + + def test_direct_dufunc_cache(self): + # We don't test for objmode because DUfunc don't support it. + self.check_dufunc_usecase('direct_dufunc_cache_usecase') + + def test_indirect_dufunc_cache(self): + self.check_dufunc_usecase('indirect_dufunc_cache_usecase') + + +def _fix_raw_path(rstr): + if config.IS_WIN32: + rstr = rstr.replace(r'/', r'\\\\') + return rstr + + +class TestGUfuncCacheTest(UfuncCacheTest): + + def test_filename_prefix(self): + mod = self.import_module() + usecase = getattr(mod, "direct_gufunc_cache_usecase") + with capture_cache_log() as out: + usecase() + cachelog = out.getvalue() + # find number filename with "guf-" prefix + fmt1 = _fix_raw_path(r'/__pycache__/guf-{}') + prefixed = re.findall(fmt1.format(self.modname), cachelog) + fmt2 = _fix_raw_path(r'/__pycache__/{}') + normal = re.findall(fmt2.format(self.modname), cachelog) + # expecting 2 overloads + self.assertGreater(len(normal), 2) + # expecting equal number of wrappers and overloads cache entries + self.assertEqual(len(normal), len(prefixed)) + + def test_direct_gufunc_cache(self, **kwargs): + # 2 cache entry for the 2 overloads + # and 2 cache entry for the gufunc wrapper + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "direct_gufunc_cache_usecase", n_overloads=2 + 2, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_direct_gufunc_cache_objmode(self): + self.test_direct_gufunc_cache(forceobj=True) + + def test_direct_gufunc_cache_parallel(self): + 
self.test_direct_gufunc_cache(target='parallel') + + def test_indirect_gufunc_cache(self, **kwargs): + # 3 cache entry for the 3 overloads + # and no cache entry for the gufunc wrapper + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "indirect_gufunc_cache_usecase", n_overloads=3, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_indirect_gufunc_cache_parallel(self, **kwargs): + self.test_indirect_gufunc_cache(target='parallel') + + +class TestCacheSpecificIssue(UfuncCacheTest): + + def run_in_separate_process(self, runcode): + # Based on the same name util function in test_dispatcher but modified + # to allow user to define what to run. + code = """if 1: + import sys + + sys.path.insert(0, %(tempdir)r) + mod = __import__(%(modname)r) + mod.%(runcode)s + """ % dict(tempdir=self.tempdir, modname=self.modname, + runcode=runcode) + + popen = subprocess.Popen([sys.executable, "-c", code], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError("process failed with code %s: stderr follows" + "\n%s\n" % (popen.returncode, err.decode())) + + # + # The following test issue #2198 that loading cached (g)ufunc first + # bypasses some target context initialization. 
+ # + + def test_first_load_cached_ufunc(self): + # ensure function is cached + self.run_in_separate_process('direct_ufunc_cache_usecase()') + # use the cached function + # this will fail if the target context is not init'ed + self.run_in_separate_process('direct_ufunc_cache_usecase()') + + def test_first_load_cached_gufunc(self): + # ensure function is cached + self.run_in_separate_process('direct_gufunc_cache_usecase()') + # use the cached function + # this will fail out if the target context is not init'ed + self.run_in_separate_process('direct_gufunc_cache_usecase()') + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_gufunc.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_gufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb3343ffb17bbbf5d3146a272813ef6a3cd07f9 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_gufunc.py @@ -0,0 +1,849 @@ +import unittest +import pickle + +import numpy as np + +from numba import void, float32, float64, int32, int64, jit, guvectorize +from numba.core.errors import TypingError +from numba.np.ufunc import GUVectorize +from numba.tests.support import tag, TestCase + + +def matmulcore(A, B, C): + """docstring for matmulcore""" + m, n = A.shape + n, p = B.shape + for i in range(m): + for j in range(p): + C[i, j] = 0 + for k in range(n): + C[i, j] += A[i, k] * B[k, j] + + +def axpy(a, x, y, out): + out[0] = a * x + y + + +class TestGUFunc(TestCase): + target = 'cpu' + + def check_matmul_gufunc(self, gufunc): + matrix_ct = 1001 + A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4) + B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5) + + C = gufunc(A, B) + Gold = np.matmul(A, B) + + np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8) + + def test_gufunc(self): + gufunc = GUVectorize(matmulcore, '(m,n),(n,p)->(m,p)', + target=self.target) + 
gufunc.add((float32[:, :], float32[:, :], float32[:, :])) + gufunc = gufunc.build_ufunc() + + self.check_matmul_gufunc(gufunc) + + def test_guvectorize_decor(self): + gufunc = guvectorize([void(float32[:,:], float32[:,:], float32[:,:])], + '(m,n),(n,p)->(m,p)', + target=self.target)(matmulcore) + + self.check_matmul_gufunc(gufunc) + + def test_ufunc_like(self): + # Test problem that the stride of "scalar" gufunc argument not properly + # handled when the actual argument is an array, + # causing the same value (first value) being repeated. + gufunc = GUVectorize(axpy, '(), (), () -> ()', target=self.target) + gufunc.add('(intp, intp, intp, intp[:])') + gufunc = gufunc.build_ufunc() + + x = np.arange(10, dtype=np.intp) + out = gufunc(x, x, x) + + np.testing.assert_equal(out, x * x + x) + + def test_axis(self): + # issue https://github.com/numba/numba/issues/6773 + @guvectorize(["f8[:],f8[:]"], "(n)->(n)") + def my_cumsum(x, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + res[i] = acc + + x = np.ones((20, 30)) + # Check regular call + y = my_cumsum(x, axis=0) + expected = np.cumsum(x, axis=0) + np.testing.assert_equal(y, expected) + # Check "out" kw + out_kw = np.zeros_like(y) + my_cumsum(x, out=out_kw, axis=0) + np.testing.assert_equal(out_kw, expected) + + def test_docstring(self): + @guvectorize([(int64[:], int64, int64[:])], '(n),()->(n)') + def gufunc(x, y, res): + "docstring for gufunc" + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.assertEqual("numba.tests.npyufunc.test_gufunc", gufunc.__module__) + self.assertEqual("gufunc", gufunc.__name__) + self.assertEqual("TestGUFunc.test_docstring..gufunc", gufunc.__qualname__) + self.assertEqual("docstring for gufunc", gufunc.__doc__) + + +class TestMultipleOutputs(TestCase): + target = 'cpu' + + def test_multiple_outputs_same_type_passed_in(self): + @guvectorize('(x)->(x),(x)', + target=self.target) + def copy(A, B, C): + for i in range(B.size): + B[i] = A[i] + C[i] = A[i] + + A = 
np.arange(10, dtype=np.float32) + 1 + B = np.zeros_like(A) + C = np.zeros_like(A) + copy(A, B, C) + np.testing.assert_allclose(A, B) + np.testing.assert_allclose(A, C) + + def test_multiple_outputs_distinct_values(self): + + @guvectorize('(x)->(x),(x)', + target=self.target) + def copy_and_double(A, B, C): + for i in range(B.size): + B[i] = A[i] + C[i] = A[i] * 2 + + A = np.arange(10, dtype=np.float32) + 1 + B = np.zeros_like(A) + C = np.zeros_like(A) + copy_and_double(A, B, C) + np.testing.assert_allclose(A, B) + np.testing.assert_allclose(A * 2, C) + + def test_multiple_output_dtypes(self): + + @guvectorize('(x)->(x),(x)', + target=self.target) + def copy_and_multiply(A, B, C): + for i in range(B.size): + B[i] = A[i] + C[i] = A[i] * 1.5 + + A = np.arange(10, dtype=np.int32) + 1 + B = np.zeros_like(A) + C = np.zeros_like(A, dtype=np.float64) + copy_and_multiply(A, B, C) + np.testing.assert_allclose(A, B) + np.testing.assert_allclose(A * np.float64(1.5), C) + + def test_incorrect_number_of_pos_args(self): + @guvectorize('(m),(m)->(m),(m)', target=self.target) + def f(x, y, z, w): + pass + + arr = np.arange(5, dtype=np.int32) + + # Inputs only, too few + msg = "Too few arguments for function 'f'" + with self.assertRaises(TypeError) as te: + f(arr) + self.assertIn(msg, str(te.exception)) + + # Inputs and outputs, too many + with self.assertRaises(TypeError) as te: + f(arr, arr, arr, arr, arr) + self.assertIn(msg, str(te.exception)) + + +class TestGUFuncParallel(TestGUFunc): + _numba_parallel_test_ = False + target = 'parallel' + + +class TestDynamicGUFunc(TestCase): + target = 'cpu' + + def test_dynamic_matmul(self): + + def check_matmul_gufunc(gufunc, A, B, C): + Gold = np.matmul(A, B) + gufunc(A, B, C) + np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8) + + gufunc = GUVectorize(matmulcore, '(m,n),(n,p)->(m,p)', + target=self.target, is_dynamic=True) + matrix_ct = 10 + Ai64 = np.arange(matrix_ct * 2 * 4, dtype=np.int64).reshape(matrix_ct, 2, 4) + Bi64 = 
np.arange(matrix_ct * 4 * 5, dtype=np.int64).reshape(matrix_ct, 4, 5) + Ci64 = np.arange(matrix_ct * 2 * 5, dtype=np.int64).reshape(matrix_ct, 2, 5) + check_matmul_gufunc(gufunc, Ai64, Bi64, Ci64) + + A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4) + B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5) + C = np.arange(matrix_ct * 2 * 5, dtype=np.float32).reshape(matrix_ct, 2, 5) + check_matmul_gufunc(gufunc, A, B, C) # trigger compilation + + self.assertEqual(len(gufunc.types), 2) # ensure two versions of gufunc + + + def test_dynamic_ufunc_like(self): + + def check_ufunc_output(gufunc, x): + out = np.zeros(10, dtype=x.dtype) + out_kw = np.zeros(10, dtype=x.dtype) + gufunc(x, x, x, out) + gufunc(x, x, x, out=out_kw) + golden = x * x + x + np.testing.assert_equal(out, golden) + np.testing.assert_equal(out_kw, golden) + + # Test problem that the stride of "scalar" gufunc argument not properly + # handled when the actual argument is an array, + # causing the same value (first value) being repeated. + gufunc = GUVectorize(axpy, '(), (), () -> ()', target=self.target, + is_dynamic=True) + x = np.arange(10, dtype=np.intp) + check_ufunc_output(gufunc, x) + + + def test_dynamic_scalar_output(self): + """ + Note that scalar output is a 0-dimension array that acts as + a pointer to the output location. + """ + + @guvectorize('(n)->()', target=self.target, nopython=True) + def sum_row(inp, out): + tmp = 0. + for i in range(inp.shape[0]): + tmp += inp[i] + out[()] = tmp + + # inp is (10000, 3) + # out is (10000) + # The outer (leftmost) dimension must match or numpy broadcasting is performed. + + self.assertTrue(sum_row.is_dynamic) + inp = np.arange(30000, dtype=np.int32).reshape(10000, 3) + out = np.zeros(10000, dtype=np.int32) + sum_row(inp, out) + + # verify result + for i in range(inp.shape[0]): + self.assertEqual(out[i], inp[i].sum()) + + msg = "Too few arguments for function 'sum_row'." 
+ with self.assertRaisesRegex(TypeError, msg): + sum_row(inp) + + def test_axis(self): + # issue https://github.com/numba/numba/issues/6773 + @guvectorize("(n)->(n)") + def my_cumsum(x, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + res[i] = acc + + x = np.ones((20, 30)) + expected = np.cumsum(x, axis=0) + # Check regular call + y = np.zeros_like(expected) + my_cumsum(x, y, axis=0) + np.testing.assert_equal(y, expected) + # Check "out" kw + out_kw = np.zeros_like(y) + my_cumsum(x, out=out_kw, axis=0) + np.testing.assert_equal(out_kw, expected) + + def test_gufunc_attributes(self): + @guvectorize("(n)->(n)") + def gufunc(x, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + res[i] = acc + + # ensure gufunc exports attributes + attrs = ['signature', 'accumulate', 'at', 'outer', 'reduce', 'reduceat'] + for attr in attrs: + contains = hasattr(gufunc, attr) + self.assertTrue(contains, 'dynamic gufunc not exporting "%s"' % (attr,)) + + a = np.array([1, 2, 3, 4]) + res = np.array([0, 0, 0, 0]) + gufunc(a, res) # trigger compilation + self.assertPreciseEqual(res, np.array([1, 3, 6, 10])) + + # other attributes are not callable from a gufunc with signature + # see: https://github.com/numba/numba/issues/2794 + # note: this is a limitation in NumPy source code! 
+ self.assertEqual(gufunc.signature, "(n)->(n)") + + with self.assertRaises(RuntimeError) as raises: + gufunc.accumulate(a) + self.assertEqual(str(raises.exception), "Reduction not defined on ufunc with signature") + + with self.assertRaises(RuntimeError) as raises: + gufunc.reduce(a) + self.assertEqual(str(raises.exception), "Reduction not defined on ufunc with signature") + + with self.assertRaises(RuntimeError) as raises: + gufunc.reduceat(a, [0, 2]) + self.assertEqual(str(raises.exception), "Reduction not defined on ufunc with signature") + + with self.assertRaises(TypeError) as raises: + gufunc.outer(a, a) + self.assertEqual(str(raises.exception), "method outer is not allowed in ufunc with non-trivial signature") + + def test_gufunc_attributes2(self): + @guvectorize('(),()->()') + def add(x, y, res): + res[0] = x + y + + # add signature "(),() -> ()" is evaluated to None + self.assertIsNone(add.signature) + + a = np.array([1, 2, 3, 4]) + b = np.array([4, 3, 2, 1]) + res = np.array([0, 0, 0, 0]) + add(a, b, res) # trigger compilation + self.assertPreciseEqual(res, np.array([5, 5, 5, 5])) + + # now test other attributes + self.assertIsNone(add.signature) + self.assertEqual(add.reduce(a), 10) + self.assertPreciseEqual(add.accumulate(a), np.array([1, 3, 6, 10])) + self.assertPreciseEqual(add.outer([0, 1], [1, 2]), np.array([[1, 2], [2, 3]])) + self.assertPreciseEqual(add.reduceat(a, [0, 2]), np.array([3, 7])) + + x = np.array([1, 2, 3, 4]) + y = np.array([1, 2]) + add.at(x, [0, 1], y) + self.assertPreciseEqual(x, np.array([2, 4, 3, 4])) + + +class TestGUVectorizeScalar(TestCase): + """ + Nothing keeps user from out-of-bound memory access + """ + target = 'cpu' + + def test_scalar_output(self): + """ + Note that scalar output is a 0-dimension array that acts as + a pointer to the output location. + """ + + @guvectorize(['void(int32[:], int32[:])'], '(n)->()', + target=self.target, nopython=True) + def sum_row(inp, out): + tmp = 0. 
+ for i in range(inp.shape[0]): + tmp += inp[i] + out[()] = tmp + + # inp is (10000, 3) + # out is (10000) + # The outer (leftmost) dimension must match or numpy broadcasting is performed. + + inp = np.arange(30000, dtype=np.int32).reshape(10000, 3) + out = sum_row(inp) + + # verify result + for i in range(inp.shape[0]): + self.assertEqual(out[i], inp[i].sum()) + + def test_scalar_input(self): + + @guvectorize(['int32[:], int32[:], int32[:]'], '(n),()->(n)', + target=self.target, nopython=True) + def foo(inp, n, out): + for i in range(inp.shape[0]): + out[i] = inp[i] * n[0] + + inp = np.arange(3 * 10, dtype=np.int32).reshape(10, 3) + # out = np.empty_like(inp) + out = foo(inp, 2) + + # verify result + self.assertPreciseEqual(inp * 2, out) + + def test_scalar_input_core_type(self): + def pyfunc(inp, n, out): + for i in range(inp.size): + out[i] = n * (inp[i] + 1) + + my_gufunc = guvectorize(['int32[:], int32, int32[:]'], + '(n),()->(n)', + target=self.target)(pyfunc) + + # test single core loop execution + arr = np.arange(10).astype(np.int32) + got = my_gufunc(arr, 2) + + expected = np.zeros_like(got) + pyfunc(arr, 2, expected) + + np.testing.assert_equal(got, expected) + + # test multiple core loop execution + arr = np.arange(20).astype(np.int32).reshape(10, 2) + got = my_gufunc(arr, 2) + + expected = np.zeros_like(got) + for ax in range(expected.shape[0]): + pyfunc(arr[ax], 2, expected[ax]) + + np.testing.assert_equal(got, expected) + + def test_scalar_input_core_type_error(self): + with self.assertRaises(TypeError) as raises: + @guvectorize(['int32[:], int32, int32[:]'], '(n),(n)->(n)', + target=self.target) + def pyfunc(a, b, c): + pass + self.assertEqual("scalar type int32 given for non scalar argument #2", + str(raises.exception)) + + def test_ndim_mismatch(self): + with self.assertRaises(TypeError) as raises: + @guvectorize(['int32[:], int32[:]'], '(m,n)->(n)', + target=self.target) + def pyfunc(a, b): + pass + self.assertEqual("type and shape signature 
mismatch for arg #1", + str(raises.exception)) + + +class TestGUVectorizeScalarParallel(TestGUVectorizeScalar): + _numba_parallel_test_ = False + target = 'parallel' + + +class TestGUVectorizePickling(TestCase): + def test_pickle_gufunc_non_dyanmic(self): + """Non-dynamic gufunc. + """ + @guvectorize(["f8,f8[:]"], "()->()") + def double(x, out): + out[:] = x * 2 + + # pickle + ser = pickle.dumps(double) + cloned = pickle.loads(ser) + + # attributes carried over + self.assertEqual(cloned._frozen, double._frozen) + self.assertEqual(cloned.identity, double.identity) + self.assertEqual(cloned.is_dynamic, double.is_dynamic) + self.assertEqual(cloned.gufunc_builder._sigs, + double.gufunc_builder._sigs) + # expected value of attributes + self.assertTrue(cloned._frozen) + + cloned.disable_compile() + self.assertTrue(cloned._frozen) + + # scalar version + self.assertPreciseEqual(double(0.5), cloned(0.5)) + # array version + arr = np.arange(10) + self.assertPreciseEqual(double(arr), cloned(arr)) + + def test_pickle_gufunc_dyanmic_null_init(self): + """Dynamic gufunc w/o prepopulating before pickling. 
+ """ + @guvectorize("()->()", identity=1) + def double(x, out): + out[:] = x * 2 + + # pickle + ser = pickle.dumps(double) + cloned = pickle.loads(ser) + + # attributes carried over + self.assertEqual(cloned._frozen, double._frozen) + self.assertEqual(cloned.identity, double.identity) + self.assertEqual(cloned.is_dynamic, double.is_dynamic) + self.assertEqual(cloned.gufunc_builder._sigs, + double.gufunc_builder._sigs) + # expected value of attributes + self.assertFalse(cloned._frozen) + + # scalar version + expect = np.zeros(1) + got = np.zeros(1) + double(0.5, out=expect) + cloned(0.5, out=got) + self.assertPreciseEqual(expect, got) + # array version + arr = np.arange(10) + expect = np.zeros_like(arr) + got = np.zeros_like(arr) + double(arr, out=expect) + cloned(arr, out=got) + self.assertPreciseEqual(expect, got) + + def test_pickle_gufunc_dynamic_initialized(self): + """Dynamic gufunc prepopulated before pickling. + + Once unpickled, we disable compilation to verify that the gufunc + compilation state is carried over. 
+ """ + @guvectorize("()->()", identity=1) + def double(x, out): + out[:] = x * 2 + + # prepopulate scalar + expect = np.zeros(1) + got = np.zeros(1) + double(0.5, out=expect) + # prepopulate array + arr = np.arange(10) + expect = np.zeros_like(arr) + got = np.zeros_like(arr) + double(arr, out=expect) + + # pickle + ser = pickle.dumps(double) + cloned = pickle.loads(ser) + + # attributes carried over + self.assertEqual(cloned._frozen, double._frozen) + self.assertEqual(cloned.identity, double.identity) + self.assertEqual(cloned.is_dynamic, double.is_dynamic) + self.assertEqual(cloned.gufunc_builder._sigs, + double.gufunc_builder._sigs) + # expected value of attributes + self.assertFalse(cloned._frozen) + + # disable compilation + cloned.disable_compile() + self.assertTrue(cloned._frozen) + # scalar version + expect = np.zeros(1) + got = np.zeros(1) + double(0.5, out=expect) + cloned(0.5, out=got) + self.assertPreciseEqual(expect, got) + # array version + expect = np.zeros_like(arr) + got = np.zeros_like(arr) + double(arr, out=expect) + cloned(arr, out=got) + self.assertPreciseEqual(expect, got) + + +class TestGUVectorizeJit(TestCase): + target = 'cpu' + + def check_add_gufunc(self, gufunc): + @jit(nopython=True) + def jit_add(x, y, res): + gufunc(x, y, res) + + x = np.arange(40, dtype='i8').reshape(4, 2, 5) + y = np.int32(100) + res = np.zeros_like(x) + jit_add(x, y, res) + self.assertPreciseEqual(res, x + y) + + def test_add_static(self): + @guvectorize('int64[:], int64, int64[:]', '(n),()->(n)', + target=self.target) + def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + def test_add_static_cast_args(self): + # cast the second argument from i32 -> i64 + @guvectorize('int64[:], int64, int64[:]', '(n),()->(n)', + target=self.target) + def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + def test_add_dynamic(self): + @guvectorize('(n),()->(n)', target=self.target) 
+ def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + @unittest.expectedFailure + def test_object_mode(self): + @guvectorize('(n),()->(n)', target=self.target, forceobj=True) + def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + def check_matmul(self, jit_func): + matrix_ct = 1001 + A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4) + B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5) + C = np.arange(matrix_ct * 2 * 5, dtype=np.float32).reshape(matrix_ct, 2, 5) + + jit_func(A, B, C) + Gold = np.matmul(A, B) + + np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8) + + def test_njit_matmul_call(self): + + gufunc = guvectorize('(m,n),(n,p)->(m,p)', + target=self.target)(matmulcore) + + @jit(nopython=True) + def matmul_jit(A, B, C): + return gufunc(A, B, C) + + self.check_matmul(matmul_jit) + + def test_axpy(self): + gufunc = GUVectorize(axpy, '(),(),() -> ()', target=self.target, + is_dynamic=True) + + @jit(nopython=True) + def axpy_jit(a, x, y, out): + gufunc(a, x, y, out) + + x = np.arange(10, dtype=np.intp) + out = np.zeros_like(x) + axpy_jit(x, x, x, out) + self.assertPreciseEqual(out, x * x + x) + + def test_output_scalar(self): + + @guvectorize('(n),(m) -> ()') + def gufunc(x, y, res): + res[0] = x.sum() + y.sum() + + @jit(nopython=True) + def jit_func(x, y, res): + gufunc(x, y, res) + + x = np.arange(40, dtype='i8').reshape(4, 10) + y = np.arange(20, dtype='i8') + res = np.zeros(4, dtype='i8') + jit_func(x, y, res) + expected = np.zeros_like(res) + gufunc(x, y, expected) + self.assertPreciseEqual(res, expected) + + def test_input_scalar(self): + + @guvectorize('() -> ()') + def gufunc(x, res): + res[0] = x + 100 + + @jit(nopython=True) + def jit_func(x, res): + gufunc(x, res) + + x = np.arange(40, dtype='i8').reshape(5, 2, 4) + res = np.zeros_like(x) + jit_func(x, res) + expected = np.zeros_like(res) + 
gufunc(x, expected) + self.assertPreciseEqual(res, expected) + + def test_gufunc_ndim_mismatch(self): + signature = "(n, m), (n, n, n) -> (m), (n, n)" + @guvectorize(signature) + def bar(x, y, res, out): + res[0] = 123 + out[0] = 456 + + @jit(nopython=True) + def foo(x, y, res, out): + bar(x, y, res, out) + + N, M = 2, 3 + x = np.arange(N**2).reshape(N, N) + y = np.arange(N**3).reshape(N, N, N) + res = np.arange(M) + out = np.arange(N**2).reshape(N, N) + + # calling with a 1d array should result in an error + with self.assertRaises(TypingError) as raises: + x_ = np.arange(N * N) + foo(x_, y, res, out) + msg = ('bar: Input operand 0 does not have enough dimensions (has ' + f'1, gufunc core with signature {signature} requires 2)') + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + y_ = np.arange(N * N).reshape(N, N) + foo(x, y_, res, out) + msg = ('bar: Input operand 1 does not have enough dimensions (has ' + f'2, gufunc core with signature {signature} requires 3)') + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + res_ = np.array(3) + foo(x, y, res_, out) + msg = ('bar: Output operand 0 does not have enough dimensions (has ' + f'0, gufunc core with signature {signature} requires 1)') + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + out_ = np.arange(N) + foo(x, y, res, out_) + msg = ('bar: Output operand 1 does not have enough dimensions (has ' + f'1, gufunc core with signature {signature} requires 2)') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_inner_dimensions(self): + @guvectorize('(n),(n) -> ()') + def bar(x, y, res): + res[0] = 123 + + @jit(nopython=True) + def foo(x, y, res): + bar(x, y, res) + + N = 2 + M = 3 + x = np.empty((5, 3, N)) + y = np.empty((M,)) + res = np.zeros((5, 3)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + bar(x, y, res) + msg = 
('Input operand 1 has a mismatch in its core dimension 0, with ' + 'gufunc signature (n),(n) -> () (size 3 is different from 2)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res) + msg = ('Operand has a mismatch in one of its core dimensions') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_inner_dimensions_input_output(self): + @guvectorize('(n),(m) -> (n)') + def bar(x, y, res): + res[0] = 123 + + @jit(nopython=True) + def foo(x, y, res): + bar(x, y, res) + + N = 2 + M = 3 + x = np.empty((5, 3, N)) + y = np.empty((M,)) + res = np.zeros((5, 3)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + bar(x, y, res) + msg = ('Output operand 0 has a mismatch in its core dimension 0, with ' + 'gufunc signature (n),(m) -> (n) (size 3 is different from 2)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res) + msg = ('Operand has a mismatch in one of its core dimensions') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_inner_dimensions_output(self): + @guvectorize('(n),(m) -> (m),(m)') + def bar(x, y, res, out): + res[0] = 123 + out[0] = 456 + + @jit(nopython=True) + def foo(x, y, res, out): + bar(x, y, res, out) + + N = 2 + M = 3 + x = np.empty((N,)) + y = np.empty((M,)) + res = np.zeros((N,)) + out = np.zeros((M,)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + bar(x, y, res, out) + msg = ('Output operand 0 has a mismatch in its core dimension 0, with ' + 'gufunc signature (n),(m) -> (m),(m) (size 2 is different from 3)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res, out) + msg = ('Operand has a mismatch in one of its core dimensions') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_loop_shape(self): + @guvectorize('(n),(n) -> ()') + 
def bar(x, y, res): + res[0] = 123 + + @jit(nopython=True) + def foo(x, y, res): + bar(x, y, res) + + N = 2 + x = np.empty((1, 5, 3, N,)) + y = np.empty((5, 3, N,)) + res = np.zeros((5, 3)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res) + msg = ('Loop and array shapes are incompatible') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_loop_shape_2(self): + @guvectorize('(n),(n) -> (), (n)') + def gufunc(x, y, res, out): + res[0] = x.sum() + for i in range(x.shape[0]): + out[i] += x[i] + y.sum() + + @jit + def jit_func(x, y, res, out): + gufunc(x, y, res, out) + + N = 2 + + x = np.arange(4*N).reshape((4, N)) + y = np.arange(N) + res = np.empty((3,)) + out = np.zeros((3, N)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + gufunc(x, y, res, out) + msg = ('operands could not be broadcast together with remapped shapes ' + '[original->remapped]: (4,2)->(4,newaxis) (2,)->() ' + '(3,)->(3,newaxis) (3,2)->(3,2) and requested shape (2)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + jit_func(x, y, res, out) + msg = ('Loop and array shapes are incompatible') + self.assertIn(msg, str(raises.exception)) + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_env_variable.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_env_variable.py new file mode 100644 index 0000000000000000000000000000000000000000..7d11692ad34250e1056121e570d4c18d86f93183 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_env_variable.py @@ -0,0 +1,38 @@ +from numba.np.ufunc.parallel import get_thread_count +from os import environ as env +from numba.core import config +import unittest + + +class TestParallelEnvVariable(unittest.TestCase): + """ + Tests environment variables related to the underlying "parallel" + functions for npyufuncs. 
+ """ + + _numba_parallel_test_ = False + + def test_num_threads_variable(self): + """ + Tests the NUMBA_NUM_THREADS env variable behaves as expected. + """ + key = 'NUMBA_NUM_THREADS' + current = str(getattr(env, key, config.NUMBA_NUM_THREADS)) + threads = "3154" + env[key] = threads + try: + config.reload_config() + except RuntimeError as e: + # This test should fail if threads have already been launched + self.assertIn("Cannot set NUMBA_NUM_THREADS", e.args[0]) + else: + self.assertEqual(threads, str(get_thread_count())) + self.assertEqual(threads, str(config.NUMBA_NUM_THREADS)) + finally: + # reset the env variable/set to default. Should not fail even if + # threads are launched because the value is the same. + env[key] = current + config.reload_config() + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_low_work.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_low_work.py new file mode 100644 index 0000000000000000000000000000000000000000..cab4d42749f643ca1b38f74f3f6ef400661c5023 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_low_work.py @@ -0,0 +1,44 @@ +""" +There was a deadlock problem when work count is smaller than number of threads. 
+""" + +import numpy as np + +from numba import float32, float64, int32, uint32 +from numba.np.ufunc import Vectorize +import unittest + + +def vector_add(a, b): + return a + b + + +class TestParallelLowWorkCount(unittest.TestCase): + + _numba_parallel_test_ = False + + def test_low_workcount(self): + # build parallel native code ufunc + pv = Vectorize(vector_add, target='parallel') + for ty in (int32, uint32, float32, float64): + pv.add(ty(ty, ty)) + para_ufunc = pv.build_ufunc() + + # build python ufunc + np_ufunc = np.vectorize(vector_add) + + # test it out + def test(ty): + data = np.arange(1).astype(ty) # just one item + result = para_ufunc(data, data) + gold = np_ufunc(data, data) + np.testing.assert_allclose(gold, result) + + test(np.double) + test(np.float32) + test(np.int32) + test(np.uint32) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py new file mode 100644 index 0000000000000000000000000000000000000000..2237122291960d7e437875bc9d55b96bed4a1d33 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py @@ -0,0 +1,128 @@ +import time +import ctypes + +import numpy as np + +from numba.tests.support import captured_stdout +from numba import vectorize, guvectorize +import unittest + + +class TestParUfuncIssues(unittest.TestCase): + + _numba_parallel_test_ = False + + def test_thread_response(self): + """ + Related to #89. + This does not test #89 but tests the fix for it. + We want to make sure the worker threads can be used multiple times + and with different time gap between each execution. 
+ """ + + @vectorize('float64(float64, float64)', target='parallel') + def fnv(a, b): + return a + b + + sleep_time = 1 # 1 second + while sleep_time > 0.00001: # 10us + time.sleep(sleep_time) + a = b = np.arange(10**5) + np.testing.assert_equal(a + b, fnv(a, b)) + # Reduce sleep time + sleep_time /= 2 + + def test_gil_reacquire_deadlock(self): + """ + Testing issue #1998 due to GIL reacquiring + """ + # make a ctypes callback that requires the GIL + proto = ctypes.CFUNCTYPE(None, ctypes.c_int32) + characters = 'abcdefghij' + + def bar(x): + print(characters[x]) + + cbar = proto(bar) + + # our unit under test + @vectorize(['int32(int32)'], target='parallel', nopython=True) + def foo(x): + print(x % 10) # this reacquires the GIL + cbar(x % 10) # this reacquires the GIL + return x * 2 + + # Numpy ufunc has a heuristic to determine whether to release the GIL + # during execution. Small input size (10) seems to not release the GIL. + # Large input size (1000) seems to release the GIL. + for nelem in [1, 10, 100, 1000]: + # inputs + a = np.arange(nelem, dtype=np.int32) + acopy = a.copy() + # run and capture stdout + with captured_stdout() as buf: + got = foo(a) + stdout = buf.getvalue() + buf.close() + # process outputs from print + got_output = sorted(map(lambda x: x.strip(), stdout.splitlines())) + # build expected output + expected_output = [str(x % 10) for x in range(nelem)] + expected_output += [characters[x % 10] for x in range(nelem)] + expected_output = sorted(expected_output) + # verify + self.assertEqual(got_output, expected_output) + np.testing.assert_equal(got, 2 * acopy) + + + +class TestParGUfuncIssues(unittest.TestCase): + + _numba_parallel_test_ = False + + def test_gil_reacquire_deadlock(self): + """ + Testing similar issue to #1998 due to GIL reacquiring for Gufunc + """ + # make a ctypes callback that requires the GIL + proto = ctypes.CFUNCTYPE(None, ctypes.c_int32) + characters = 'abcdefghij' + + def bar(x): + print(characters[x]) + + cbar = 
proto(bar) + + # our unit under test + @guvectorize(['(int32, int32[:])'], "()->()", + target='parallel', nopython=True) + def foo(x, out): + print(x % 10) # this reacquires the GIL + cbar(x % 10) # this reacquires the GIL + out[0] = x * 2 + + # Numpy ufunc has a heuristic to determine whether to release the GIL + # during execution. Small input size (10) seems to not release the GIL. + # Large input size (1000) seems to release the GIL. + for nelem in [1, 10, 100, 1000]: + # inputs + a = np.arange(nelem, dtype=np.int32) + acopy = a.copy() + # run and capture stdout + with captured_stdout() as buf: + got = foo(a) + stdout = buf.getvalue() + buf.close() + # process outputs from print + got_output = sorted(map(lambda x: x.strip(), stdout.splitlines())) + # build expected output + expected_output = [str(x % 10) for x in range(nelem)] + expected_output += [characters[x % 10] for x in range(nelem)] + expected_output = sorted(expected_output) + # verify + self.assertEqual(got_output, expected_output) + np.testing.assert_equal(got, 2 * acopy) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_update_inplace.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_update_inplace.py new file mode 100644 index 0000000000000000000000000000000000000000..97bc39226d3f0f7e8021dd9be993666a5b9ec3b0 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_update_inplace.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +import unittest + +import numpy as np +from numba import guvectorize +from numba.tests.support import TestCase + + +def py_replace_2nd(x_t, y_1): + for t in range(0, x_t.shape[0], 2): + x_t[t] = y_1[0] + + +def py_update_3(x0_t, x1_t, x2_t, y_1): + for t in range(0, x0_t.shape[0]): + x0_t[t] = y_1[0] + x1_t[t] = 2 * y_1[0] + x2_t[t] = 3 * y_1[0] + + +class TestUpdateInplace(TestCase): + + def 
_run_test_for_gufunc(self, gufunc, py_func, expect_f4_to_pass=True, + z=2): + for dtype, expect_to_pass in [('f8', True), ('f4', expect_f4_to_pass)]: + inputs = [np.zeros(10, dtype) for _ in range(gufunc.nin - 1)] + ex_inputs = [x_t.copy() for x_t in inputs] + + gufunc(*inputs, z) + py_func(*ex_inputs, np.array([z])) + + for i, (x_t, ex_x_t) in enumerate(zip(inputs, ex_inputs)): + if expect_to_pass: + np.testing.assert_equal(x_t, ex_x_t, err_msg='input %s' % i) + else: + self.assertFalse((x_t == ex_x_t).all(), msg='input %s' % i) + + def test_update_inplace(self): + # test without writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True)(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd, + expect_f4_to_pass=False) + + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,))(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + # test with writable_args as strings + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', nopython=True, + writable_args=('x_t',))(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + def test_update_inplace_with_cache(self): + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + cache=True)(py_replace_2nd) + # 2nd time it is loaded from cache + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + cache=True)(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + def test_update_inplace_parallel(self): + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + target='parallel')(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + def test_update_inplace_3(self): + # test without writable_args + gufunc = guvectorize(['void(f8[:], f8[:], f8[:], f8[:])'], + '(t),(t),(t),()', + 
nopython=True)(py_update_3) + self._run_test_for_gufunc(gufunc, py_update_3, expect_f4_to_pass=False) + + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:], f8[:], f8[:])'], + '(t),(t),(t),()', nopython=True, + writable_args=(0, 1, 2))(py_update_3) + self._run_test_for_gufunc(gufunc, py_update_3) + + # test with writable_args as mix of strings and ints + gufunc = guvectorize(['void(f8[:], f8[:], f8[:], f8[:])'], + '(t),(t),(t),()', nopython=True, + writable_args=('x0_t', 'x1_t', 2))(py_update_3) + self._run_test_for_gufunc(gufunc, py_update_3) + + def test_exceptions(self): + # check that len(writable_args) <= nin + with self.assertRaises(ValueError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', nopython=True, + writable_args=(0, 1, 2, 5))(py_replace_2nd) + + # check that all values in writable_args are between 0 and nin + with self.assertRaises(ValueError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(5,))(py_replace_2nd) + + with self.assertRaises(ValueError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(-1,))(py_replace_2nd) + + # check that exception is raised when passing non-existing argument name + with self.assertRaises(RuntimeError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=('z_t',))(py_replace_2nd) + + # writable_args are not supported for target='cuda' + with self.assertRaises(TypeError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + target='cuda')(py_replace_2nd) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/test_vectorize_decor.py b/lib/python3.10/site-packages/numba/tests/npyufunc/test_vectorize_decor.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb984d8c04a07c12b0d690a2003b0a61defd058 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/test_vectorize_decor.py @@ -0,0 
+1,151 @@ +import math + +import numpy as np + +from numba import int32, uint32, float32, float64, jit, vectorize +from numba.tests.support import tag, CheckWarningsMixin +import unittest + + +pi = math.pi + + +def sinc(x): + if x == 0.0: + return 1.0 + else: + return math.sin(x * pi) / (pi * x) + +def scaled_sinc(x, scale): + if x == 0.0: + return scale + else: + return scale * (math.sin(x * pi) / (pi * x)) + +def vector_add(a, b): + return a + b + + +class BaseVectorizeDecor(object): + target = None + wrapper = None + funcs = { + 'func1': sinc, + 'func2': scaled_sinc, + 'func3': vector_add, + } + + @classmethod + def _run_and_compare(cls, func, sig, A, *args, **kwargs): + if cls.wrapper is not None: + func = cls.wrapper(func) + numba_func = vectorize(sig, target=cls.target)(func) + numpy_func = np.vectorize(func) + result = numba_func(A, *args) + gold = numpy_func(A, *args) + np.testing.assert_allclose(result, gold, **kwargs) + + def test_1(self): + sig = ['float64(float64)', 'float32(float32)'] + func = self.funcs['func1'] + A = np.arange(100, dtype=np.float64) + self._run_and_compare(func, sig, A) + + def test_2(self): + sig = [float64(float64), float32(float32)] + func = self.funcs['func1'] + A = np.arange(100, dtype=np.float64) + self._run_and_compare(func, sig, A) + + def test_3(self): + sig = ['float64(float64, uint32)'] + func = self.funcs['func2'] + A = np.arange(100, dtype=np.float64) + scale = np.uint32(3) + self._run_and_compare(func, sig, A, scale, atol=1e-8) + + def test_4(self): + sig = [ + int32(int32, int32), + uint32(uint32, uint32), + float32(float32, float32), + float64(float64, float64), + ] + func = self.funcs['func3'] + A = np.arange(100, dtype=np.float64) + self._run_and_compare(func, sig, A, A) + A = A.astype(np.float32) + self._run_and_compare(func, sig, A, A) + A = A.astype(np.int32) + self._run_and_compare(func, sig, A, A) + A = A.astype(np.uint32) + self._run_and_compare(func, sig, A, A) + + +class 
TestCPUVectorizeDecor(unittest.TestCase, BaseVectorizeDecor): + target = 'cpu' + + +class TestParallelVectorizeDecor(unittest.TestCase, BaseVectorizeDecor): + _numba_parallel_test_ = False + target = 'parallel' + + +class TestCPUVectorizeJitted(unittest.TestCase, BaseVectorizeDecor): + target = 'cpu' + wrapper = jit(nopython=True) + + +class BaseVectorizeNopythonArg(unittest.TestCase, CheckWarningsMixin): + """ + Test passing the nopython argument to the vectorize decorator. + """ + def _test_target_nopython(self, target, warnings, with_sig=True): + a = np.array([2.0], dtype=np.float32) + b = np.array([3.0], dtype=np.float32) + sig = [float32(float32, float32)] + args = with_sig and [sig] or [] + with self.check_warnings(warnings): + f = vectorize(*args, target=target, nopython=True)(vector_add) + f(a, b) + +class TestVectorizeNopythonArg(BaseVectorizeNopythonArg): + def test_target_cpu_nopython(self): + self._test_target_nopython('cpu', []) + + def test_target_cpu_nopython_no_sig(self): + self._test_target_nopython('cpu', [], False) + + def test_target_parallel_nopython(self): + self._test_target_nopython('parallel', []) + + +class BaseVectorizeUnrecognizedArg(unittest.TestCase, CheckWarningsMixin): + """ + Test passing an unrecognized argument to the vectorize decorator. 
+ """ + def _test_target_unrecognized_arg(self, target, with_sig=True): + a = np.array([2.0], dtype=np.float32) + b = np.array([3.0], dtype=np.float32) + sig = [float32(float32, float32)] + args = with_sig and [sig] or [] + with self.assertRaises(KeyError) as raises: + f = vectorize(*args, target=target, nonexistent=2)(vector_add) + f(a, b) + self.assertIn("Unrecognized options", str(raises.exception)) + +class TestVectorizeUnrecognizedArg(BaseVectorizeUnrecognizedArg): + def test_target_cpu_unrecognized_arg(self): + self._test_target_unrecognized_arg('cpu') + + def test_target_cpu_unrecognized_arg_no_sig(self): + self._test_target_unrecognized_arg('cpu', False) + + def test_target_parallel_unrecognized_arg(self): + self._test_target_unrecognized_arg('parallel') + + + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/tests/npyufunc/ufuncbuilding_usecases.py b/lib/python3.10/site-packages/numba/tests/npyufunc/ufuncbuilding_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..0e96dc300cf3642ee3a042bf55cda4885a98c547 --- /dev/null +++ b/lib/python3.10/site-packages/numba/tests/npyufunc/ufuncbuilding_usecases.py @@ -0,0 +1,69 @@ +from numba import vectorize + + +def add(a, b): + """An addition""" + return a + b + + +def equals(a, b): + return a == b + + +def mul(a, b): + """A multiplication""" + return a * b + + +def guadd(a, b, c): + """A generalized addition""" + x, y = c.shape + for i in range(x): + for j in range(y): + c[i, j] = a[i, j] + b[i, j] + + +@vectorize(nopython=True) +def inner(a, b): + return a + b + + +@vectorize(["int64(int64, int64)"], nopython=True) +def inner_explicit(a, b): + return a + b + + +def outer(a, b): + return inner(a, b) + + +def outer_explicit(a, b): + return inner_explicit(a, b) + + +class Dummy: + pass + + +def guadd_obj(a, b, c): + Dummy() # to force object mode + x, y = c.shape + for i in range(x): + for j in range(y): + c[i, j] = a[i, j] + b[i, j] + + +def 
guadd_scalar_obj(a, b, c): + Dummy() # to force object mode + x, y = c.shape + for i in range(x): + for j in range(y): + c[i, j] = a[i, j] + b + + +class MyException(Exception): + pass + + +def guerror(a, b, c): + raise MyException diff --git a/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__init__.py b/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391