import functools
import warnings

import numpy

import cupy
from cupy._core import core
from cupyx.jit import _compile
from cupyx.jit import _cuda_typerules
from cupyx.jit import _cuda_types
from cupyx.jit import _internal_types
from cupyx.jit._cuda_types import Scalar


class _CudaFunction:
    """JIT cupy function object
    """

    def __init__(self, func, mode, device=False, inline=False):
        self.attributes = []

        if device:
            self.attributes.append('__device__')
        else:
            self.attributes.append('__global__')

        if inline:
            self.attributes.append('inline')

        self.name = getattr(func, 'name', func.__name__)
        self.func = func
        self.mode = mode

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def _emit_code_from_types(self, in_types, ret_type=None):
        return _compile.transpile(
            self.func, self.attributes, self.mode, in_types, ret_type)


class _JitRawKernel:
    """JIT CUDA kernel object.

    The decorator :func:`cupyx.jit.rawkernel` converts the target function
    to an object of this class. This class is not intended to be instantiated
    by users.
    """

    def __init__(self, func, mode, device):
        self._func = func
        self._mode = mode
        self._device = device
        self._cache = {}
        self._cached_codes = {}

    def __call__(
            self, grid, block, args, shared_mem=0, stream=None):
        """Calls the CUDA kernel.

        Compilation is deferred until the first call. CuPy's JIT compiler
        infers the types of the given arguments at call time and caches the
        compiled kernels to speed up subsequent calls.

        Args:
            grid (tuple of int): Size of the grid in blocks.
            block (tuple of int): Dimensions of each thread block.
            args (tuple):
                Arguments of the kernel. Each element must be a ``bool``,
                ``int``, ``float``, ``complex``, a NumPy scalar, or a
                ``cupy.ndarray``.
            shared_mem (int):
                Dynamic shared-memory size per thread block in bytes.
            stream (cupy.cuda.Stream): CUDA stream.
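
        Example:
            A minimal launch sketch. ``elementwise_copy`` is assumed to be a
            function decorated with :func:`cupyx.jit.rawkernel` (see that
            decorator's docstring); the grid/block sizes are illustrative::

                x = cupy.arange(2 ** 20, dtype=cupy.float32)
                y = cupy.empty_like(x)
                # 128 blocks of 256 threads; the grid-stride loop inside the
                # kernel covers all ``x.size`` elements.
                elementwise_copy((128,), (256,), (x, y, cupy.uint32(x.size)))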

        .. seealso:: :ref:`jit_kernel_definition`
        """
        # Infer a CUDA type for each argument (arrays and scalars only).
        in_types = []
        for x in args:
            if isinstance(x, cupy.ndarray):
                t = _cuda_types.CArray.from_ndarray(x)
            elif numpy.isscalar(x):
                t = _cuda_typerules.get_ctype_from_scalar(self._mode, x)
            else:
                raise TypeError(f'{type(x)} is not supported for RawKernel')
            in_types.append(t)
        in_types = tuple(in_types)
        device_id = cupy.cuda.get_device_id()

        # Transpile and compile on the first call for this combination of
        # input types and device; reuse the cached kernel afterwards.
        kern, enable_cg = self._cache.get((in_types, device_id), (None, None))
        if kern is None:
            result = self._cached_codes.get(in_types)
            if result is None:
                result = _compile.transpile(
                    self._func,
                    ['extern "C"', '__global__'],
                    self._mode,
                    in_types,
                    _cuda_types.void,
                )
                self._cached_codes[in_types] = result

            fname = result.func_name
            enable_cg = result.enable_cooperative_groups
            options = result.options
            backend = result.backend
            if backend == 'nvcc':
                options += ('-DCUPY_JIT_NVCC',)
            jitify = result.jitify
            module = core.compile_with_cache(
                source=result.code,
                options=options,
                backend=backend,
                jitify=jitify)
            kern = module.get_function(fname)
            self._cache[(in_types, device_id)] = (kern, enable_cg)

        # Cast scalar arguments to the inferred dtypes (half-precision
        # scalars are passed to the kernel as float32).
        new_args = []
        for a, t in zip(args, in_types):
            if isinstance(t, Scalar):
                if t.dtype.char == 'e':
                    a = numpy.float32(a)
                else:
                    a = t.dtype.type(a)
            new_args.append(a)

        kern(grid, block, tuple(new_args), shared_mem, stream, enable_cg)

    def __getitem__(self, grid_and_block):
        """Numba-style kernel call.

        .. seealso:: :ref:`jit_kernel_definition`
        """
        grid, block = grid_and_block
        if not isinstance(grid, tuple):
            grid = (grid, 1, 1)
        if not isinstance(block, tuple):
            block = (block, 1, 1)
        return lambda *args, **kwargs: self(grid, block, args, **kwargs)

    @property
    def cached_codes(self):
        """Returns a dict whose keys are input types and whose values are
        the generated CUDA source codes.

        This property is provided for debugging purposes.
        The return value is not guaranteed to keep backward compatibility.
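
        Example (a sketch; ``elementwise_copy`` is illustrative and must have
        been launched at least once so that a code is cached)::

            for in_types, code in elementwise_copy.cached_codes.items():
                print(in_types)
                print(code)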
"""
if len(self._cached_codes) == 0:
warnings.warn(
'No codes are cached because compilation is deferred until '
'the first function call.')
return dict([(k, v.code) for k, v in self._cached_codes.items()])

    @property
    def cached_code(self):
        """Returns ``next(iter(self.cached_codes.values()))``.

        This property is provided for debugging purposes.
        The return value is not guaranteed to keep backward compatibility.
        """
        codes = self.cached_codes
        if len(codes) > 1:
            warnings.warn(
                'The kernel has been compiled for multiple sets of input '
                'types; an arbitrary one is returned. '
                'Please use `.cached_codes` instead.')
        return next(iter(codes.values()))


def rawkernel(*, mode='cuda', device=False):
    """A decorator that compiles a Python function into a CUDA kernel.
"""
    cupy._util.experimental('cupyx.jit.rawkernel')

    def wrapper(func):
        return functools.update_wrapper(
            _JitRawKernel(func, mode, device), func)

    return wrapper


threadIdx = _internal_types.Data('threadIdx', _cuda_types.dim3)
blockDim = _internal_types.Data('blockDim', _cuda_types.dim3)
blockIdx = _internal_types.Data('blockIdx', _cuda_types.dim3)
gridDim = _internal_types.Data('gridDim', _cuda_types.dim3)
warpsize = _internal_types.Data('warpSize', _cuda_types.int32)
warpsize.__doc__ = r"""Returns the number of threads in a warp.

.. seealso:: :obj:`numba.cuda.warpsize`
"""