Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so +3 -0
- lib/python3.10/site-packages/numba/cpython/cmathimpl.py +542 -0
- lib/python3.10/site-packages/numba/cpython/hashing.py +10 -0
- lib/python3.10/site-packages/numba/cpython/listobj.py +1260 -0
- lib/python3.10/site-packages/numba/cpython/old_hashing.py +743 -0
- lib/python3.10/site-packages/numba/cpython/setobj.py +1711 -0
- lib/python3.10/site-packages/numba/cpython/unicode_support.py +768 -0
- lib/python3.10/site-packages/numba/cuda/cudadrv/devices.py +248 -0
- lib/python3.10/site-packages/numba/cuda/cudadrv/libs.py +176 -0
- lib/python3.10/site-packages/numba/cuda/cudadrv/ndarray.py +20 -0
- lib/python3.10/site-packages/numba/cuda/cudadrv/nvvm.py +707 -0
- lib/python3.10/site-packages/numba/cuda/cudadrv/rtapi.py +10 -0
- lib/python3.10/site-packages/numba/cuda/kernels/__init__.py +0 -0
- lib/python3.10/site-packages/numba/cuda/kernels/reduction.py +262 -0
- lib/python3.10/site-packages/numba/cuda/kernels/transpose.py +65 -0
- lib/python3.10/site-packages/numba/cuda/simulator/__init__.py +38 -0
- lib/python3.10/site-packages/numba/cuda/simulator/api.py +110 -0
- lib/python3.10/site-packages/numba/cuda/simulator/compiler.py +9 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/__init__.py +2 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devicearray.py +436 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devices.py +117 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/driver.py +62 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/drvapi.py +4 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/dummyarray.py +4 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/error.py +6 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/libs.py +2 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/nvvm.py +29 -0
- lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/runtime.py +19 -0
- lib/python3.10/site-packages/numba/cuda/simulator/kernel.py +308 -0
- lib/python3.10/site-packages/numba/cuda/simulator/kernelapi.py +495 -0
- lib/python3.10/site-packages/numba/cuda/simulator/reduction.py +15 -0
- lib/python3.10/site-packages/numba/cuda/simulator/vector_types.py +63 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/__init__.py +8 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_array_attr.py +145 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_context_stack.py +145 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py +375 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_auto_context.py +21 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py +179 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_driver.py +235 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_libraries.py +22 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_memory.py +193 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +547 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_deallocations.py +249 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_detect.py +81 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_emm_plugins.py +192 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_events.py +38 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_host_alloc.py +65 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_init.py +139 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_inline_ptx.py +37 -0
.gitattributes
CHANGED
|
@@ -94,3 +94,4 @@ lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so fil
|
|
| 94 |
lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 95 |
lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 96 |
lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 94 |
lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 95 |
lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 96 |
lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 97 |
+
lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1808806ad22b1e8cca102941bf36917bcb2ebb81801a051088e7c0dd2577a31e
|
| 3 |
+
size 720649
|
lib/python3.10/site-packages/numba/cpython/cmathimpl.py
ADDED
|
@@ -0,0 +1,542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implement the cmath module functions.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import cmath
|
| 7 |
+
import math
|
| 8 |
+
|
| 9 |
+
from numba.core.imputils import Registry, impl_ret_untracked
|
| 10 |
+
from numba.core import types, cgutils
|
| 11 |
+
from numba.core.typing import signature
|
| 12 |
+
from numba.cpython import builtins, mathimpl
|
| 13 |
+
from numba.core.extending import overload
|
| 14 |
+
|
| 15 |
+
registry = Registry('cmathimpl')
|
| 16 |
+
lower = registry.lower
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def is_nan(builder, z):
|
| 20 |
+
return builder.fcmp_unordered('uno', z.real, z.imag)
|
| 21 |
+
|
| 22 |
+
def is_inf(builder, z):
|
| 23 |
+
return builder.or_(mathimpl.is_inf(builder, z.real),
|
| 24 |
+
mathimpl.is_inf(builder, z.imag))
|
| 25 |
+
|
| 26 |
+
def is_finite(builder, z):
|
| 27 |
+
return builder.and_(mathimpl.is_finite(builder, z.real),
|
| 28 |
+
mathimpl.is_finite(builder, z.imag))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@lower(cmath.isnan, types.Complex)
|
| 32 |
+
def isnan_float_impl(context, builder, sig, args):
|
| 33 |
+
[typ] = sig.args
|
| 34 |
+
[value] = args
|
| 35 |
+
z = context.make_complex(builder, typ, value=value)
|
| 36 |
+
res = is_nan(builder, z)
|
| 37 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 38 |
+
|
| 39 |
+
@lower(cmath.isinf, types.Complex)
|
| 40 |
+
def isinf_float_impl(context, builder, sig, args):
|
| 41 |
+
[typ] = sig.args
|
| 42 |
+
[value] = args
|
| 43 |
+
z = context.make_complex(builder, typ, value=value)
|
| 44 |
+
res = is_inf(builder, z)
|
| 45 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@lower(cmath.isfinite, types.Complex)
|
| 49 |
+
def isfinite_float_impl(context, builder, sig, args):
|
| 50 |
+
[typ] = sig.args
|
| 51 |
+
[value] = args
|
| 52 |
+
z = context.make_complex(builder, typ, value=value)
|
| 53 |
+
res = is_finite(builder, z)
|
| 54 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@overload(cmath.rect)
|
| 58 |
+
def impl_cmath_rect(r, phi):
|
| 59 |
+
if all([isinstance(typ, types.Float) for typ in [r, phi]]):
|
| 60 |
+
def impl(r, phi):
|
| 61 |
+
if not math.isfinite(phi):
|
| 62 |
+
if not r:
|
| 63 |
+
# cmath.rect(0, phi={inf, nan}) = 0
|
| 64 |
+
return abs(r)
|
| 65 |
+
if math.isinf(r):
|
| 66 |
+
# cmath.rect(inf, phi={inf, nan}) = inf + j phi
|
| 67 |
+
return complex(r, phi)
|
| 68 |
+
real = math.cos(phi)
|
| 69 |
+
imag = math.sin(phi)
|
| 70 |
+
if real == 0. and math.isinf(r):
|
| 71 |
+
# 0 * inf would return NaN, we want to keep 0 but xor the sign
|
| 72 |
+
real /= r
|
| 73 |
+
else:
|
| 74 |
+
real *= r
|
| 75 |
+
if imag == 0. and math.isinf(r):
|
| 76 |
+
# ditto
|
| 77 |
+
imag /= r
|
| 78 |
+
else:
|
| 79 |
+
imag *= r
|
| 80 |
+
return complex(real, imag)
|
| 81 |
+
return impl
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def intrinsic_complex_unary(inner_func):
|
| 85 |
+
def wrapper(context, builder, sig, args):
|
| 86 |
+
[typ] = sig.args
|
| 87 |
+
[value] = args
|
| 88 |
+
z = context.make_complex(builder, typ, value=value)
|
| 89 |
+
x = z.real
|
| 90 |
+
y = z.imag
|
| 91 |
+
# Same as above: math.isfinite() is unavailable on 2.x so we precompute
|
| 92 |
+
# its value and pass it to the pure Python implementation.
|
| 93 |
+
x_is_finite = mathimpl.is_finite(builder, x)
|
| 94 |
+
y_is_finite = mathimpl.is_finite(builder, y)
|
| 95 |
+
inner_sig = signature(sig.return_type,
|
| 96 |
+
*(typ.underlying_float,) * 2 + (types.boolean,) * 2)
|
| 97 |
+
res = context.compile_internal(builder, inner_func, inner_sig,
|
| 98 |
+
(x, y, x_is_finite, y_is_finite))
|
| 99 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 100 |
+
return wrapper
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
NAN = float('nan')
|
| 104 |
+
INF = float('inf')
|
| 105 |
+
|
| 106 |
+
@lower(cmath.exp, types.Complex)
|
| 107 |
+
@intrinsic_complex_unary
|
| 108 |
+
def exp_impl(x, y, x_is_finite, y_is_finite):
|
| 109 |
+
"""cmath.exp(x + y j)"""
|
| 110 |
+
if x_is_finite:
|
| 111 |
+
if y_is_finite:
|
| 112 |
+
c = math.cos(y)
|
| 113 |
+
s = math.sin(y)
|
| 114 |
+
r = math.exp(x)
|
| 115 |
+
return complex(r * c, r * s)
|
| 116 |
+
else:
|
| 117 |
+
return complex(NAN, NAN)
|
| 118 |
+
elif math.isnan(x):
|
| 119 |
+
if y:
|
| 120 |
+
return complex(x, x) # nan + j nan
|
| 121 |
+
else:
|
| 122 |
+
return complex(x, y) # nan + 0j
|
| 123 |
+
elif x > 0.0:
|
| 124 |
+
# x == +inf
|
| 125 |
+
if y_is_finite:
|
| 126 |
+
real = math.cos(y)
|
| 127 |
+
imag = math.sin(y)
|
| 128 |
+
# Avoid NaNs if math.cos(y) or math.sin(y) == 0
|
| 129 |
+
# (e.g. cmath.exp(inf + 0j) == inf + 0j)
|
| 130 |
+
if real != 0:
|
| 131 |
+
real *= x
|
| 132 |
+
if imag != 0:
|
| 133 |
+
imag *= x
|
| 134 |
+
return complex(real, imag)
|
| 135 |
+
else:
|
| 136 |
+
return complex(x, NAN)
|
| 137 |
+
else:
|
| 138 |
+
# x == -inf
|
| 139 |
+
if y_is_finite:
|
| 140 |
+
r = math.exp(x)
|
| 141 |
+
c = math.cos(y)
|
| 142 |
+
s = math.sin(y)
|
| 143 |
+
return complex(r * c, r * s)
|
| 144 |
+
else:
|
| 145 |
+
r = 0
|
| 146 |
+
return complex(r, r)
|
| 147 |
+
|
| 148 |
+
@lower(cmath.log, types.Complex)
|
| 149 |
+
@intrinsic_complex_unary
|
| 150 |
+
def log_impl(x, y, x_is_finite, y_is_finite):
|
| 151 |
+
"""cmath.log(x + y j)"""
|
| 152 |
+
a = math.log(math.hypot(x, y))
|
| 153 |
+
b = math.atan2(y, x)
|
| 154 |
+
return complex(a, b)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@lower(cmath.log, types.Complex, types.Complex)
|
| 158 |
+
def log_base_impl(context, builder, sig, args):
|
| 159 |
+
"""cmath.log(z, base)"""
|
| 160 |
+
[z, base] = args
|
| 161 |
+
|
| 162 |
+
def log_base(z, base):
|
| 163 |
+
return cmath.log(z) / cmath.log(base)
|
| 164 |
+
|
| 165 |
+
res = context.compile_internal(builder, log_base, sig, args)
|
| 166 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@overload(cmath.log10)
|
| 170 |
+
def impl_cmath_log10(z):
|
| 171 |
+
if not isinstance(z, types.Complex):
|
| 172 |
+
return
|
| 173 |
+
|
| 174 |
+
LN_10 = 2.302585092994045684
|
| 175 |
+
|
| 176 |
+
def log10_impl(z):
|
| 177 |
+
"""cmath.log10(z)"""
|
| 178 |
+
z = cmath.log(z)
|
| 179 |
+
# This formula gives better results on +/-inf than cmath.log(z, 10)
|
| 180 |
+
# See http://bugs.python.org/issue22544
|
| 181 |
+
return complex(z.real / LN_10, z.imag / LN_10)
|
| 182 |
+
|
| 183 |
+
return log10_impl
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@overload(cmath.phase)
|
| 187 |
+
def phase_impl(x):
|
| 188 |
+
"""cmath.phase(x + y j)"""
|
| 189 |
+
|
| 190 |
+
if not isinstance(x, types.Complex):
|
| 191 |
+
return
|
| 192 |
+
|
| 193 |
+
def impl(x):
|
| 194 |
+
return math.atan2(x.imag, x.real)
|
| 195 |
+
return impl
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@overload(cmath.polar)
|
| 199 |
+
def polar_impl(x):
|
| 200 |
+
if not isinstance(x, types.Complex):
|
| 201 |
+
return
|
| 202 |
+
|
| 203 |
+
def impl(x):
|
| 204 |
+
r, i = x.real, x.imag
|
| 205 |
+
return math.hypot(r, i), math.atan2(i, r)
|
| 206 |
+
return impl
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
@lower(cmath.sqrt, types.Complex)
|
| 210 |
+
def sqrt_impl(context, builder, sig, args):
|
| 211 |
+
# We risk spurious overflow for components >= FLT_MAX / (1 + sqrt(2)).
|
| 212 |
+
|
| 213 |
+
SQRT2 = 1.414213562373095048801688724209698079E0
|
| 214 |
+
ONE_PLUS_SQRT2 = (1. + SQRT2)
|
| 215 |
+
theargflt = sig.args[0].underlying_float
|
| 216 |
+
# Get a type specific maximum value so scaling for overflow is based on that
|
| 217 |
+
MAX = mathimpl.DBL_MAX if theargflt.bitwidth == 64 else mathimpl.FLT_MAX
|
| 218 |
+
# THRES will be double precision, should not impact typing as it's just
|
| 219 |
+
# used for comparison, there *may* be a few values near THRES which
|
| 220 |
+
# deviate from e.g. NumPy due to rounding that occurs in the computation
|
| 221 |
+
# of this value in the case of a 32bit argument.
|
| 222 |
+
THRES = MAX / ONE_PLUS_SQRT2
|
| 223 |
+
|
| 224 |
+
def sqrt_impl(z):
|
| 225 |
+
"""cmath.sqrt(z)"""
|
| 226 |
+
# This is NumPy's algorithm, see npy_csqrt() in npy_math_complex.c.src
|
| 227 |
+
a = z.real
|
| 228 |
+
b = z.imag
|
| 229 |
+
if a == 0.0 and b == 0.0:
|
| 230 |
+
return complex(abs(b), b)
|
| 231 |
+
if math.isinf(b):
|
| 232 |
+
return complex(abs(b), b)
|
| 233 |
+
if math.isnan(a):
|
| 234 |
+
return complex(a, a)
|
| 235 |
+
if math.isinf(a):
|
| 236 |
+
if a < 0.0:
|
| 237 |
+
return complex(abs(b - b), math.copysign(a, b))
|
| 238 |
+
else:
|
| 239 |
+
return complex(a, math.copysign(b - b, b))
|
| 240 |
+
|
| 241 |
+
# The remaining special case (b is NaN) is handled just fine by
|
| 242 |
+
# the normal code path below.
|
| 243 |
+
|
| 244 |
+
# Scale to avoid overflow
|
| 245 |
+
if abs(a) >= THRES or abs(b) >= THRES:
|
| 246 |
+
a *= 0.25
|
| 247 |
+
b *= 0.25
|
| 248 |
+
scale = True
|
| 249 |
+
else:
|
| 250 |
+
scale = False
|
| 251 |
+
# Algorithm 312, CACM vol 10, Oct 1967
|
| 252 |
+
if a >= 0:
|
| 253 |
+
t = math.sqrt((a + math.hypot(a, b)) * 0.5)
|
| 254 |
+
real = t
|
| 255 |
+
imag = b / (2 * t)
|
| 256 |
+
else:
|
| 257 |
+
t = math.sqrt((-a + math.hypot(a, b)) * 0.5)
|
| 258 |
+
real = abs(b) / (2 * t)
|
| 259 |
+
imag = math.copysign(t, b)
|
| 260 |
+
# Rescale
|
| 261 |
+
if scale:
|
| 262 |
+
return complex(real * 2, imag)
|
| 263 |
+
else:
|
| 264 |
+
return complex(real, imag)
|
| 265 |
+
|
| 266 |
+
res = context.compile_internal(builder, sqrt_impl, sig, args)
|
| 267 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
@lower(cmath.cos, types.Complex)
|
| 271 |
+
def cos_impl(context, builder, sig, args):
|
| 272 |
+
def cos_impl(z):
|
| 273 |
+
"""cmath.cos(z) = cmath.cosh(z j)"""
|
| 274 |
+
return cmath.cosh(complex(-z.imag, z.real))
|
| 275 |
+
|
| 276 |
+
res = context.compile_internal(builder, cos_impl, sig, args)
|
| 277 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 278 |
+
|
| 279 |
+
@overload(cmath.cosh)
|
| 280 |
+
def impl_cmath_cosh(z):
|
| 281 |
+
if not isinstance(z, types.Complex):
|
| 282 |
+
return
|
| 283 |
+
|
| 284 |
+
def cosh_impl(z):
|
| 285 |
+
"""cmath.cosh(z)"""
|
| 286 |
+
x = z.real
|
| 287 |
+
y = z.imag
|
| 288 |
+
if math.isinf(x):
|
| 289 |
+
if math.isnan(y):
|
| 290 |
+
# x = +inf, y = NaN => cmath.cosh(x + y j) = inf + Nan * j
|
| 291 |
+
real = abs(x)
|
| 292 |
+
imag = y
|
| 293 |
+
elif y == 0.0:
|
| 294 |
+
# x = +inf, y = 0 => cmath.cosh(x + y j) = inf + 0j
|
| 295 |
+
real = abs(x)
|
| 296 |
+
imag = y
|
| 297 |
+
else:
|
| 298 |
+
real = math.copysign(x, math.cos(y))
|
| 299 |
+
imag = math.copysign(x, math.sin(y))
|
| 300 |
+
if x < 0.0:
|
| 301 |
+
# x = -inf => negate imaginary part of result
|
| 302 |
+
imag = -imag
|
| 303 |
+
return complex(real, imag)
|
| 304 |
+
return complex(math.cos(y) * math.cosh(x),
|
| 305 |
+
math.sin(y) * math.sinh(x))
|
| 306 |
+
return cosh_impl
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
@lower(cmath.sin, types.Complex)
|
| 310 |
+
def sin_impl(context, builder, sig, args):
|
| 311 |
+
def sin_impl(z):
|
| 312 |
+
"""cmath.sin(z) = -j * cmath.sinh(z j)"""
|
| 313 |
+
r = cmath.sinh(complex(-z.imag, z.real))
|
| 314 |
+
return complex(r.imag, -r.real)
|
| 315 |
+
|
| 316 |
+
res = context.compile_internal(builder, sin_impl, sig, args)
|
| 317 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 318 |
+
|
| 319 |
+
@overload(cmath.sinh)
|
| 320 |
+
def impl_cmath_sinh(z):
|
| 321 |
+
if not isinstance(z, types.Complex):
|
| 322 |
+
return
|
| 323 |
+
|
| 324 |
+
def sinh_impl(z):
|
| 325 |
+
"""cmath.sinh(z)"""
|
| 326 |
+
x = z.real
|
| 327 |
+
y = z.imag
|
| 328 |
+
if math.isinf(x):
|
| 329 |
+
if math.isnan(y):
|
| 330 |
+
# x = +/-inf, y = NaN => cmath.sinh(x + y j) = x + NaN * j
|
| 331 |
+
real = x
|
| 332 |
+
imag = y
|
| 333 |
+
else:
|
| 334 |
+
real = math.cos(y)
|
| 335 |
+
imag = math.sin(y)
|
| 336 |
+
if real != 0.:
|
| 337 |
+
real *= x
|
| 338 |
+
if imag != 0.:
|
| 339 |
+
imag *= abs(x)
|
| 340 |
+
return complex(real, imag)
|
| 341 |
+
return complex(math.cos(y) * math.sinh(x),
|
| 342 |
+
math.sin(y) * math.cosh(x))
|
| 343 |
+
return sinh_impl
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
@lower(cmath.tan, types.Complex)
|
| 347 |
+
def tan_impl(context, builder, sig, args):
|
| 348 |
+
def tan_impl(z):
|
| 349 |
+
"""cmath.tan(z) = -j * cmath.tanh(z j)"""
|
| 350 |
+
r = cmath.tanh(complex(-z.imag, z.real))
|
| 351 |
+
return complex(r.imag, -r.real)
|
| 352 |
+
|
| 353 |
+
res = context.compile_internal(builder, tan_impl, sig, args)
|
| 354 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
@overload(cmath.tanh)
|
| 358 |
+
def impl_cmath_tanh(z):
|
| 359 |
+
if not isinstance(z, types.Complex):
|
| 360 |
+
return
|
| 361 |
+
|
| 362 |
+
def tanh_impl(z):
|
| 363 |
+
"""cmath.tanh(z)"""
|
| 364 |
+
x = z.real
|
| 365 |
+
y = z.imag
|
| 366 |
+
if math.isinf(x):
|
| 367 |
+
real = math.copysign(1., x)
|
| 368 |
+
if math.isinf(y):
|
| 369 |
+
imag = 0.
|
| 370 |
+
else:
|
| 371 |
+
imag = math.copysign(0., math.sin(2. * y))
|
| 372 |
+
return complex(real, imag)
|
| 373 |
+
# This is CPython's algorithm (see c_tanh() in cmathmodule.c).
|
| 374 |
+
# XXX how to force float constants into single precision?
|
| 375 |
+
tx = math.tanh(x)
|
| 376 |
+
ty = math.tan(y)
|
| 377 |
+
cx = 1. / math.cosh(x)
|
| 378 |
+
txty = tx * ty
|
| 379 |
+
denom = 1. + txty * txty
|
| 380 |
+
return complex(
|
| 381 |
+
tx * (1. + ty * ty) / denom,
|
| 382 |
+
((ty / denom) * cx) * cx)
|
| 383 |
+
|
| 384 |
+
return tanh_impl
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
@lower(cmath.acos, types.Complex)
|
| 388 |
+
def acos_impl(context, builder, sig, args):
|
| 389 |
+
LN_4 = math.log(4)
|
| 390 |
+
THRES = mathimpl.FLT_MAX / 4
|
| 391 |
+
|
| 392 |
+
def acos_impl(z):
|
| 393 |
+
"""cmath.acos(z)"""
|
| 394 |
+
# CPython's algorithm (see c_acos() in cmathmodule.c)
|
| 395 |
+
if abs(z.real) > THRES or abs(z.imag) > THRES:
|
| 396 |
+
# Avoid unnecessary overflow for large arguments
|
| 397 |
+
# (also handles infinities gracefully)
|
| 398 |
+
real = math.atan2(abs(z.imag), z.real)
|
| 399 |
+
imag = math.copysign(
|
| 400 |
+
math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4,
|
| 401 |
+
-z.imag)
|
| 402 |
+
return complex(real, imag)
|
| 403 |
+
else:
|
| 404 |
+
s1 = cmath.sqrt(complex(1. - z.real, -z.imag))
|
| 405 |
+
s2 = cmath.sqrt(complex(1. + z.real, z.imag))
|
| 406 |
+
real = 2. * math.atan2(s1.real, s2.real)
|
| 407 |
+
imag = math.asinh(s2.real * s1.imag - s2.imag * s1.real)
|
| 408 |
+
return complex(real, imag)
|
| 409 |
+
|
| 410 |
+
res = context.compile_internal(builder, acos_impl, sig, args)
|
| 411 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 412 |
+
|
| 413 |
+
@overload(cmath.acosh)
|
| 414 |
+
def impl_cmath_acosh(z):
|
| 415 |
+
if not isinstance(z, types.Complex):
|
| 416 |
+
return
|
| 417 |
+
|
| 418 |
+
LN_4 = math.log(4)
|
| 419 |
+
THRES = mathimpl.FLT_MAX / 4
|
| 420 |
+
|
| 421 |
+
def acosh_impl(z):
|
| 422 |
+
"""cmath.acosh(z)"""
|
| 423 |
+
# CPython's algorithm (see c_acosh() in cmathmodule.c)
|
| 424 |
+
if abs(z.real) > THRES or abs(z.imag) > THRES:
|
| 425 |
+
# Avoid unnecessary overflow for large arguments
|
| 426 |
+
# (also handles infinities gracefully)
|
| 427 |
+
real = math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4
|
| 428 |
+
imag = math.atan2(z.imag, z.real)
|
| 429 |
+
return complex(real, imag)
|
| 430 |
+
else:
|
| 431 |
+
s1 = cmath.sqrt(complex(z.real - 1., z.imag))
|
| 432 |
+
s2 = cmath.sqrt(complex(z.real + 1., z.imag))
|
| 433 |
+
real = math.asinh(s1.real * s2.real + s1.imag * s2.imag)
|
| 434 |
+
imag = 2. * math.atan2(s1.imag, s2.real)
|
| 435 |
+
return complex(real, imag)
|
| 436 |
+
# Condensed formula (NumPy)
|
| 437 |
+
#return cmath.log(z + cmath.sqrt(z + 1.) * cmath.sqrt(z - 1.))
|
| 438 |
+
|
| 439 |
+
return acosh_impl
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
@lower(cmath.asinh, types.Complex)
|
| 443 |
+
def asinh_impl(context, builder, sig, args):
|
| 444 |
+
LN_4 = math.log(4)
|
| 445 |
+
THRES = mathimpl.FLT_MAX / 4
|
| 446 |
+
|
| 447 |
+
def asinh_impl(z):
|
| 448 |
+
"""cmath.asinh(z)"""
|
| 449 |
+
# CPython's algorithm (see c_asinh() in cmathmodule.c)
|
| 450 |
+
if abs(z.real) > THRES or abs(z.imag) > THRES:
|
| 451 |
+
real = math.copysign(
|
| 452 |
+
math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4,
|
| 453 |
+
z.real)
|
| 454 |
+
imag = math.atan2(z.imag, abs(z.real))
|
| 455 |
+
return complex(real, imag)
|
| 456 |
+
else:
|
| 457 |
+
s1 = cmath.sqrt(complex(1. + z.imag, -z.real))
|
| 458 |
+
s2 = cmath.sqrt(complex(1. - z.imag, z.real))
|
| 459 |
+
real = math.asinh(s1.real * s2.imag - s2.real * s1.imag)
|
| 460 |
+
imag = math.atan2(z.imag, s1.real * s2.real - s1.imag * s2.imag)
|
| 461 |
+
return complex(real, imag)
|
| 462 |
+
|
| 463 |
+
res = context.compile_internal(builder, asinh_impl, sig, args)
|
| 464 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 465 |
+
|
| 466 |
+
@lower(cmath.asin, types.Complex)
|
| 467 |
+
def asin_impl(context, builder, sig, args):
|
| 468 |
+
def asin_impl(z):
|
| 469 |
+
"""cmath.asin(z) = -j * cmath.asinh(z j)"""
|
| 470 |
+
r = cmath.asinh(complex(-z.imag, z.real))
|
| 471 |
+
return complex(r.imag, -r.real)
|
| 472 |
+
|
| 473 |
+
res = context.compile_internal(builder, asin_impl, sig, args)
|
| 474 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 475 |
+
|
| 476 |
+
@lower(cmath.atan, types.Complex)
|
| 477 |
+
def atan_impl(context, builder, sig, args):
|
| 478 |
+
def atan_impl(z):
|
| 479 |
+
"""cmath.atan(z) = -j * cmath.atanh(z j)"""
|
| 480 |
+
r = cmath.atanh(complex(-z.imag, z.real))
|
| 481 |
+
if math.isinf(z.real) and math.isnan(z.imag):
|
| 482 |
+
# XXX this is odd but necessary
|
| 483 |
+
return complex(r.imag, r.real)
|
| 484 |
+
else:
|
| 485 |
+
return complex(r.imag, -r.real)
|
| 486 |
+
|
| 487 |
+
res = context.compile_internal(builder, atan_impl, sig, args)
|
| 488 |
+
return impl_ret_untracked(context, builder, sig, res)
|
| 489 |
+
|
| 490 |
+
@lower(cmath.atanh, types.Complex)
|
| 491 |
+
def atanh_impl(context, builder, sig, args):
|
| 492 |
+
LN_4 = math.log(4)
|
| 493 |
+
THRES_LARGE = math.sqrt(mathimpl.FLT_MAX / 4)
|
| 494 |
+
THRES_SMALL = math.sqrt(mathimpl.FLT_MIN)
|
| 495 |
+
PI_12 = math.pi / 2
|
| 496 |
+
|
| 497 |
+
def atanh_impl(z):
|
| 498 |
+
"""cmath.atanh(z)"""
|
| 499 |
+
# CPython's algorithm (see c_atanh() in cmathmodule.c)
|
| 500 |
+
if z.real < 0.:
|
| 501 |
+
# Reduce to case where z.real >= 0., using atanh(z) = -atanh(-z).
|
| 502 |
+
negate = True
|
| 503 |
+
z = -z
|
| 504 |
+
else:
|
| 505 |
+
negate = False
|
| 506 |
+
|
| 507 |
+
ay = abs(z.imag)
|
| 508 |
+
if math.isnan(z.real) or z.real > THRES_LARGE or ay > THRES_LARGE:
|
| 509 |
+
if math.isinf(z.imag):
|
| 510 |
+
real = math.copysign(0., z.real)
|
| 511 |
+
elif math.isinf(z.real):
|
| 512 |
+
real = 0.
|
| 513 |
+
else:
|
| 514 |
+
# may be safe from overflow, depending on hypot's implementation...
|
| 515 |
+
h = math.hypot(z.real * 0.5, z.imag * 0.5)
|
| 516 |
+
real = z.real/4./h/h
|
| 517 |
+
imag = -math.copysign(PI_12, -z.imag)
|
| 518 |
+
elif z.real == 1. and ay < THRES_SMALL:
|
| 519 |
+
# C99 standard says: atanh(1+/-0.) should be inf +/- 0j
|
| 520 |
+
if ay == 0.:
|
| 521 |
+
real = INF
|
| 522 |
+
imag = z.imag
|
| 523 |
+
else:
|
| 524 |
+
real = -math.log(math.sqrt(ay) /
|
| 525 |
+
math.sqrt(math.hypot(ay, 2.)))
|
| 526 |
+
imag = math.copysign(math.atan2(2., -ay) / 2, z.imag)
|
| 527 |
+
else:
|
| 528 |
+
sqay = ay * ay
|
| 529 |
+
zr1 = 1 - z.real
|
| 530 |
+
real = math.log1p(4. * z.real / (zr1 * zr1 + sqay)) * 0.25
|
| 531 |
+
imag = -math.atan2(-2. * z.imag,
|
| 532 |
+
zr1 * (1 + z.real) - sqay) * 0.5
|
| 533 |
+
|
| 534 |
+
if math.isnan(z.imag):
|
| 535 |
+
imag = NAN
|
| 536 |
+
if negate:
|
| 537 |
+
return complex(-real, -imag)
|
| 538 |
+
else:
|
| 539 |
+
return complex(real, imag)
|
| 540 |
+
|
| 541 |
+
res = context.compile_internal(builder, atanh_impl, sig, args)
|
| 542 |
+
return impl_ret_untracked(context, builder, sig, res)
|
lib/python3.10/site-packages/numba/cpython/hashing.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from numba.core.utils import _RedirectSubpackage
|
| 3 |
+
from numba.core import config
|
| 4 |
+
|
| 5 |
+
if config.USE_LEGACY_TYPE_SYSTEM:
|
| 6 |
+
sys.modules[__name__] = _RedirectSubpackage(locals(),
|
| 7 |
+
"numba.cpython.old_hashing")
|
| 8 |
+
else:
|
| 9 |
+
sys.modules[__name__] = _RedirectSubpackage(locals(),
|
| 10 |
+
"numba.cpython.new_hashing")
|
lib/python3.10/site-packages/numba/cpython/listobj.py
ADDED
|
@@ -0,0 +1,1260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Support for native homogeneous lists.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import math
|
| 7 |
+
import operator
|
| 8 |
+
from functools import cached_property
|
| 9 |
+
|
| 10 |
+
from llvmlite import ir
|
| 11 |
+
from numba.core import types, typing, errors, cgutils, config
|
| 12 |
+
from numba.core.imputils import (lower_builtin, lower_cast,
|
| 13 |
+
iternext_impl, impl_ret_borrowed,
|
| 14 |
+
impl_ret_new_ref, impl_ret_untracked,
|
| 15 |
+
RefType)
|
| 16 |
+
from numba.core.extending import overload_method, overload
|
| 17 |
+
from numba.misc import quicksort
|
| 18 |
+
from numba.cpython import slicing
|
| 19 |
+
from numba import literal_unroll
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_list_payload(context, builder, list_type, value):
    """
    Return the payload structure of a list value, as a reference so
    that any mutation is seen by every holder of the list.
    """
    pl_ty = types.ListPayload(list_type)
    raw_ptr = context.nrt.meminfo_data(builder, value.meminfo)
    typed_ptr = builder.bitcast(raw_ptr,
                                context.get_data_type(pl_ty).as_pointer())
    return context.make_data_helper(builder, pl_ty, ref=typed_ptr)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_itemsize(context, list_type):
    """
    Return the ABI size (in bytes) of one item of the given list type.
    """
    return context.get_abi_sizeof(context.get_data_type(list_type.dtype))
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class _ListPayloadMixin(object):
    """
    Mixin providing index/slice helpers over a list payload.

    Subclasses must provide ``_context``, ``_builder``, ``_payload``,
    ``_datamodel`` and ``dtype`` attributes (see ListInstance and
    ListPayloadAccessor below).
    """

    @property
    def size(self):
        # Current number of items (an LLVM intp value).
        return self._payload.size

    @size.setter
    def size(self, value):
        self._payload.size = value

    @property
    def dirty(self):
        # Dirty flag of the payload (used for reflected lists).
        return self._payload.dirty

    @property
    def data(self):
        # Pointer to the first element of the inline data array.
        return self._payload._get_ptr_by_name('data')

    def _gep(self, idx):
        # Pointer to the element at *idx*.
        return cgutils.gep(self._builder, self.data, idx)

    def getitem(self, idx):
        """
        Load the item at *idx* and convert it from its data
        representation to its value representation.
        """
        ptr = self._gep(idx)
        data_item = self._builder.load(ptr)
        return self._datamodel.from_data(self._builder, data_item)

    def fix_index(self, idx):
        """
        Fix negative indices by adding the size to them.  Positive
        indices are left untouched.
        """
        is_negative = self._builder.icmp_signed('<', idx,
                                                ir.Constant(idx.type, 0))
        wrapped_index = self._builder.add(idx, self.size)
        return self._builder.select(is_negative, wrapped_index, idx)

    def is_out_of_bounds(self, idx):
        """
        Return whether the index is out of bounds (idx < 0 or idx >= size).
        """
        underflow = self._builder.icmp_signed('<', idx,
                                              ir.Constant(idx.type, 0))
        overflow = self._builder.icmp_signed('>=', idx, self.size)
        return self._builder.or_(underflow, overflow)

    def clamp_index(self, idx):
        """
        Clamp the index in [0, size].
        """
        builder = self._builder
        # Stack slot so that each branch can overwrite the index in place.
        idxptr = cgutils.alloca_once_value(builder, idx)

        zero = ir.Constant(idx.type, 0)
        size = self.size

        underflow = self._builder.icmp_signed('<', idx, zero)
        with builder.if_then(underflow, likely=False):
            builder.store(zero, idxptr)
        overflow = self._builder.icmp_signed('>=', idx, size)
        with builder.if_then(overflow, likely=False):
            builder.store(size, idxptr)

        return builder.load(idxptr)

    def guard_index(self, idx, msg):
        """
        Raise an IndexError (with message *msg*) if the index is out
        of bounds.
        """
        with self._builder.if_then(self.is_out_of_bounds(idx), likely=False):
            self._context.call_conv.return_user_exc(self._builder,
                                                    IndexError, (msg,))

    def fix_slice(self, slice):
        """
        Fix slice start and stop to be valid (inclusive and exclusive, resp)
        indexing bounds.
        """
        return slicing.fix_slice(self._builder, slice, self.size)

    def incref_value(self, val):
        "Incref an element value"
        self._context.nrt.incref(self._builder, self.dtype, val)

    def decref_value(self, val):
        "Decref an element value"
        self._context.nrt.decref(self._builder, self.dtype, val)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class ListPayloadAccessor(_ListPayloadMixin):
    """
    A helper object to access the list attributes given a raw pointer
    to the payload (used e.g. inside the destructor, where only the
    payload pointer is available).
    """
    def __init__(self, context, builder, list_type, payload_ptr):
        self._context = context
        self._builder = builder
        self._ty = list_type
        self._datamodel = context.data_model_manager[list_type.dtype]
        pl_ty = types.ListPayload(list_type)
        typed_ptr = builder.bitcast(payload_ptr,
                                    context.get_data_type(pl_ty).as_pointer())
        self._payload = context.make_data_helper(builder, pl_ty,
                                                 ref=typed_ptr)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class ListInstance(_ListPayloadMixin):
    """
    A wrapper around a native list value: holds the list struct
    (meminfo + parent) and gives access to its reallocatable payload.
    """

    def __init__(self, context, builder, list_type, list_val):
        self._context = context
        self._builder = builder
        self._ty = list_type
        self._list = context.make_helper(builder, list_type, list_val)
        self._itemsize = get_itemsize(context, list_type)
        self._datamodel = context.data_model_manager[list_type.dtype]

    @property
    def dtype(self):
        # Element type of the list.
        return self._ty.dtype

    @property
    def _payload(self):
        # This cannot be cached as it can be reallocated
        return get_list_payload(self._context, self._builder, self._ty, self._list)

    @property
    def parent(self):
        # Parent Python object (for reflected lists), may be NULL.
        return self._list.parent

    @parent.setter
    def parent(self, value):
        self._list.parent = value

    @property
    def value(self):
        # The raw LLVM value of the list struct.
        return self._list._getvalue()

    @property
    def meminfo(self):
        # NRT meminfo owning the payload allocation.
        return self._list.meminfo

    def set_dirty(self, val):
        """Mark the list as mutated (only meaningful for reflected lists)."""
        if self._ty.reflected:
            self._payload.dirty = cgutils.true_bit if val else cgutils.false_bit

    def clear_value(self, idx):
        """Remove the value at the location
        """
        self.decref_value(self.getitem(idx))
        # it's necessary for the dtor which just decref every slot on it.
        self.zfill(idx, self._builder.add(idx, idx.type(1)))

    def setitem(self, idx, val, incref, decref_old_value=True):
        """
        Store *val* at *idx*, optionally decrefing the previous value
        and increfing the new one.
        """
        # Decref old data
        if decref_old_value:
            self.decref_value(self.getitem(idx))

        ptr = self._gep(idx)
        data_item = self._datamodel.as_data(self._builder, val)
        self._builder.store(data_item, ptr)
        self.set_dirty(True)
        if incref:
            # Incref the underlying data
            self.incref_value(val)

    def inititem(self, idx, val, incref=True):
        """
        Store *val* at *idx* without decrefing any previous content
        (for freshly allocated / zero-filled slots).
        """
        ptr = self._gep(idx)
        data_item = self._datamodel.as_data(self._builder, val)
        self._builder.store(data_item, ptr)
        if incref:
            self.incref_value(val)

    def zfill(self, start, stop):
        """Zero-fill the memory at index *start* to *stop*

        *stop* MUST not be smaller than *start*.
        """
        builder = self._builder
        base = self._gep(start)
        end = self._gep(stop)
        # Byte distance between the two element pointers.
        intaddr_t = self._context.get_value_type(types.intp)
        size = builder.sub(builder.ptrtoint(end, intaddr_t),
                           builder.ptrtoint(base, intaddr_t))
        cgutils.memset(builder, base, size, ir.IntType(8)(0))

    @classmethod
    def allocate_ex(cls, context, builder, list_type, nitems):
        """
        Allocate a ListInstance with its storage.
        Return a (ok, instance) tuple where *ok* is a LLVM boolean and
        *instance* is a ListInstance object (the object's contents are
        only valid when *ok* is true).
        """
        intp_t = context.get_value_type(types.intp)

        if isinstance(nitems, int):
            nitems = ir.Constant(intp_t, nitems)

        payload_type = context.get_data_type(types.ListPayload(list_type))
        payload_size = context.get_abi_sizeof(payload_type)

        itemsize = get_itemsize(context, list_type)
        # Account for the fact that the payload struct contains one entry
        payload_size -= itemsize

        ok = cgutils.alloca_once_value(builder, cgutils.true_bit)
        self = cls(context, builder, list_type, None)

        # Total allocation size = <payload header size> + nitems * itemsize
        allocsize, ovf = cgutils.muladd_with_overflow(builder, nitems,
                                                      ir.Constant(intp_t, itemsize),
                                                      ir.Constant(intp_t, payload_size))
        with builder.if_then(ovf, likely=False):
            # Size computation overflowed: flag failure.
            builder.store(cgutils.false_bit, ok)

        with builder.if_then(builder.load(ok), likely=True):
            meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked(
                builder, size=allocsize, dtor=self.get_dtor())
            with builder.if_else(cgutils.is_null(builder, meminfo),
                                 likely=False) as (if_error, if_ok):
                with if_error:
                    builder.store(cgutils.false_bit, ok)
                with if_ok:
                    self._list.meminfo = meminfo
                    self._list.parent = context.get_constant_null(types.pyobject)
                    self._payload.allocated = nitems
                    self._payload.size = ir.Constant(intp_t, 0)  # for safety
                    self._payload.dirty = cgutils.false_bit
                    # Zero the allocated region
                    self.zfill(self.size.type(0), nitems)

        return builder.load(ok), self

    def define_dtor(self):
        "Define the destructor if not already defined"
        context = self._context
        builder = self._builder
        mod = builder.module
        # Declare dtor
        fnty = ir.FunctionType(ir.VoidType(), [cgutils.voidptr_t])
        fn = cgutils.get_or_insert_function(mod, fnty,
                                            '.dtor.list.{}'.format(self.dtype))
        if not fn.is_declaration:
            # End early if the dtor is already defined
            return fn
        # linkonce_odr so duplicate definitions across modules merge cleanly.
        fn.linkage = 'linkonce_odr'
        # Populate the dtor
        builder = ir.IRBuilder(fn.append_basic_block())
        base_ptr = fn.args[0]  # void*

        # get payload
        payload = ListPayloadAccessor(context, builder, self._ty, base_ptr)

        # Loop over all data to decref
        intp = payload.size.type
        with cgutils.for_range_slice(
                builder, start=intp(0), stop=payload.size, step=intp(1),
                intp=intp) as (idx, _):
            val = payload.getitem(idx)
            context.nrt.decref(builder, self.dtype, val)
        builder.ret_void()
        return fn

    def get_dtor(self):
        """Get the element dtor function pointer as void pointer.

        It's safe to be called multiple times.
        """
        # Define and set the Dtor
        dtor = self.define_dtor()
        dtor_fnptr = self._builder.bitcast(dtor, cgutils.voidptr_t)
        return dtor_fnptr

    @classmethod
    def allocate(cls, context, builder, list_type, nitems):
        """
        Allocate a ListInstance with its storage.  Same as allocate_ex(),
        but return an initialized *instance*.  If allocation failed,
        control is transferred to the caller using the target's current
        call convention.
        """
        ok, self = cls.allocate_ex(context, builder, list_type, nitems)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot allocate list",))
        return self

    @classmethod
    def from_meminfo(cls, context, builder, list_type, meminfo):
        """
        Allocate a new list instance pointing to an existing payload
        (a meminfo pointer).
        Note the parent field has to be filled by the caller.
        """
        self = cls(context, builder, list_type, None)
        self._list.meminfo = meminfo
        self._list.parent = context.get_constant_null(types.pyobject)
        context.nrt.incref(builder, list_type, self.value)
        # Payload is part of the meminfo, no need to touch it
        return self

    def resize(self, new_size):
        """
        Ensure the list is properly sized for the new size.
        """
        def _payload_realloc(new_allocated):
            # Recompute the total allocation size for *new_allocated* items
            # and realloc the meminfo in place.
            payload_type = context.get_data_type(types.ListPayload(self._ty))
            payload_size = context.get_abi_sizeof(payload_type)
            # Account for the fact that the payload struct contains one entry
            payload_size -= itemsize

            allocsize, ovf = cgutils.muladd_with_overflow(
                builder, new_allocated,
                ir.Constant(intp_t, itemsize),
                ir.Constant(intp_t, payload_size))
            with builder.if_then(ovf, likely=False):
                context.call_conv.return_user_exc(builder, MemoryError,
                                                  ("cannot resize list",))

            ptr = context.nrt.meminfo_varsize_realloc_unchecked(builder,
                                                                self._list.meminfo,
                                                                size=allocsize)
            cgutils.guard_memory_error(context, builder, ptr,
                                       "cannot resize list")
            self._payload.allocated = new_allocated

        context = self._context
        builder = self._builder
        intp_t = new_size.type

        itemsize = get_itemsize(context, self._ty)
        allocated = self._payload.allocated

        two = ir.Constant(intp_t, 2)
        eight = ir.Constant(intp_t, 8)

        # allocated < new_size
        is_too_small = builder.icmp_signed('<', allocated, new_size)
        # (allocated >> 2) > new_size
        is_too_large = builder.icmp_signed('>', builder.ashr(allocated, two), new_size)

        with builder.if_then(is_too_large, likely=False):
            # Exact downsize to requested size
            # NOTE: is_too_large must be aggressive enough to avoid repeated
            # upsizes and downsizes when growing a list.
            _payload_realloc(new_size)

        with builder.if_then(is_too_small, likely=False):
            # Upsize with moderate over-allocation (size + size >> 2 + 8)
            new_allocated = builder.add(eight,
                                        builder.add(new_size,
                                                    builder.ashr(new_size, two)))
            _payload_realloc(new_allocated)
            # New tail slots must be zeroed so the dtor can safely decref them.
            self.zfill(self.size, new_allocated)

        self._payload.size = new_size
        self.set_dirty(True)

    def move(self, dest_idx, src_idx, count):
        """
        Move `count` elements from `src_idx` to `dest_idx`.
        """
        dest_ptr = self._gep(dest_idx)
        src_ptr = self._gep(src_idx)
        # memmove semantics: source and destination ranges may overlap.
        cgutils.raw_memmove(self._builder, dest_ptr, src_ptr,
                            count, itemsize=self._itemsize)

        self.set_dirty(True)
|
| 410 |
+
|
| 411 |
+
class ListIterInstance(_ListPayloadMixin):
    """
    A wrapper around a native list iterator value: a stack-stored index
    plus a reference to the underlying list's meminfo.
    """

    def __init__(self, context, builder, iter_type, iter_val):
        self._context = context
        self._builder = builder
        self._ty = iter_type
        self._iter = context.make_helper(builder, iter_type, iter_val)
        self._datamodel = context.data_model_manager[iter_type.yield_type]

    @classmethod
    def from_list(cls, context, builder, iter_type, list_val):
        """
        Create an iterator over *list_val*, positioned at index 0.
        """
        list_inst = ListInstance(context, builder, iter_type.container, list_val)
        self = cls(context, builder, iter_type, None)
        index = context.get_constant(types.intp, 0)
        # Index lives in an alloca so iternext can mutate it.
        self._iter.index = cgutils.alloca_once_value(builder, index)
        self._iter.meminfo = list_inst.meminfo
        return self

    @property
    def _payload(self):
        # This cannot be cached as it can be reallocated
        return get_list_payload(self._context, self._builder,
                                self._ty.container, self._iter)

    @property
    def value(self):
        # The raw LLVM value of the iterator struct.
        return self._iter._getvalue()

    @property
    def index(self):
        # Current iteration index (loaded from the alloca).
        return self._builder.load(self._iter.index)

    @index.setter
    def index(self, value):
        self._builder.store(value, self._iter.index)
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
#-------------------------------------------------------------------------------
|
| 449 |
+
# Constructors
|
| 450 |
+
|
| 451 |
+
def build_list(context, builder, list_type, items):
    """
    Build a list of the given type, containing the given items.
    """
    count = len(items)
    inst = ListInstance.allocate(context, builder, list_type, count)
    # Populate the freshly allocated list.
    inst.size = context.get_constant(types.intp, count)
    for pos, item in enumerate(items):
        inst.setitem(context.get_constant(types.intp, pos), item, incref=True)

    return impl_ret_new_ref(context, builder, list_type, inst.value)
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
@lower_builtin(list, types.IterableType)
def list_constructor(context, builder, sig, args):
    # list(iterable): build an empty list and extend it with the iterable.

    def _from_iterable(iterable):
        out = []
        out.extend(iterable)
        return out

    return context.compile_internal(builder, _from_iterable, sig, args)
|
| 474 |
+
|
| 475 |
+
@lower_builtin(list)
def list_constructor(context, builder, sig, args):
    # list() with no argument: allocate an empty list of the inferred type.
    ret_ty = sig.return_type
    inst = ListInstance.allocate(context, builder, ret_ty, 0)
    return impl_ret_new_ref(context, builder, ret_ty, inst.value)
|
| 481 |
+
|
| 482 |
+
#-------------------------------------------------------------------------------
|
| 483 |
+
# Various operations
|
| 484 |
+
|
| 485 |
+
@lower_builtin(len, types.List)
def list_len(context, builder, sig, args):
    # len(lst): read the size field straight from the payload.
    return ListInstance(context, builder, sig.args[0], args[0]).size
|
| 489 |
+
|
| 490 |
+
@lower_builtin('getiter', types.List)
def getiter_list(context, builder, sig, args):
    # iter(lst): build an iterator positioned at index 0.
    it = ListIterInstance.from_list(context, builder, sig.return_type, args[0])
    return impl_ret_borrowed(context, builder, sig.return_type, it.value)
|
| 494 |
+
|
| 495 |
+
@lower_builtin('iternext', types.ListIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
    it = ListIterInstance(context, builder, sig.args[0], args[0])

    cur = it.index
    # Iteration is valid while the stored index is below the list size.
    valid = builder.icmp_signed('<', cur, it.size)
    result.set_valid(valid)

    with builder.if_then(valid):
        # Yield the current item (borrowed ref) and advance the index.
        result.yield_(it.getitem(cur))
        it.index = builder.add(cur, context.get_constant(types.intp, 1))
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
@lower_builtin(operator.getitem, types.List, types.Integer)
def getitem_list(context, builder, sig, args):
    inst = ListInstance(context, builder, sig.args[0], args[0])
    # Normalize negative indices, then bounds-check before loading.
    idx = inst.fix_index(args[1])
    inst.guard_index(idx, msg="getitem out of range")
    item = inst.getitem(idx)

    return impl_ret_borrowed(context, builder, sig.return_type, item)
|
| 520 |
+
|
| 521 |
+
@lower_builtin(operator.setitem, types.List, types.Integer, types.Any)
def setitem_list(context, builder, sig, args):
    inst = ListInstance(context, builder, sig.args[0], args[0])
    # Normalize negative indices, then bounds-check before storing.
    idx = inst.fix_index(args[1])
    inst.guard_index(idx, msg="setitem out of range")
    inst.setitem(idx, args[2], incref=True)
    return context.get_dummy_value()
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
@lower_builtin(operator.getitem, types.List, types.SliceType)
def getslice_list(context, builder, sig, args):
    # lst[start:stop:step] -> a new list with copied (increfed) items.
    inst = ListInstance(context, builder, sig.args[0], args[0])
    slice = context.make_helper(builder, sig.args[1], args[1])
    slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
    # Clamp start/stop into the list's bounds.
    inst.fix_slice(slice)

    # Allocate result and populate it
    result_size = slicing.get_slice_length(builder, slice)
    result = ListInstance.allocate(context, builder, sig.return_type,
                                   result_size)
    result.size = result_size
    # Positive and negative steps are generated as two separate loops;
    # *idx* is the source index, *count* the destination index.
    with cgutils.for_range_slice_generic(builder, slice.start, slice.stop,
                                         slice.step) as (pos_range, neg_range):
        with pos_range as (idx, count):
            value = inst.getitem(idx)
            result.inititem(count, value, incref=True)
        with neg_range as (idx, count):
            value = inst.getitem(idx)
            result.inititem(count, value, incref=True)

    return impl_ret_new_ref(context, builder, sig.return_type, result.value)
|
| 555 |
+
|
| 556 |
+
@lower_builtin(operator.setitem, types.List, types.SliceType, types.Any)
def setitem_list(context, builder, sig, args):
    # dest[slice] = src: for unit step, resize and shift the tail as
    # needed; for extended slices, sizes must match exactly.
    dest = ListInstance(context, builder, sig.args[0], args[0])
    src = ListInstance(context, builder, sig.args[2], args[2])

    slice = context.make_helper(builder, sig.args[1], args[1])
    slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
    dest.fix_slice(slice)

    src_size = src.size
    # Number of destination slots covered by the slice.
    avail_size = slicing.get_slice_length(builder, slice)
    # How much the destination has to grow (>0) or shrink (<0).
    size_delta = builder.sub(src.size, avail_size)

    zero = ir.Constant(size_delta.type, 0)
    one = ir.Constant(size_delta.type, 1)

    with builder.if_else(builder.icmp_signed('==', slice.step, one)) as (then, otherwise):
        with then:
            # Slice step == 1 => we can resize

            # Compute the real stop, e.g. for dest[2:0] = [...]
            real_stop = builder.add(slice.start, avail_size)
            # Size of the list tail, after the end of slice
            tail_size = builder.sub(dest.size, real_stop)

            with builder.if_then(builder.icmp_signed('>', size_delta, zero)):
                # Grow list then move list tail
                dest.resize(builder.add(dest.size, size_delta))
                dest.move(builder.add(real_stop, size_delta), real_stop,
                          tail_size)

            with builder.if_then(builder.icmp_signed('<', size_delta, zero)):
                # Move list tail then shrink list
                dest.move(builder.add(real_stop, size_delta), real_stop,
                          tail_size)
                dest.resize(builder.add(dest.size, size_delta))

            dest_offset = slice.start

            # Copy every source item into the (now correctly sized) gap.
            with cgutils.for_range(builder, src_size) as loop:
                value = src.getitem(loop.index)
                dest.setitem(builder.add(loop.index, dest_offset), value, incref=True)

        with otherwise:
            # Extended slice: a size mismatch cannot be fixed by resizing.
            with builder.if_then(builder.icmp_signed('!=', size_delta, zero)):
                msg = "cannot resize extended list slice with step != 1"
                context.call_conv.return_user_exc(builder, ValueError, (msg,))

            # *index* walks the destination slice, *count* the source list.
            with cgutils.for_range_slice_generic(
                    builder, slice.start, slice.stop, slice.step) as (pos_range, neg_range):
                with pos_range as (index, count):
                    value = src.getitem(count)
                    dest.setitem(index, value, incref=True)
                with neg_range as (index, count):
                    value = src.getitem(count)
                    dest.setitem(index, value, incref=True)

    return context.get_dummy_value()
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
@lower_builtin(operator.delitem, types.List, types.Integer)
def delitem_list_index(context, builder, sig, args):
    # del lst[i] is equivalent to lst.pop(i) with the result discarded.

    def _delitem(lst, i):
        lst.pop(i)

    return context.compile_internal(builder, _delitem, sig, args)
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
@lower_builtin(operator.delitem, types.List, types.SliceType)
def delitem_list(context, builder, sig, args):
    # del lst[start:stop]: decref the removed range, shift the tail
    # down, then shrink the list.  Only unit step is supported.
    inst = ListInstance(context, builder, sig.args[0], args[0])
    slice = context.make_helper(builder, sig.args[1], args[1])

    slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
    inst.fix_slice(slice)

    slice_len = slicing.get_slice_length(builder, slice)

    one = ir.Constant(slice_len.type, 1)

    with builder.if_then(builder.icmp_signed('!=', slice.step, one), likely=False):
        msg = "unsupported del list[start:stop:step] with step != 1"
        context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))

    # Compute the real stop, e.g. for dest[2:0]
    start = slice.start
    real_stop = builder.add(start, slice_len)
    # Decref the removed range
    with cgutils.for_range_slice(
            builder, start, real_stop, start.type(1)
            ) as (idx, _):
        inst.decref_value(inst.getitem(idx))

    # Size of the list tail, after the end of slice
    tail_size = builder.sub(inst.size, real_stop)
    inst.move(start, real_stop, tail_size)
    inst.resize(builder.sub(inst.size, slice_len))

    return context.get_dummy_value()
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
# XXX should there be a specific module for Sequence or collection base classes?
|
| 660 |
+
|
| 661 |
+
@lower_builtin(operator.contains, types.Sequence, types.Any)
def in_seq(context, builder, sig, args):
    """Lower ``value in seq`` as a linear scan over the sequence."""

    def seq_contains_impl(seq, target):
        for item in seq:
            if item == target:
                return True
        return False

    return context.compile_internal(builder, seq_contains_impl, sig, args)
@lower_builtin(bool, types.Sequence)
def sequence_bool(context, builder, sig, args):
    """Lower ``bool(seq)``: a sequence is truthy iff it is non-empty."""

    def sequence_bool_impl(seq):
        return len(seq) != 0

    return context.compile_internal(builder, sequence_bool_impl, sig, args)
@overload(operator.truth)
def sequence_truth(seq):
    """operator.truth(seq): True iff the sequence is non-empty."""
    if not isinstance(seq, types.Sequence):
        return

    def impl(seq):
        return len(seq) != 0
    return impl
@lower_builtin(operator.add, types.List, types.List)
def list_add(context, builder, sig, args):
    """Lower ``a + b`` for two lists: allocate a fresh list and copy both."""
    lhs = ListInstance(context, builder, sig.args[0], args[0])
    rhs = ListInstance(context, builder, sig.args[1], args[1])

    lhs_n = lhs.size
    rhs_n = rhs.size
    total = builder.add(lhs_n, rhs_n)
    out = ListInstance.allocate(context, builder, sig.return_type, total)
    out.size = total

    # Copy the left operand, casting each element to the result dtype.
    with cgutils.for_range(builder, lhs_n) as loop:
        item = out_item = context.cast(builder, lhs.getitem(loop.index),
                                       lhs.dtype, out.dtype)
        out.setitem(loop.index, item, incref=True)
    # Append the right operand after the left one.
    with cgutils.for_range(builder, rhs_n) as loop:
        item = context.cast(builder, rhs.getitem(loop.index),
                            rhs.dtype, out.dtype)
        out.setitem(builder.add(loop.index, lhs_n), item, incref=True)

    return impl_ret_new_ref(context, builder, sig.return_type, out.value)
@lower_builtin(operator.iadd, types.List, types.List)
def list_add_inplace(context, builder, sig, args):
    """Lower ``a += b``: extend ``a`` in place and return it (borrowed ref)."""
    assert sig.args[0].dtype == sig.return_type.dtype
    extended = _list_extend_list(context, builder, sig, args)

    return impl_ret_borrowed(context, builder, sig.return_type, extended.value)
@lower_builtin(operator.mul, types.List, types.Integer)
@lower_builtin(operator.mul, types.Integer, types.List)
def list_mul(context, builder, sig, args):
    """Lower ``lst * n`` / ``n * lst``: allocate and tile the source list."""
    if isinstance(sig.args[0], types.List):
        list_idx, int_idx = 0, 1
    else:
        list_idx, int_idx = 1, 0
    src = ListInstance(context, builder, sig.args[list_idx], args[list_idx])
    src_size = src.size

    # A negative multiplier behaves like zero (empty result), as in Python.
    count = args[int_idx]
    zero = ir.Constant(count.type, 0)
    count = builder.select(cgutils.is_neg_int(builder, count), zero, count)
    total = builder.mul(count, src_size)

    out = ListInstance.allocate(context, builder, sig.return_type, total)
    out.size = total

    # Copy the source once per repetition, at offsets that are multiples
    # of src_size.
    with cgutils.for_range_slice(builder, zero, total, src_size, inc=True) as (base, _):
        with cgutils.for_range(builder, src_size) as loop:
            out.setitem(builder.add(loop.index, base),
                        src.getitem(loop.index), incref=True)

    return impl_ret_new_ref(context, builder, sig.return_type, out.value)
@lower_builtin(operator.imul, types.List, types.Integer)
def list_mul_inplace(context, builder, sig, args):
    """Lower ``lst *= n``: resize in place and tile the original prefix."""
    lst = ListInstance(context, builder, sig.args[0], args[0])
    src_size = lst.size

    # A negative multiplier behaves like zero, as in Python.
    count = args[1]
    zero = ir.Constant(count.type, 0)
    count = builder.select(cgutils.is_neg_int(builder, count), zero, count)
    total = builder.mul(count, src_size)

    lst.resize(total)

    # Replicate the original first src_size elements into each new chunk.
    with cgutils.for_range_slice(builder, src_size, total, src_size, inc=True) as (base, _):
        with cgutils.for_range(builder, src_size) as loop:
            lst.setitem(builder.add(loop.index, base),
                        lst.getitem(loop.index), incref=True)

    return impl_ret_borrowed(context, builder, sig.return_type, lst.value)
#-------------------------------------------------------------------------------
|
| 763 |
+
# Comparisons
|
| 764 |
+
|
| 765 |
+
@lower_builtin(operator.is_, types.List, types.List)
def list_is(context, builder, sig, args):
    """Lower ``a is b`` by comparing the lists' meminfo pointers."""
    lhs = ListInstance(context, builder, sig.args[0], args[0])
    rhs = ListInstance(context, builder, sig.args[1], args[1])
    lhs_ptr = builder.ptrtoint(lhs.meminfo, cgutils.intp_t)
    rhs_ptr = builder.ptrtoint(rhs.meminfo, cgutils.intp_t)
    return builder.icmp_signed('==', lhs_ptr, rhs_ptr)
@lower_builtin(operator.eq, types.List, types.List)
def list_eq(context, builder, sig, args):
    """Lower ``a == b``: equal sizes and element-wise equality."""
    aty, bty = sig.args
    lhs = ListInstance(context, builder, aty, args[0])
    rhs = ListInstance(context, builder, bty, args[1])

    lhs_n = lhs.size
    same_size = builder.icmp_signed('==', lhs_n, rhs.size)

    # Start from the size comparison; a mismatching element flips it to False.
    res = cgutils.alloca_once_value(builder, same_size)

    with builder.if_then(same_size):
        with cgutils.for_range(builder, lhs_n) as loop:
            left = lhs.getitem(loop.index)
            right = rhs.getitem(loop.index)
            eq = context.generic_compare(builder, operator.eq,
                                         (aty.dtype, bty.dtype),
                                         (left, right))
            with builder.if_then(builder.not_(eq)):
                # Exit early on the first mismatch.
                builder.store(cgutils.false_bit, res)
                loop.do_break()

    return builder.load(res)
def all_list(*args):
    """Return True when every argument type is a reflected list type."""
    return all(isinstance(typ, types.List) for typ in args)
@overload(operator.ne)
def impl_list_ne(a, b):
    """``a != b`` for two lists: the negation of list equality."""
    if not all_list(a, b):
        return

    def list_ne_impl(a, b):
        return not (a == b)

    return list_ne_impl
@overload(operator.le)
def impl_list_le(a, b):
    """Lexicographic ``<=`` for two lists."""
    if not all_list(a, b):
        return

    def list_le_impl(a, b):
        len_a = len(a)
        len_b = len(b)
        for i in range(min(len_a, len_b)):
            if a[i] < b[i]:
                return True
            elif a[i] > b[i]:
                return False
        # All shared elements are equal: the shorter-or-equal list wins.
        return len_a <= len_b

    return list_le_impl
@overload(operator.lt)
def impl_list_lt(a, b):
    """Lexicographic ``<`` for two lists."""
    if not all_list(a, b):
        return

    def list_lt_impl(a, b):
        len_a = len(a)
        len_b = len(b)
        for i in range(min(len_a, len_b)):
            if a[i] < b[i]:
                return True
            elif a[i] > b[i]:
                return False
        # All shared elements are equal: strictly shorter list wins.
        return len_a < len_b

    return list_lt_impl
@overload(operator.ge)
def impl_list_ge(a, b):
    """``a >= b`` for two lists, expressed via the ``<=`` overload."""
    if not all_list(a, b):
        return

    def list_ge_impl(a, b):
        return b <= a

    return list_ge_impl
@overload(operator.gt)
def impl_list_gt(a, b):
    """``a > b`` for two lists, expressed via the ``<`` overload."""
    if not all_list(a, b):
        return

    def list_gt_impl(a, b):
        return b < a

    return list_gt_impl
#-------------------------------------------------------------------------------
|
| 866 |
+
# Methods
|
| 867 |
+
|
| 868 |
+
@lower_builtin("list.append", types.List, types.Any)
|
| 869 |
+
def list_append(context, builder, sig, args):
|
| 870 |
+
inst = ListInstance(context, builder, sig.args[0], args[0])
|
| 871 |
+
item = args[1]
|
| 872 |
+
|
| 873 |
+
n = inst.size
|
| 874 |
+
new_size = builder.add(n, ir.Constant(n.type, 1))
|
| 875 |
+
inst.resize(new_size)
|
| 876 |
+
inst.setitem(n, item, incref=True)
|
| 877 |
+
|
| 878 |
+
return context.get_dummy_value()
|
| 879 |
+
|
| 880 |
+
@lower_builtin("list.clear", types.List)
|
| 881 |
+
def list_clear(context, builder, sig, args):
|
| 882 |
+
inst = ListInstance(context, builder, sig.args[0], args[0])
|
| 883 |
+
inst.resize(context.get_constant(types.intp, 0))
|
| 884 |
+
|
| 885 |
+
return context.get_dummy_value()
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
@overload_method(types.List, "copy")
|
| 889 |
+
def list_copy(lst):
|
| 890 |
+
def list_copy_impl(lst):
|
| 891 |
+
return list(lst)
|
| 892 |
+
|
| 893 |
+
return list_copy_impl
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
@overload_method(types.List, "count")
|
| 897 |
+
def list_count(lst, value):
|
| 898 |
+
|
| 899 |
+
def list_count_impl(lst, value):
|
| 900 |
+
res = 0
|
| 901 |
+
for elem in lst:
|
| 902 |
+
if elem == value:
|
| 903 |
+
res += 1
|
| 904 |
+
return res
|
| 905 |
+
|
| 906 |
+
return list_count_impl
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
def _list_extend_list(context, builder, sig, args):
    """Append every element of the source list (sig.args[1]) to the dest list
    (sig.args[0]), casting to the destination dtype.  Returns the dest
    ListInstance."""
    src = ListInstance(context, builder, sig.args[1], args[1])
    dest = ListInstance(context, builder, sig.args[0], args[0])

    src_n = src.size
    dest_n = dest.size
    new_n = builder.add(src_n, dest_n)
    dest.resize(new_n)
    dest.size = new_n

    with cgutils.for_range(builder, src_n) as loop:
        item = context.cast(builder, src.getitem(loop.index),
                            src.dtype, dest.dtype)
        dest.setitem(builder.add(loop.index, dest_n), item, incref=True)

    return dest
@lower_builtin("list.extend", types.List, types.IterableType)
|
| 927 |
+
def list_extend(context, builder, sig, args):
|
| 928 |
+
if isinstance(sig.args[1], types.List):
|
| 929 |
+
# Specialize for list operands, for speed.
|
| 930 |
+
_list_extend_list(context, builder, sig, args)
|
| 931 |
+
return context.get_dummy_value()
|
| 932 |
+
|
| 933 |
+
def list_extend(lst, iterable):
|
| 934 |
+
# Speed hack to avoid NRT refcount operations inside the loop
|
| 935 |
+
meth = lst.append
|
| 936 |
+
for v in iterable:
|
| 937 |
+
meth(v)
|
| 938 |
+
|
| 939 |
+
return context.compile_internal(builder, list_extend, sig, args)
|
| 940 |
+
|
| 941 |
+
|
| 942 |
+
# Largest representable index; used as the default "stop" for list.index().
# The attribute's location depends on the active type-system mode.
if config.USE_LEGACY_TYPE_SYSTEM:
    intp_max = types.intp.maxval
else:
    intp_max = types.py_int.maxval
@overload_method(types.List, "index")
|
| 949 |
+
def list_index(lst, value, start=0, stop=intp_max):
|
| 950 |
+
|
| 951 |
+
if not isinstance(start, (int, types.Integer, types.Omitted)):
|
| 952 |
+
raise errors.TypingError(f'arg "start" must be an Integer. Got {start}')
|
| 953 |
+
if not isinstance(stop, (int, types.Integer, types.Omitted)):
|
| 954 |
+
raise errors.TypingError(f'arg "stop" must be an Integer. Got {stop}')
|
| 955 |
+
|
| 956 |
+
def list_index_impl(lst, value, start=0, stop=intp_max):
|
| 957 |
+
n = len(lst)
|
| 958 |
+
if start < 0:
|
| 959 |
+
start += n
|
| 960 |
+
if start < 0:
|
| 961 |
+
start = 0
|
| 962 |
+
if stop < 0:
|
| 963 |
+
stop += n
|
| 964 |
+
if stop > n:
|
| 965 |
+
stop = n
|
| 966 |
+
for i in range(start, stop):
|
| 967 |
+
if lst[i] == value:
|
| 968 |
+
return i
|
| 969 |
+
# XXX references are leaked when raising
|
| 970 |
+
raise ValueError("value not in list")
|
| 971 |
+
return list_index_impl
|
| 972 |
+
|
| 973 |
+
|
| 974 |
+
@lower_builtin("list.insert", types.List, types.Integer,
|
| 975 |
+
types.Any)
|
| 976 |
+
def list_insert(context, builder, sig, args):
|
| 977 |
+
inst = ListInstance(context, builder, sig.args[0], args[0])
|
| 978 |
+
index = inst.fix_index(args[1])
|
| 979 |
+
index = inst.clamp_index(index)
|
| 980 |
+
value = args[2]
|
| 981 |
+
|
| 982 |
+
n = inst.size
|
| 983 |
+
one = ir.Constant(n.type, 1)
|
| 984 |
+
new_size = builder.add(n, one)
|
| 985 |
+
inst.resize(new_size)
|
| 986 |
+
inst.move(builder.add(index, one), index, builder.sub(n, index))
|
| 987 |
+
inst.setitem(index, value, incref=True, decref_old_value=False)
|
| 988 |
+
|
| 989 |
+
return context.get_dummy_value()
|
| 990 |
+
|
| 991 |
+
@lower_builtin("list.pop", types.List)
|
| 992 |
+
def list_pop(context, builder, sig, args):
|
| 993 |
+
inst = ListInstance(context, builder, sig.args[0], args[0])
|
| 994 |
+
|
| 995 |
+
n = inst.size
|
| 996 |
+
cgutils.guard_zero(context, builder, n,
|
| 997 |
+
(IndexError, "pop from empty list"))
|
| 998 |
+
n = builder.sub(n, ir.Constant(n.type, 1))
|
| 999 |
+
res = inst.getitem(n)
|
| 1000 |
+
inst.incref_value(res) # incref the pop'ed element
|
| 1001 |
+
inst.clear_value(n) # clear the storage space
|
| 1002 |
+
inst.resize(n)
|
| 1003 |
+
return impl_ret_new_ref(context, builder, sig.return_type, res)
|
| 1004 |
+
|
| 1005 |
+
@lower_builtin("list.pop", types.List, types.Integer)
|
| 1006 |
+
def list_pop(context, builder, sig, args):
|
| 1007 |
+
inst = ListInstance(context, builder, sig.args[0], args[0])
|
| 1008 |
+
idx = inst.fix_index(args[1])
|
| 1009 |
+
|
| 1010 |
+
n = inst.size
|
| 1011 |
+
cgutils.guard_zero(context, builder, n,
|
| 1012 |
+
(IndexError, "pop from empty list"))
|
| 1013 |
+
inst.guard_index(idx, "pop index out of range")
|
| 1014 |
+
|
| 1015 |
+
res = inst.getitem(idx)
|
| 1016 |
+
|
| 1017 |
+
one = ir.Constant(n.type, 1)
|
| 1018 |
+
n = builder.sub(n, ir.Constant(n.type, 1))
|
| 1019 |
+
inst.move(idx, builder.add(idx, one), builder.sub(n, idx))
|
| 1020 |
+
inst.resize(n)
|
| 1021 |
+
return impl_ret_new_ref(context, builder, sig.return_type, res)
|
| 1022 |
+
|
| 1023 |
+
@overload_method(types.List, "remove")
|
| 1024 |
+
def list_remove(lst, value):
|
| 1025 |
+
|
| 1026 |
+
def list_remove_impl(lst, value):
|
| 1027 |
+
for i in range(len(lst)):
|
| 1028 |
+
if lst[i] == value:
|
| 1029 |
+
lst.pop(i)
|
| 1030 |
+
return
|
| 1031 |
+
# XXX references are leaked when raising
|
| 1032 |
+
raise ValueError("list.remove(x): x not in list")
|
| 1033 |
+
|
| 1034 |
+
return list_remove_impl
|
| 1035 |
+
|
| 1036 |
+
@overload_method(types.List, "reverse")
|
| 1037 |
+
def list_reverse(lst):
|
| 1038 |
+
|
| 1039 |
+
def list_reverse_impl(lst):
|
| 1040 |
+
for a in range(0, len(lst) // 2):
|
| 1041 |
+
b = -a - 1
|
| 1042 |
+
lst[a], lst[b] = lst[b], lst[a]
|
| 1043 |
+
|
| 1044 |
+
return list_reverse_impl
|
| 1045 |
+
|
| 1046 |
+
# -----------------------------------------------------------------------------
|
| 1047 |
+
# Sorting
|
| 1048 |
+
|
| 1049 |
+
def gt(a, b):
    """Comparator used as the ``lt`` of a descending-order quicksort."""
    return a > b
# Pre-built quicksort entry points: plain ascending/descending sorts, plus
# argsort variants (index-returning, list-based) used when a key is supplied.
sort_forwards = quicksort.make_jit_quicksort().run_quicksort
sort_backwards = quicksort.make_jit_quicksort(lt=gt).run_quicksort

arg_sort_forwards = quicksort.make_jit_quicksort(is_argsort=True,
                                                 is_list=True).run_quicksort
arg_sort_backwards = quicksort.make_jit_quicksort(is_argsort=True, lt=gt,
                                                  is_list=True).run_quicksort
def _sort_check_reverse(reverse):
    """Validate the 'reverse' argument of sort()/sorted().

    Unwraps Omitted/Optional wrappers, raises TypingError unless the
    underlying type is boolean/integer-like, and returns that type.
    """
    if isinstance(reverse, types.Omitted):
        rty = reverse.value
    elif isinstance(reverse, types.Optional):
        rty = reverse.type
    else:
        rty = reverse
    if not isinstance(rty, (types.Boolean, types.Integer, int, bool)):
        msg = "an integer is required for 'reverse' (got type %s)" % reverse
        raise errors.TypingError(msg)
    return rty
def _sort_check_key(key):
    """Validate the 'key' argument of sort()/sorted().

    Must be concretely None-like or a Numba JIT compiled function;
    Optional and anything else raise TypingError.
    """
    if isinstance(key, types.Optional):
        msg = ("Key must concretely be None or a Numba JIT compiled function, "
               "an Optional (union of None and a value) was found")
        raise errors.TypingError(msg)
    if not (cgutils.is_nonelike(key) or isinstance(key, types.Dispatcher)):
        msg = "Key must be None or a Numba JIT compiled function"
        raise errors.TypingError(msg)
@overload_method(types.List, "sort")
|
| 1085 |
+
def ol_list_sort(lst, key=None, reverse=False):
|
| 1086 |
+
|
| 1087 |
+
_sort_check_key(key)
|
| 1088 |
+
_sort_check_reverse(reverse)
|
| 1089 |
+
|
| 1090 |
+
if cgutils.is_nonelike(key):
|
| 1091 |
+
KEY = False
|
| 1092 |
+
sort_f = sort_forwards
|
| 1093 |
+
sort_b = sort_backwards
|
| 1094 |
+
elif isinstance(key, types.Dispatcher):
|
| 1095 |
+
KEY = True
|
| 1096 |
+
sort_f = arg_sort_forwards
|
| 1097 |
+
sort_b = arg_sort_backwards
|
| 1098 |
+
|
| 1099 |
+
def impl(lst, key=None, reverse=False):
|
| 1100 |
+
if KEY is True:
|
| 1101 |
+
_lst = [key(x) for x in lst]
|
| 1102 |
+
else:
|
| 1103 |
+
_lst = lst
|
| 1104 |
+
if reverse is False or reverse == 0:
|
| 1105 |
+
tmp = sort_f(_lst)
|
| 1106 |
+
else:
|
| 1107 |
+
tmp = sort_b(_lst)
|
| 1108 |
+
if KEY is True:
|
| 1109 |
+
lst[:] = [lst[i] for i in tmp]
|
| 1110 |
+
return impl
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
@overload(sorted)
def ol_sorted(iterable, key=None, reverse=False):
    """sorted(iterable): copy into a list, delegate to list.sort()."""
    if not isinstance(iterable, types.IterableType):
        return False

    _sort_check_key(key)
    _sort_check_reverse(reverse)

    def impl(iterable, key=None, reverse=False):
        result = list(iterable)
        result.sort(key=key, reverse=reverse)
        return result
    return impl
# -----------------------------------------------------------------------------
|
| 1129 |
+
# Implicit casting
|
| 1130 |
+
|
| 1131 |
+
@lower_cast(types.List, types.List)
def list_to_list(context, builder, fromty, toty, val):
    """Cast between list types (non-reflected -> reflected).

    The runtime representation is shared, so this is an identity cast;
    only same-dtype casts are expected here.
    """
    assert fromty.dtype == toty.dtype
    return val
# Shared typing error raised by every mutating method of a LiteralList:
# literal lists are immutable by construction.
_banned_error = errors.TypingError("Cannot mutate a literal list")
@overload_method(types.LiteralList, 'append')
def literal_list_banned_append(lst, obj):
    """Literal lists are immutable: append is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'extend')
def literal_list_banned_extend(lst, iterable):
    """Literal lists are immutable: extend is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'insert')
def literal_list_banned_insert(lst, index, obj):
    """Literal lists are immutable: insert is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'remove')
def literal_list_banned_remove(lst, value):
    """Literal lists are immutable: remove is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'pop')
def literal_list_banned_pop(lst, index=-1):
    """Literal lists are immutable: pop is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'clear')
def literal_list_banned_clear(lst):
    """Literal lists are immutable: clear is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'sort')
def literal_list_banned_sort(lst, key=None, reverse=False):
    """Literal lists are immutable: in-place sort is a typing error."""
    raise _banned_error
@overload_method(types.LiteralList, 'reverse')
def literal_list_banned_reverse(lst):
    """Literal lists are immutable: in-place reverse is a typing error."""
    raise _banned_error
# Sentinel default for the "end" argument of LiteralList.index(): the
# largest representable index.  Attribute location depends on the
# active type-system mode.
if config.USE_LEGACY_TYPE_SYSTEM:
    _index_end = types.intp.maxval
else:
    _index_end = types.py_int.maxval
@overload_method(types.LiteralList, 'index')
def literal_list_index(lst, x, start=0, end=_index_end):
    """list.index is deliberately unsupported on literal lists."""
    # TODO: To make this work, need consts as slice for start/end so as to
    # be able to statically analyse the bounds, then its a just loop body
    # versioning based iteration along with enumerate to find the item
    if isinstance(lst, types.LiteralList):
        msg = "list.index is unsupported for literal lists"
        raise errors.TypingError(msg)
@overload_method(types.LiteralList, 'count')
def literal_list_count(lst, x):
    """Count occurrences of x by unrolling over the heterogeneous items."""
    if not isinstance(lst, types.LiteralList):
        return

    def impl(lst, x):
        total = 0
        for item in literal_unroll(lst):
            if item == x:
                total += 1
        return total
    return impl
@overload_method(types.LiteralList, 'copy')
def literal_list_copy(lst):
    """LiteralList.copy(): the list is immutable, so return it unchanged.

    NOTE: this function was previously (mis)named ``literal_list_count``,
    colliding at module level with the 'count' overload above.  The
    registration happens through the decorator, so renaming it to
    ``literal_list_copy`` is behavior-neutral while removing the shadowing.
    """
    if isinstance(lst, types.LiteralList):
        def impl(lst):
            return lst  # tuples are immutable, as is this, so just return it
        return impl
@overload(operator.delitem)
def literal_list_delitem(lst, index):
    """Literal lists are immutable: del lst[i] is a typing error."""
    if isinstance(lst, types.LiteralList):
        raise _banned_error
@overload(operator.setitem)
def literal_list_setitem(lst, index, value):
    """Literal lists are immutable: lst[i] = v is a typing error."""
    if isinstance(lst, types.LiteralList):
        raise errors.TypingError("Cannot mutate a literal list")
@overload(operator.getitem)
def literal_list_getitem(lst, *args):
    """Reject lst[i] on literal lists: the item type is index-dependent and
    cannot be statically determined."""
    if not isinstance(lst, types.LiteralList):
        return
    msg = ("Cannot __getitem__ on a literal list, return type cannot be "
           "statically determined.")
    raise errors.TypingError(msg)
@overload(len)
def literal_list_len(lst):
    """len() of a literal list is a compile-time constant: its item count."""
    if not isinstance(lst, types.LiteralList):
        return
    n = lst.count
    return lambda lst: n
@overload(operator.contains)
def literal_list_contains(lst, item):
    """``item in lst`` for literal lists, via literal unrolling."""
    if not isinstance(lst, types.LiteralList):
        return

    def impl(lst, item):
        for candidate in literal_unroll(lst):
            if candidate == item:
                return True
        return False
    return impl
@lower_cast(types.LiteralList, types.LiteralList)
def literallist_to_literallist(context, builder, fromty, toty, val):
    """Cast between literal-list types element-wise; lengths must match."""
    if len(fromty) != len(toty):
        # Disallowed by typing layer
        raise NotImplementedError

    src_items = cgutils.unpack_tuple(builder, val, len(fromty))
    cast_items = [context.cast(builder, v, f, t)
                  for v, f, t in zip(src_items, fromty, toty)]
    return context.make_tuple(builder, toty, cast_items)
lib/python3.10/site-packages/numba/cpython/old_hashing.py
ADDED
|
@@ -0,0 +1,743 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Hash implementations for Numba types
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import numpy as np
|
| 7 |
+
import sys
|
| 8 |
+
import ctypes
|
| 9 |
+
import warnings
|
| 10 |
+
from collections import namedtuple
|
| 11 |
+
|
| 12 |
+
import llvmlite.binding as ll
|
| 13 |
+
from llvmlite import ir
|
| 14 |
+
|
| 15 |
+
from numba import literal_unroll
|
| 16 |
+
from numba.core.extending import (
|
| 17 |
+
overload, overload_method, intrinsic, register_jitable)
|
| 18 |
+
from numba.core import errors
|
| 19 |
+
from numba.core import types
|
| 20 |
+
from numba.core.unsafe.bytes import grab_byte, grab_uint64_t
|
| 21 |
+
from numba.cpython.randomimpl import (const_int, get_next_int, get_next_int32,
|
| 22 |
+
get_state_ptr)
|
| 23 |
+
|
| 24 |
+
# This is Py_hash_t, which is a Py_ssize_t, which has sizeof(size_t):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyport.h#L91-L96 # noqa: E501
_hash_width = sys.hash_info.width
_Py_hash_t = getattr(types, 'int%s' % _hash_width)    # signed hash type
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)  # unsigned counterpart

# Constants from CPython source, obtained by various means:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyhash.h # noqa: E501
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
_PyHASH_MODULUS = _Py_uhash_t(sys.hash_info.modulus)
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61  # mersenne primes
_PyHASH_MULTIPLIER = 0xf4243  # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = sys.int_info.bits_per_digit
_Py_HASH_CUTOFF = sys.hash_info.cutoff
_Py_hashfunc_name = sys.hash_info.algorithm
# This stub/overload pair are used to force branch pruning to remove the dead
|
| 44 |
+
# branch based on the potential `None` type of the hash_func which works better
|
| 45 |
+
# if the predicate for the prune in an ir.Arg. The obj is an arg to allow for
|
| 46 |
+
# a custom error message.
|
| 47 |
+
def _defer_hash(hash_func):
    # Stub; the real implementation is supplied by the @overload below.
    # NOTE(review): the overload takes (obj, hash_func) — presumably the
    # stub's own signature is never used for typing; confirm against the
    # overload machinery.
    pass
@overload(_defer_hash)
def ol_defer_hash(obj, hash_func):
    """Call hash_func(), or raise TypeError when it is None.

    Branch pruning removes the dead arm based on hash_func's static type,
    producing a custom unhashable-type message mentioning ``obj``.
    """
    err_msg = f"unhashable type: '{obj}'"

    def impl(obj, hash_func):
        if hash_func is None:
            raise TypeError(err_msg)
        else:
            return hash_func()
    return impl
# hash(obj) is implemented by calling obj.__hash__()
|
| 64 |
+
@overload(hash)
def hash_overload(obj):
    """hash(obj): dispatch to obj.__hash__() when one is defined."""
    attempt_generic_msg = ("No __hash__ is defined for object of type "
                           f"'{obj}' and a generic hash() cannot be "
                           "performed as there is no suitable object "
                           "represention in Numba compiled code!")

    def impl(obj):
        if hasattr(obj, '__hash__'):
            return _defer_hash(obj, getattr(obj, '__hash__'))
        else:
            raise TypeError(attempt_generic_msg)
    return impl
@register_jitable
def process_return(val):
    """Coerce a computed hash to Py_hash_t, mapping -1 to -2.

    CPython reserves -1 as the error sentinel for hash functions, so a
    legitimately computed -1 must be replaced by -2.
    """
    h = _Py_hash_t(val)
    if h == int(-1):
        h = int(-2)
    return h
# This is a translation of CPython's _Py_HashDouble:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L34-L129 # noqa: E501
# NOTE: In Python 3.10 hash of nan is now hash of the pointer to the PyObject
# containing said nan. Numba cannot replicate this as there is no object, so it
# elects to replicate the behaviour i.e. hash of nan is something "unique" which
# satisfies https://bugs.python.org/issue43475.

@register_jitable(locals={'x': _Py_uhash_t,
                          'y': _Py_uhash_t,
                          'm': types.double,
                          'e': types.intc,
                          'sign': types.intc,
                          '_PyHASH_MODULUS': _Py_uhash_t,
                          '_PyHASH_BITS': types.intc})
def _Py_HashDouble(v):
    """Hash a double as CPython does: reduce the mantissa 28 bits at a time
    modulo the Mersenne prime ``_PyHASH_MODULUS``, then fold in the exponent
    and sign. Infinities map to +/-_PyHASH_INF; NaN gets a pseudo-random
    value (see NOTE above)."""
    if not np.isfinite(v):
        if (np.isinf(v)):
            if (v > 0):
                return _PyHASH_INF
            else:
                return -_PyHASH_INF
        else:
            # Python 3.10 does not use `_PyHASH_NAN`.
            # https://github.com/python/cpython/blob/2c4792264f9218692a1bd87398a60591f756b171/Python/pyhash.c#L102 # noqa: E501
            # Numba returns a pseudo-random number to reflect the spirit of the
            # change.
            x = _prng_random_hash()
            return process_return(x)

    # Split v into mantissa in [0.5, 1) (or 0) and exponent.
    m, e = math.frexp(v)

    sign = 1
    if (m < 0):
        sign = -1
        m = -m

    # process 28 bits at a time; this should work well both for binary
    # and hexadecimal floating point.
    x = 0
    while (m):
        x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28)
        m *= 268435456.0  # /* 2**28 */
        e -= 28
        y = int(m)  # /* pull out integer part */
        m -= y
        x += y
        if x >= _PyHASH_MODULUS:
            x -= _PyHASH_MODULUS
    # /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
    if e >= 0:
        e = e % _PyHASH_BITS
    else:
        e = _PyHASH_BITS - 1 - ((-1 - e) % _PyHASH_BITS)

    # Rotate x by the reduced exponent (rotation is exact because the modulus
    # is a Mersenne prime 2**_PyHASH_BITS - 1).
    x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e)

    x = x * sign
    return process_return(x)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@intrinsic
def _fpext(tyctx, val):
    """Intrinsic: widen a float32 to float64 using LLVM's ``fpext``
    instruction (no rounding, the conversion is exact)."""
    def impl(cgctx, builder, signature, args):
        val = args[0]
        return builder.fpext(val, ir.DoubleType())
    sig = types.float64(types.float32)
    return sig, impl
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@intrinsic
def _prng_random_hash(tyctx):
    """Intrinsic: draw ``_hash_width`` bits from numba's internal PRNG state
    and return them as a ``_Py_hash_t`` (used for NaN hashing, see
    ``_Py_HashDouble``)."""

    def impl(cgctx, builder, signature, args):
        state_ptr = get_state_ptr(cgctx, builder, "internal")
        bits = const_int(_hash_width)

        # Why not just use get_next_int() with the correct bitwidth?
        # get_next_int() always returns an i64, because the bitwidth it is
        # passed may not be a compile-time constant, so it needs to allocate
        # the largest unit of storage that may be required. Therefore, if the
        # hash width is 32, then we need to use get_next_int32() to ensure we
        # don't return a wider-than-expected hash, even if everything above
        # the low 32 bits would have been zero.
        if _hash_width == 32:
            value = get_next_int32(cgctx, builder, state_ptr)
        else:
            value = get_next_int(cgctx, builder, state_ptr, bits, False)

        return value

    sig = _Py_hash_t()
    return sig, impl
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# This is a translation of CPython's long_hash, but restricted to the numerical
# domain reachable by int64/uint64 (i.e. no BigInt like support):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/longobject.c#L2934-L2989 # noqa: E501
# obdigit is a uint32_t which is typedef'd to digit
# int32_t is typedef'd to sdigit


@register_jitable(locals={'x': _Py_uhash_t,
                          'p1': _Py_uhash_t,
                          'p2': _Py_uhash_t,
                          'p3': _Py_uhash_t,
                          'p4': _Py_uhash_t,
                          '_PyHASH_MODULUS': _Py_uhash_t,
                          '_PyHASH_BITS': types.int32,
                          '_PyLong_SHIFT': types.int32,})
def _long_impl(val):
    """Hash an unsigned 64-bit magnitude as CPython's long_hash would,
    processing it in `_PyLong_SHIFT`-bit "digits" from most to least
    significant, modulo ``_PyHASH_MODULUS``."""
    # This function assumes val came from a long int repr with val being a
    # uint64_t this means having to split the input into PyLong_SHIFT size
    # chunks in an unsigned hash wide type, max numba can handle is a 64bit int

    # mask to select low _PyLong_SHIFT bits
    _tmp_shift = 32 - _PyLong_SHIFT
    mask_shift = (~types.uint32(0x0)) >> _tmp_shift

    # a 64bit wide max means Numba only needs 3 x 30 bit values max,
    # or 5 x 15 bit values max on 32bit platforms
    i = (64 // _PyLong_SHIFT) + 1

    # alg as per hash_long
    x = 0
    p3 = (_PyHASH_BITS - _PyLong_SHIFT)
    for idx in range(i - 1, -1, -1):
        # x = ((x << _PyLong_SHIFT) & MODULUS) | (x >> (BITS - SHIFT)),
        # i.e. rotate x left by _PyLong_SHIFT within _PyHASH_BITS bits.
        p1 = x << _PyLong_SHIFT
        p2 = p1 & _PyHASH_MODULUS
        p4 = x >> p3
        x = p2 | p4
        # the shift and mask splits out the `ob_digit` parts of a Long repr
        x += types.uint32((val >> idx * _PyLong_SHIFT) & mask_shift)
        if x >= _PyHASH_MODULUS:
            x -= _PyHASH_MODULUS
    return _Py_hash_t(x)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
# This has no CPython equivalent, CPython uses long_hash.
@overload_method(types.Integer, '__hash__')
@overload_method(types.Boolean, '__hash__')
def int_hash(val):
    """``__hash__`` for all numba integer and boolean types.

    Small magnitudes hash to themselves (CPython semantics); magnitudes at or
    above ``_PyHASH_MODULUS`` go through ``_long_impl`` with the sign folded
    back in afterwards. The constants below are computed at typing time.
    """

    # Value CPython computes for hash(int64 min); Numba cannot reproduce the
    # wide shifts needed, so it is precomputed per platform word size.
    _HASH_I64_MIN = -2 if sys.maxsize <= 2 ** 32 else -4
    _SIGNED_MIN = types.int64(-0x8000000000000000)

    # Find a suitable type to hold a "big" value, i.e. iinfo(ty).min/max
    # this is to ensure e.g. int32.min is handled ok as it's abs() is its value
    _BIG = types.int64 if getattr(val, 'signed', False) else types.uint64

    # this is a bit involved due to the CPython repr of ints
    def impl(val):
        # If the magnitude is under PyHASH_MODULUS, just return the
        # value val as the hash, couple of special cases if val == val:
        # 1. it's 0, in which case return 0
        # 2. it's signed int minimum value, return the value CPython computes
        # but Numba cannot as there's no type wide enough to hold the shifts.
        #
        # If the magnitude is greater than PyHASH_MODULUS then... if the value
        # is negative then negate it switch the sign on the hash once computed
        # and use the standard wide unsigned hash implementation
        val = _BIG(val)
        mag = abs(val)
        if mag < _PyHASH_MODULUS:
            if val == 0:
                ret = 0
            elif val == _SIGNED_MIN:  # e.g. int64 min, -0x8000000000000000
                ret = _Py_hash_t(_HASH_I64_MIN)
            else:
                ret = _Py_hash_t(val)
        else:
            needs_negate = False
            if val < 0:
                val = -val
                needs_negate = True
            ret = _long_impl(val)
            if needs_negate:
                ret = -ret
        return process_return(ret)
    return impl
|
| 266 |
+
|
| 267 |
+
# This is a translation of CPython's float_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/floatobject.c#L528-L532 # noqa: E501


@overload_method(types.Float, '__hash__')
def float_hash(val):
    """``__hash__`` for float types; float32 is widened to float64 first so
    equal values hash identically regardless of width."""
    if val.bitwidth != 64:
        def impl(val):
            # widen the 32bit float to 64bit before hashing
            widened = np.float64(_fpext(val))
            return _Py_HashDouble(widened)
    else:
        def impl(val):
            return _Py_HashDouble(val)
    return impl
|
| 284 |
+
|
| 285 |
+
# This is a translation of CPython's complex_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/complexobject.c#L408-L428 # noqa: E501


@overload_method(types.Complex, '__hash__')
def complex_hash(val):
    """``__hash__`` for complex types, combining the component hashes as
    ``hash(real) + _PyHASH_IMAG * hash(imag)``."""
    def impl(val):
        re_part = hash(val.real)
        im_part = hash(val.imag)
        # If the imaginary part is 0, im_part is 0 and the combination
        # reduces to re_part unchanged. That matters because numbers of
        # different types that compare equal must hash equal, i.e.
        # hash(x + 0*j) must equal hash(x).
        return process_return(re_part + _PyHASH_IMAG * im_part)
    return impl
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
# Python 3.8 strengthened its hash alg for tuples.
# This is a translation of CPython's tuplehash for Python >=3.8
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L338-L391 # noqa: E501

# These consts are needed for this alg variant, they are from:
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L353-L363 # noqa: E501
# Select the xxHash prime set and rotation matching the platform hash width.
if _Py_uhash_t.bitwidth // 8 > 4:
    # 64-bit platform: xxHash64 primes.
    _PyHASH_XXPRIME_1 = _Py_uhash_t(11400714785074694791)
    _PyHASH_XXPRIME_2 = _Py_uhash_t(14029467366897019727)
    _PyHASH_XXPRIME_5 = _Py_uhash_t(2870177450012600261)

    @register_jitable(locals={'x': types.uint64})
    def _PyHASH_XXROTATE(x):
        # Rotate left 31 bits
        return ((x << types.uint64(31)) | (x >> types.uint64(33)))
else:
    # 32-bit platform: xxHash32 primes.
    _PyHASH_XXPRIME_1 = _Py_uhash_t(2654435761)
    _PyHASH_XXPRIME_2 = _Py_uhash_t(2246822519)
    _PyHASH_XXPRIME_5 = _Py_uhash_t(374761393)

    @register_jitable(locals={'x': types.uint64})
    def _PyHASH_XXROTATE(x):
        # Rotate left 13 bits
        return ((x << types.uint64(13)) | (x >> types.uint64(19)))
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@register_jitable(locals={'acc': _Py_uhash_t, 'lane': _Py_uhash_t,
                          '_PyHASH_XXPRIME_5': _Py_uhash_t,
                          '_PyHASH_XXPRIME_1': _Py_uhash_t,
                          'tl': _Py_uhash_t})
def _tuple_hash(tup):
    """xxHash-style tuple hash (CPython >= 3.8 tuplehash translation).

    ``literal_unroll`` allows iteration over heterogeneous tuples; each
    element's hash is folded into the accumulator as one xxHash lane.
    """
    tl = len(tup)
    acc = _PyHASH_XXPRIME_5
    for x in literal_unroll(tup):
        lane = hash(x)
        # -1 signals a hashing error in the CPython protocol; propagate it.
        if lane == _Py_uhash_t(-1):
            return -1
        acc += lane * _PyHASH_XXPRIME_2
        acc = _PyHASH_XXROTATE(acc)
        acc *= _PyHASH_XXPRIME_1

    # Fold in the length so prefixes of a tuple hash differently.
    acc += tl ^ (_PyHASH_XXPRIME_5 ^ _Py_uhash_t(3527539))

    # -1 is reserved as the error sentinel; substitute CPython's constant.
    if acc == _Py_uhash_t(-1):
        return process_return(1546275796)

    return process_return(acc)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
@overload_method(types.BaseTuple, '__hash__')
def tuple_hash(val):
    """Bind ``tuple.__hash__`` to the shared ``_tuple_hash`` implementation."""
    def impl(tup):
        return _tuple_hash(tup)
    return impl
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# ------------------------------------------------------------------------------
|
| 361 |
+
# String/bytes hashing needs hashseed info, this is from:
|
| 362 |
+
# https://stackoverflow.com/a/41088757
|
| 363 |
+
# with thanks to Martijn Pieters
|
| 364 |
+
#
|
| 365 |
+
# Developer note:
|
| 366 |
+
# CPython makes use of an internal "hashsecret" which is essentially a struct
|
| 367 |
+
# containing some state that is set on CPython initialization and contains magic
|
| 368 |
+
# numbers used particularly in unicode/string hashing. This code binds to the
|
| 369 |
+
# Python runtime libraries in use by the current process and reads the
|
| 370 |
+
# "hashsecret" state so that it can be used by Numba. As this is done at runtime
|
| 371 |
+
# the behaviour and influence of the PYTHONHASHSEED environment variable is
|
| 372 |
+
# accommodated.
|
| 373 |
+
|
| 374 |
+
from ctypes import ( # noqa
|
| 375 |
+
c_size_t,
|
| 376 |
+
c_ubyte,
|
| 377 |
+
c_uint64,
|
| 378 |
+
pythonapi,
|
| 379 |
+
Structure,
|
| 380 |
+
Union,
|
| 381 |
+
) # noqa
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
class FNV(Structure):
    # ctypes view of the `fnv` member of CPython's _Py_HashSecret_t union:
    # two Py_hash_t-sized values used by the (unsupported) FNV algorithm.
    _fields_ = [
        ('prefix', c_size_t),
        ('suffix', c_size_t)
    ]
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class SIPHASH(Structure):
    # ctypes view of the `siphash` member of _Py_HashSecret_t: the two
    # 64-bit SipHash keys set at interpreter start (PYTHONHASHSEED).
    _fields_ = [
        ('k0', c_uint64),
        ('k1', c_uint64),
    ]
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
class DJBX33A(Structure):
    # ctypes view of the `djbx33a` member of _Py_HashSecret_t: suffix used
    # by the small-string DJBX33A optimisation (padding matches C layout).
    _fields_ = [
        ('padding', c_ubyte * 16),
        ('suffix', c_size_t),
    ]
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
class EXPAT(Structure):
    # ctypes view of the `expat` member of _Py_HashSecret_t (hash salt used
    # by the expat XML parser); not read by numba, present for layout.
    _fields_ = [
        ('padding', c_ubyte * 16),
        ('hashsalt', c_size_t),
    ]
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class _Py_HashSecret_t(Union):
    # ctypes mirror of CPython's _Py_HashSecret_t union (Include/pyhash.h),
    # used to read the interpreter's hash secret via ctypes.in_dll.
    _fields_ = [
        # ensure 24 bytes
        ('uc', c_ubyte * 24),
        # two Py_hash_t for FNV
        ('fnv', FNV),
        # two uint64 for SipHash24
        ('siphash', SIPHASH),
        # a different (!) Py_hash_t for small string optimization
        ('djbx33a', DJBX33A),
        ('expat', EXPAT),
    ]
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
# Pairs an injected LLVM symbol name with the ctypes value it points at.
_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
# Only a few members are needed at present
def _build_hashsecret():
    """Read hash secret from the Python process

    Returns
    -------
    info : dict
        - keys are "djbx33a_suffix", "siphash_k0", siphash_k1".
        - values are the namedtuple[symbol:str, value:int]
    """
    # Read hashsecret and inject it into the LLVM symbol map under the
    # prefix `_numba_hashsecret_`.
    pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
    info = {}

    def inject(name, val):
        symbol_name = "_numba_hashsecret_{}".format(name)
        val = ctypes.c_uint64(val)
        addr = ctypes.addressof(val)
        ll.add_symbol(symbol_name, addr)
        # Storing `val` in the returned dict keeps the ctypes buffer alive,
        # so the address registered with LLVM stays valid.
        info[name] = _hashsecret_entry(symbol=symbol_name, value=val)

    inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
    inject('siphash_k0', pyhashsecret.siphash.k0)
    inject('siphash_k1', pyhashsecret.siphash.k1)
    return info
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
# Capture the process-wide hash secret once at import time.
_hashsecret = _build_hashsecret()
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
# ------------------------------------------------------------------------------
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
# Everything below is only defined for hash algorithms numba knows how to
# mimic (FNV falls back to siphash24 with a warning).
if _Py_hashfunc_name in ('siphash13', 'siphash24', 'fnv'):

    # Check for use of the FNV hashing alg, warn users that it's not
    # implemented; functionality relying on properties derived from hashing
    # will be fine but hash values themselves are likely to be different.
    if _Py_hashfunc_name == 'fnv':
        msg = ("FNV hashing is not implemented in Numba. See PEP 456 "
               "https://www.python.org/dev/peps/pep-0456/ "
               "for rationale over not using FNV. Numba will continue to work, "
               "but hashes for built in types will be computed using "
               "siphash24. This will permit e.g. dictionaries to continue to "
               "behave as expected, however anything relying on the value of "
               "the hash opposed to hash as a derived property is likely to "
               "not work as expected.")
        warnings.warn(msg)
|
| 478 |
+
|
| 479 |
+
# This is a translation of CPython's siphash24 function:
|
| 480 |
+
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413 # noqa: E501
|
| 481 |
+
# and also, since Py 3.11, a translation of CPython's siphash13 function:
|
| 482 |
+
# https://github.com/python/cpython/blob/9dda9020abcf0d51d59b283a89c58c8e1fb0f574/Python/pyhash.c#L376-L424
|
| 483 |
+
# the only differences are in the use of SINGLE_ROUND in siphash13 vs.
|
| 484 |
+
# DOUBLE_ROUND in siphash24, and that siphash13 has an extra "ROUND" applied
|
| 485 |
+
# just before the final XORing of components to create the return value.
|
| 486 |
+
|
| 487 |
+
# /* *********************************************************************
|
| 488 |
+
# <MIT License>
|
| 489 |
+
# Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
|
| 490 |
+
|
| 491 |
+
# Permission is hereby granted, free of charge, to any person obtaining a
|
| 492 |
+
# copy of this software and associated documentation files (the "Software"),
|
| 493 |
+
# to deal in the Software without restriction, including without limitation
|
| 494 |
+
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
| 495 |
+
# and/or sell copies of the Software, and to permit persons to whom the
|
| 496 |
+
# Software is furnished to do so, subject to the following conditions:
|
| 497 |
+
|
| 498 |
+
# The above copyright notice and this permission notice shall be included in
|
| 499 |
+
# all copies or substantial portions of the Software.
|
| 500 |
+
|
| 501 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 502 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 503 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
| 504 |
+
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 505 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 506 |
+
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
| 507 |
+
# DEALINGS IN THE SOFTWARE.
|
| 508 |
+
# </MIT License>
|
| 509 |
+
|
| 510 |
+
# Original location:
|
| 511 |
+
# https://github.com/majek/csiphash/
|
| 512 |
+
|
| 513 |
+
# Solution inspired by code from:
|
| 514 |
+
# Samuel Neves (supercop/crypto_auth/siphash24/little)
|
| 515 |
+
#djb (supercop/crypto_auth/siphash24/little2)
|
| 516 |
+
# Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
|
| 517 |
+
|
| 518 |
+
# Modified for Python by Christian Heimes:
|
| 519 |
+
# - C89 / MSVC compatibility
|
| 520 |
+
# - _rotl64() on Windows
|
| 521 |
+
# - letoh64() fallback
|
| 522 |
+
# */
|
| 523 |
+
|
| 524 |
+
    @register_jitable(locals={'x': types.uint64,
                              'b': types.uint64, })
    def _ROTATE(x, b):
        # 64-bit rotate-left of x by b bits (SipHash ROTL primitive).
        return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))
|
| 528 |
+
|
| 529 |
+
    @register_jitable(locals={'a': types.uint64,
                              'b': types.uint64,
                              'c': types.uint64,
                              'd': types.uint64,
                              's': types.uint64,
                              't': types.uint64, })
    def _HALF_ROUND(a, b, c, d, s, t):
        # SipHash half-round: two ARX (add-rotate-xor) mixes with rotation
        # amounts s and t, followed by a 32-bit rotation of the first lane.
        a += b
        c += d
        b = _ROTATE(b, s) ^ a
        d = _ROTATE(d, t) ^ c
        a = _ROTATE(a, 32)
        return a, b, c, d
|
| 542 |
+
|
| 543 |
+
    @register_jitable(locals={'v0': types.uint64,
                              'v1': types.uint64,
                              'v2': types.uint64,
                              'v3': types.uint64, })
    def _SINGLE_ROUND(v0, v1, v2, v3):
        # One full SipRound: two half-rounds with the reference rotation
        # constants (13/16 then 17/21).
        v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
        v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
        return v0, v1, v2, v3
|
| 551 |
+
|
| 552 |
+
    @register_jitable(locals={'v0': types.uint64,
                              'v1': types.uint64,
                              'v2': types.uint64,
                              'v3': types.uint64, })
    def _DOUBLE_ROUND(v0, v1, v2, v3):
        # Two SipRounds back to back; used by siphash24's compression step.
        v0, v1, v2, v3 = _SINGLE_ROUND(v0, v1, v2, v3)
        v0, v1, v2, v3 = _SINGLE_ROUND(v0, v1, v2, v3)
        return v0, v1, v2, v3
|
| 560 |
+
|
| 561 |
+
    def _gen_siphash(alg):
        """Build a jitted siphash13 or siphash24 keyed hash function.

        The two variants differ only in the compression round (single vs.
        double SipRound) and in siphash13's extra round before the final
        XOR; both choices are fixed here at closure-creation time.
        """
        if alg == 'siphash13':
            _ROUNDER = _SINGLE_ROUND
            _EXTRA_ROUND = True
        elif alg == 'siphash24':
            _ROUNDER = _DOUBLE_ROUND
            _EXTRA_ROUND = False
        else:
            assert 0, 'unreachable'

        @register_jitable(locals={'v0': types.uint64,
                                  'v1': types.uint64,
                                  'v2': types.uint64,
                                  'v3': types.uint64,
                                  'b': types.uint64,
                                  'mi': types.uint64,
                                  't': types.uint64,
                                  'mask': types.uint64,
                                  'jmp': types.uint64,
                                  'ohexefef': types.uint64})
        def _siphash(k0, k1, src, src_sz):
            # Length byte goes in the top 8 bits of the final block.
            b = types.uint64(src_sz) << 56
            # Initialise state from the keys and the "somepseudorandomly
            # generatedbytes" constants of the SipHash reference.
            v0 = k0 ^ types.uint64(0x736f6d6570736575)
            v1 = k1 ^ types.uint64(0x646f72616e646f6d)
            v2 = k0 ^ types.uint64(0x6c7967656e657261)
            v3 = k1 ^ types.uint64(0x7465646279746573)

            # Compress whole 8-byte words.
            idx = 0
            while (src_sz >= 8):
                mi = grab_uint64_t(src, idx)
                idx += 1
                src_sz -= 8
                v3 ^= mi
                v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
                v0 ^= mi

            # this is the switch fallthrough:
            # https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400 # noqa: E501
            # Each `if` mirrors one case label: byte k of the tail is placed
            # into bits [8k, 8k+8) of t, highest remaining byte first.
            t = types.uint64(0x0)
            boffset = idx * 8
            ohexefef = types.uint64(0xff)
            if src_sz >= 7:
                jmp = (6 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6))
                                  << jmp)
            if src_sz >= 6:
                jmp = (5 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5))
                                  << jmp)
            if src_sz >= 5:
                jmp = (4 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4))
                                  << jmp)
            if src_sz >= 4:
                # The low 4 bytes are handled together (CPython reads a
                # uint32 here); clear them then insert byte by byte.
                t &= types.uint64(0xffffffff00000000)
                for i in range(4):
                    jmp = i * 8
                    mask = ~types.uint64(ohexefef << jmp)
                    t = (t & mask) | (types.uint64(grab_byte(src, boffset + i))
                                      << jmp)
            if src_sz >= 3:
                jmp = (2 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2))
                                  << jmp)
            if src_sz >= 2:
                jmp = (1 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1))
                                  << jmp)
            if src_sz >= 1:
                mask = ~(ohexefef)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))

            # Compress the final (length | tail) block, then finalise.
            b |= t
            v3 ^= b
            v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            v0 ^= b
            v2 ^= ohexefef
            v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            if _EXTRA_ROUND:
                # siphash13 applies one more round before the final XOR.
                v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            t = (v0 ^ v1) ^ (v2 ^ v3)
            return t

        return _siphash
|
| 651 |
+
|
| 652 |
+
    # Instantiate both variants and select the one matching the running
    # interpreter's configured hash algorithm.
    _siphash13 = _gen_siphash('siphash13')
    _siphash24 = _gen_siphash('siphash24')

    _siphasher = _siphash13 if _Py_hashfunc_name == 'siphash13' else _siphash24
|
| 656 |
+
|
| 657 |
+
else:
    # The interpreter reports a hash algorithm numba cannot mimic; fail
    # loudly at import time rather than produce incompatible hashes.
    msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
    raise ValueError(msg)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
@intrinsic
def _inject_hashsecret_read(tyctx, name):
    """Emit code to load the hashsecret.

    ``name`` must be a string literal naming a key of ``_hashsecret``; the
    generated code loads the 64-bit value from the corresponding injected
    LLVM global symbol.
    """
    if not isinstance(name, types.StringLiteral):
        raise errors.TypingError("requires literal string")

    sym = _hashsecret[name.literal_value].symbol
    resty = types.uint64
    sig = resty(name)

    def impl(cgctx, builder, sig, args):
        mod = builder.module
        try:
            # Search for existing global
            gv = mod.get_global(sym)
        except KeyError:
            # Inject the symbol if it does not already exist.
            gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
        v = builder.load(gv)
        return v

    return sig, impl
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
def _load_hashsecret(name):
    """Pure-Python read of a hash-secret member; the jitted path goes
    through ``_inject_hashsecret_read`` (see overload below)."""
    return _hashsecret[name].value
|
| 689 |
+
|
| 690 |
+
|
| 691 |
+
@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
    # In nopython mode, read the secret through the injected LLVM symbol
    # rather than the Python-level dict.
    def imp(name):
        return _inject_hashsecret_read(name)
    return imp
|
| 696 |
+
|
| 697 |
+
|
| 698 |
+
# This is a translation of CPython's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191 # noqa: E501


@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
    """Hash ``_len`` bytes of ``val``: DJBX33A for very short inputs,
    otherwise the process-keyed siphash variant."""
    if (_len == 0):
        return process_return(0)

    if (_len < _Py_HASH_CUTOFF):
        # TODO: this branch needs testing, needs a CPython setup for it!
        # /* Optimize hashing of very small strings with inline DJBX33A. */
        _hash = _Py_uhash_t(5381)  # /* DJBX33A starts with 5381 */
        for idx in range(_len):
            # hash = hash * 33 + byte
            _hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))

        _hash ^= _len
        _hash ^= _load_hashsecret('djbx33a_suffix')
    else:
        tmp = _siphasher(types.uint64(_load_hashsecret('siphash_k0')),
                         types.uint64(_load_hashsecret('siphash_k1')),
                         val, _len)
        _hash = process_return(tmp)
    return process_return(_hash)
|
| 722 |
+
|
| 723 |
+
# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663 # noqa: E501


@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
    """``__hash__`` for unicode strings: hash the raw underlying bytes
    (kind byte-width * length), honouring a cached value when present."""
    from numba.cpython.unicode import _kind_to_byte_width

    def impl(val):
        kindwidth = _kind_to_byte_width(val._kind)
        _len = len(val)
        # use the cache if possible (-1 means "not yet computed")
        current_hash = val._hash
        if current_hash != -1:
            return current_hash
        else:
            # cannot write hash value to cache in the unicode struct due to
            # pass by value on the struct making the struct member immutable
            return _Py_HashBytes(val._data, kindwidth * _len)

    return impl
|
lib/python3.10/site-packages/numba/cpython/setobj.py
ADDED
|
@@ -0,0 +1,1711 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Support for native homogeneous sets.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import collections
|
| 7 |
+
import contextlib
|
| 8 |
+
import math
|
| 9 |
+
import operator
|
| 10 |
+
from functools import cached_property
|
| 11 |
+
|
| 12 |
+
from llvmlite import ir
|
| 13 |
+
from numba.core import types, typing, cgutils
|
| 14 |
+
from numba.core.imputils import (lower_builtin, lower_cast,
|
| 15 |
+
iternext_impl, impl_ret_borrowed,
|
| 16 |
+
impl_ret_new_ref, impl_ret_untracked,
|
| 17 |
+
for_iter, call_len, RefType)
|
| 18 |
+
from numba.misc import quicksort
|
| 19 |
+
from numba.cpython import slicing
|
| 20 |
+
from numba.core.errors import NumbaValueError, TypingError
|
| 21 |
+
from numba.core.extending import overload, overload_method, intrinsic
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def get_payload_struct(context, builder, set_type, ptr):
    """
    Return the payload structure of the set at *ptr* (as a reference,
    so that mutations through it are visible to every holder).
    """
    payload_ty = types.SetPayload(set_type)
    data_ptr_ty = context.get_data_type(payload_ty).as_pointer()
    casted = builder.bitcast(ptr, data_ptr_ty)
    return context.make_data_helper(builder, payload_ty, ref=casted)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_entry_size(context, set_type):
    """
    Compute the ABI size, in bytes, of one entry of *set_type*.
    """
    entry_llty = context.get_data_type(types.SetEntry(set_type))
    return context.get_abi_sizeof(entry_llty)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Note these values are special:
# - EMPTY is obtained by issuing memset(..., 0xFF)
# - (unsigned) EMPTY > (unsigned) DELETED > any other hash value
EMPTY = -1
DELETED = -2
# Hash substituted when the computed hash collides with one of the two
# sentinels above (see get_hash_value()).
FALLBACK = -43

# Minimal size of entries table. Must be a power of 2!
MINSIZE = 16

# Number of cache-friendly linear probes before switching to non-linear probing
LINEAR_PROBES = 3

# When true, allocation helpers may emit extra debugging output.
DEBUG_ALLOCS = False
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def get_hash_value(context, builder, typ, value):
    """
    Hash *value* (of Numba type *typ*) using the jitted hash()
    implementation, remapping reserved sentinel values to FALLBACK.
    """
    tyctx = context.typing_context
    hash_fnty = tyctx.resolve_value_type(hash)
    hash_sig = hash_fnty.get_call_type(tyctx, (typ,), {})
    hash_impl = context.get_function(hash_fnty, hash_sig)
    raw = hash_impl(builder, (value,))
    # EMPTY and DELETED are sentinels in the entries table; a real hash
    # value must never collide with them.
    usable = is_hash_used(context, builder, raw)
    return builder.select(usable, raw, ir.Constant(raw.type, FALLBACK))
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@intrinsic
def _get_hash_value_intrinsic(typingctx, value):
    # Expose get_hash_value() to jitted code, so the hash of an item can
    # be computed from inside a compiled function (used by
    # SetInstance._pyapi_get_hash_value via call_jit_code).
    def impl(context, builder, typ, args):
        return get_hash_value(context, builder, value, args[0])
    # Reuse the signature of the builtin hash() applied to *value*.
    fnty = typingctx.resolve_value_type(hash)
    sig = fnty.get_call_type(typingctx, (value,), {})
    return sig, impl
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def is_hash_empty(context, builder, h):
    """
    Return an i1 that is true when *h* is the EMPTY sentinel.
    """
    sentinel = ir.Constant(h.type, EMPTY)
    return builder.icmp_unsigned('==', h, sentinel)
|
| 89 |
+
|
| 90 |
+
def is_hash_deleted(context, builder, h):
    """
    Return an i1 that is true when *h* is the DELETED sentinel.
    """
    sentinel = ir.Constant(h.type, DELETED)
    return builder.icmp_unsigned('==', h, sentinel)
|
| 96 |
+
|
| 97 |
+
def is_hash_used(context, builder, h):
    """
    Return an i1 that is true when *h* marks an active (used) entry.
    """
    # Compared unsigned, EMPTY (-1) and DELETED (-2) are the two largest
    # values, so anything strictly below DELETED is a genuine hash.
    threshold = ir.Constant(h.type, DELETED)
    return builder.icmp_unsigned('<', h, threshold)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def check_all_set(*args):
    """
    Typing guard used by set operations.

    Raises
    ------
    TypingError
        If any argument is not a Set, or the Sets do not all share the
        same element dtype.
    """
    # Generator expressions avoid materializing throwaway lists and let
    # all() short-circuit on the first failing argument.
    if not all(isinstance(typ, types.Set) for typ in args):
        raise TypingError(f"All arguments must be Sets, got {args}")

    if not all(args[0].dtype == s.dtype for s in args):
        raise TypingError(f"All Sets must be of the same type, got {args}")
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# One step of a payload iteration: the entry index, the entry helper
# itself, and a callable that breaks out of the loop early.
SetLoop = collections.namedtuple('SetLoop', ('index', 'entry', 'do_break'))
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class _SetPayload(object):
    """
    Wrapper around the NRT-allocated payload of a set: the header fields
    (mask, used, fill, finger, dirty) and the open-addressed entries
    table.  All accessors emit IR through the stored builder.
    """

    def __init__(self, context, builder, set_type, ptr):
        # *ptr* points to the start of the NRT-allocated payload area.
        payload = get_payload_struct(context, builder, set_type, ptr)
        self._context = context
        self._builder = builder
        self._ty = set_type
        self._payload = payload
        # Pointer to the entries array inside the payload struct.
        self._entries = payload._get_ptr_by_name('entries')
        self._ptr = ptr

    @property
    def mask(self):
        # Table size minus one; used to wrap probe indices.
        return self._payload.mask

    @mask.setter
    def mask(self, value):
        # CAUTION: mask must be a power of 2 minus 1
        self._payload.mask = value

    @property
    def used(self):
        # Number of active entries.
        return self._payload.used

    @used.setter
    def used(self, value):
        self._payload.used = value

    @property
    def fill(self):
        # Number of active + deleted entries.
        return self._payload.fill

    @fill.setter
    def fill(self, value):
        self._payload.fill = value

    @property
    def finger(self):
        # Search finger used by _next_entry() / pop().
        return self._payload.finger

    @finger.setter
    def finger(self, value):
        self._payload.finger = value

    @property
    def dirty(self):
        # Mutation flag for reflected sets.
        return self._payload.dirty

    @dirty.setter
    def dirty(self, value):
        self._payload.dirty = value

    @property
    def entries(self):
        """
        A pointer to the start of the entries array.
        """
        return self._entries

    @property
    def ptr(self):
        """
        A pointer to the start of the NRT-allocated area.
        """
        return self._ptr

    def get_entry(self, idx):
        """
        Get entry number *idx*.
        """
        entry_ptr = cgutils.gep(self._builder, self._entries, idx)
        entry = self._context.make_data_helper(self._builder,
                                               types.SetEntry(self._ty),
                                               ref=entry_ptr)
        return entry

    def _lookup(self, item, h, for_insert=False):
        """
        Lookup the *item* with the given hash values in the entries.

        Return a (found, entry index) tuple:
        - If found is true, <entry index> points to the entry containing
          the item.
        - If found is false, <entry index> points to the empty entry that
          the item can be written to (only if *for_insert* is true)
        """
        context = self._context
        builder = self._builder

        intp_t = h.type

        mask = self.mask
        dtype = self._ty.dtype
        # Resolve the jitted operator.eq implementation for the element
        # type, to compare keys once the hashes match.
        tyctx = context.typing_context
        fnty = tyctx.resolve_value_type(operator.eq)
        sig = fnty.get_call_type(tyctx, (dtype, dtype), {})
        eqfn = context.get_function(fnty, sig)

        one = ir.Constant(intp_t, 1)
        five = ir.Constant(intp_t, 5)

        # The perturbation value for probing
        perturb = cgutils.alloca_once_value(builder, h)
        # The index of the entry being considered: start with (hash & mask)
        index = cgutils.alloca_once_value(builder,
                                          builder.and_(h, mask))
        if for_insert:
            # The index of the first deleted entry in the lookup chain
            free_index_sentinel = mask.type(-1)  # highest unsigned index
            free_index = cgutils.alloca_once_value(builder, free_index_sentinel)

        bb_body = builder.append_basic_block("lookup.body")
        bb_found = builder.append_basic_block("lookup.found")
        bb_not_found = builder.append_basic_block("lookup.not_found")
        bb_end = builder.append_basic_block("lookup.end")

        def check_entry(i):
            """
            Check entry *i* against the value being searched for.
            """
            entry = self.get_entry(i)
            entry_hash = entry.hash

            with builder.if_then(builder.icmp_unsigned('==', h, entry_hash)):
                # Hashes are equal, compare values
                # (note this also ensures the entry is used)
                eq = eqfn(builder, (item, entry.key))
                with builder.if_then(eq):
                    builder.branch(bb_found)

            with builder.if_then(is_hash_empty(context, builder, entry_hash)):
                # An empty slot terminates the probe chain.
                builder.branch(bb_not_found)

            if for_insert:
                # Memorize the index of the first deleted entry
                with builder.if_then(is_hash_deleted(context, builder,
                                                     entry_hash)):
                    j = builder.load(free_index)
                    j = builder.select(builder.icmp_unsigned(
                        '==', j, free_index_sentinel), i, j)
                    builder.store(j, free_index)

        # First linear probing. When the number of collisions is small,
        # the lineary probing loop achieves better cache locality and
        # is also slightly cheaper computationally.
        with cgutils.for_range(builder, ir.Constant(intp_t, LINEAR_PROBES)):
            i = builder.load(index)
            check_entry(i)
            i = builder.add(i, one)
            i = builder.and_(i, mask)
            builder.store(i, index)

        # If not found after linear probing, switch to a non-linear
        # perturbation keyed on the unmasked hash value.
        # XXX how to tell LLVM this branch is unlikely?
        builder.branch(bb_body)
        with builder.goto_block(bb_body):
            i = builder.load(index)
            check_entry(i)

            # Perturb to go to next entry:
            #   perturb >>= 5
            #   i = (i * 5 + 1 + perturb) & mask
            p = builder.load(perturb)
            p = builder.lshr(p, five)
            i = builder.add(one, builder.mul(i, five))
            i = builder.and_(mask, builder.add(i, p))
            builder.store(i, index)
            builder.store(p, perturb)
            # Loop
            builder.branch(bb_body)

        with builder.goto_block(bb_not_found):
            if for_insert:
                # Not found => for insertion, return the index of the first
                # deleted entry (if any), to avoid creating an infinite
                # lookup chain (issue #1913).
                i = builder.load(index)
                j = builder.load(free_index)
                i = builder.select(builder.icmp_unsigned(
                    '==', j, free_index_sentinel), i, j)
                builder.store(i, index)
            builder.branch(bb_end)

        with builder.goto_block(bb_found):
            builder.branch(bb_end)

        builder.position_at_end(bb_end)

        # Merge the two exit paths into a single boolean result.
        found = builder.phi(ir.IntType(1), 'found')
        found.add_incoming(cgutils.true_bit, bb_found)
        found.add_incoming(cgutils.false_bit, bb_not_found)

        return found, builder.load(index)

    @contextlib.contextmanager
    def _iterate(self, start=None):
        """
        Iterate over the payload's used entries. Yield a SetLoop.
        """
        context = self._context
        builder = self._builder

        intp_t = context.get_value_type(types.intp)
        one = ir.Constant(intp_t, 1)
        # Table size is mask + 1.
        size = builder.add(self.mask, one)

        with cgutils.for_range(builder, size, start=start) as range_loop:
            entry = self.get_entry(range_loop.index)
            is_used = is_hash_used(context, builder, entry.hash)
            # Only used entries are yielded to the caller's block.
            with builder.if_then(is_used):
                loop = SetLoop(index=range_loop.index, entry=entry,
                               do_break=range_loop.do_break)
                yield loop

    @contextlib.contextmanager
    def _next_entry(self):
        """
        Yield a random entry from the payload. Caller must ensure the
        set isn't empty, otherwise the function won't end.
        """
        context = self._context
        builder = self._builder

        intp_t = context.get_value_type(types.intp)
        zero = ir.Constant(intp_t, 0)  # NOTE(review): unused here
        one = ir.Constant(intp_t, 1)
        mask = self.mask

        # Start walking the entries from the stored "search finger" and
        # break as soon as we find a used entry.

        bb_body = builder.append_basic_block('next_entry_body')
        bb_end = builder.append_basic_block('next_entry_end')

        index = cgutils.alloca_once_value(builder, self.finger)
        builder.branch(bb_body)

        with builder.goto_block(bb_body):
            i = builder.load(index)
            # ANDing with mask ensures we stay inside the table boundaries
            i = builder.and_(mask, builder.add(i, one))
            builder.store(i, index)
            entry = self.get_entry(i)
            is_used = is_hash_used(context, builder, entry.hash)
            builder.cbranch(is_used, bb_end, bb_body)

        builder.position_at_end(bb_end)

        # Update the search finger with the next position. This avoids
        # O(n**2) behaviour when pop() is called in a loop.
        i = builder.load(index)
        self.finger = i
        yield self.get_entry(i)
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
class SetInstance(object):
|
| 373 |
+
|
| 374 |
+
    def __init__(self, context, builder, set_type, set_val):
        # Wrap *set_val* (an existing set value) in helper structures
        # for code generation against *set_type*.
        self._context = context
        self._builder = builder
        self._ty = set_type
        # Byte size of one (hash, key) entry, used for allocations.
        self._entrysize = get_entry_size(context, set_type)
        self._set = context.make_helper(builder, set_type, set_val)
|
| 380 |
+
|
| 381 |
+
    @property
    def dtype(self):
        # Element type of the set.
        return self._ty.dtype
|
| 384 |
+
|
| 385 |
+
    @property
    def payload(self):
        """
        The _SetPayload for this set.  Recomputed on every access.
        """
        # This cannot be cached as the pointer can move around!
        context = self._context
        builder = self._builder

        # The payload lives in the NRT-managed allocation.
        ptr = self._context.nrt.meminfo_data(builder, self.meminfo)
        return _SetPayload(context, builder, self._ty, ptr)
|
| 396 |
+
|
| 397 |
+
    @property
    def value(self):
        # The raw value of the underlying set helper structure.
        return self._set._getvalue()
|
| 400 |
+
|
| 401 |
+
    @property
    def meminfo(self):
        # NRT meminfo owning the payload allocation (see .payload).
        return self._set.meminfo
|
| 404 |
+
|
| 405 |
+
    @property
    def parent(self):
        # Parent object field of the set helper — presumably the Python
        # set for reflected sets; confirm against callers.
        return self._set.parent
|
| 408 |
+
|
| 409 |
+
    @parent.setter
    def parent(self, value):
        # Store the parent object on the set helper.
        self._set.parent = value
|
| 412 |
+
|
| 413 |
+
    def get_size(self):
        """
        Return the number of elements in the set.
        """
        return self.payload.used
|
| 418 |
+
|
| 419 |
+
def set_dirty(self, val):
|
| 420 |
+
if self._ty.reflected:
|
| 421 |
+
self.payload.dirty = cgutils.true_bit if val else cgutils.false_bit
|
| 422 |
+
|
| 423 |
+
    def _add_entry(self, payload, entry, item, h, do_resize=True):
        # Write *item* (with hash *h*) into *entry*, which the caller
        # has already located (e.g. via _lookup(for_insert=True)), then
        # update the payload counters.
        context = self._context
        builder = self._builder

        old_hash = entry.hash
        entry.hash = h
        # The set keeps a reference to the stored item.
        self.incref_value(item)
        entry.key = item
        # used++
        used = payload.used
        one = ir.Constant(used.type, 1)
        used = payload.used = builder.add(used, one)
        # fill++ if entry wasn't a deleted one
        with builder.if_then(is_hash_empty(context, builder, old_hash),
                             likely=True):
            payload.fill = builder.add(payload.fill, one)
        # Grow table if necessary
        if do_resize:
            self.upsize(used)
        self.set_dirty(True)
|
| 443 |
+
|
| 444 |
+
    def _add_key(self, payload, item, h, do_resize=True, do_incref=True):
        # Insert *item* (with precomputed hash *h*) into *payload*,
        # unless it is already present.
        context = self._context
        builder = self._builder

        found, i = payload._lookup(item, h, for_insert=True)
        not_found = builder.not_(found)

        with builder.if_then(not_found):
            # Not found => add it
            entry = payload.get_entry(i)
            old_hash = entry.hash
            entry.hash = h
            if do_incref:
                self.incref_value(item)
            entry.key = item
            # used++
            used = payload.used
            one = ir.Constant(used.type, 1)
            used = payload.used = builder.add(used, one)
            # fill++ if entry wasn't a deleted one
            with builder.if_then(is_hash_empty(context, builder, old_hash),
                                 likely=True):
                payload.fill = builder.add(payload.fill, one)
            # Grow table if necessary
            if do_resize:
                self.upsize(used)
            self.set_dirty(True)
|
| 471 |
+
|
| 472 |
+
    def _remove_entry(self, payload, entry, do_resize=True, do_decref=True):
        # Delete *entry* from *payload* and update the counters.
        # Mark entry deleted
        entry.hash = ir.Constant(entry.hash.type, DELETED)
        if do_decref:
            self.decref_value(entry.key)
        # used--
        used = payload.used
        one = ir.Constant(used.type, 1)
        used = payload.used = self._builder.sub(used, one)
        # Shrink table if necessary
        if do_resize:
            self.downsize(used)
        self.set_dirty(True)
|
| 485 |
+
|
| 486 |
+
    def _remove_key(self, payload, item, h, do_resize=True):
        # Remove *item* (with hash *h*) from *payload* if present.
        # Return an i1 telling whether it was found.
        context = self._context
        builder = self._builder

        found, i = payload._lookup(item, h)

        with builder.if_then(found):
            entry = payload.get_entry(i)
            self._remove_entry(payload, entry, do_resize)

        return found
|
| 497 |
+
|
| 498 |
+
def add(self, item, do_resize=True):
|
| 499 |
+
context = self._context
|
| 500 |
+
builder = self._builder
|
| 501 |
+
|
| 502 |
+
payload = self.payload
|
| 503 |
+
h = get_hash_value(context, builder, self._ty.dtype, item)
|
| 504 |
+
self._add_key(payload, item, h, do_resize)
|
| 505 |
+
|
| 506 |
+
    def add_pyapi(self, pyapi, item, do_resize=True):
        """A version of .add for use inside functions following Python calling
        convention.
        """
        context = self._context
        builder = self._builder

        payload = self.payload
        # The hash is computed through call_jit_code so nopython errors
        # propagate as Python exceptions.
        h = self._pyapi_get_hash_value(pyapi, context, builder, item)
        self._add_key(payload, item, h, do_resize)
|
| 516 |
+
|
| 517 |
+
    def _pyapi_get_hash_value(self, pyapi, context, builder, item):
        """Python API compatible version of `get_hash_value()`.
        """
        argtypes = [self._ty.dtype]
        resty = types.intp

        def wrapper(val):
            # Bridges into jitted code via the intrinsic defined above.
            return _get_hash_value_intrinsic(val)

        args = [item]
        sig = typing.signature(resty, *argtypes)
        is_error, retval = pyapi.call_jit_code(wrapper, sig, args)
        # Handle return status
        with builder.if_then(is_error, likely=False):
            # Raise nopython exception as a Python exception
            builder.ret(pyapi.get_null_object())
        return retval
|
| 534 |
+
|
| 535 |
+
def contains(self, item):
|
| 536 |
+
context = self._context
|
| 537 |
+
builder = self._builder
|
| 538 |
+
|
| 539 |
+
payload = self.payload
|
| 540 |
+
h = get_hash_value(context, builder, self._ty.dtype, item)
|
| 541 |
+
found, i = payload._lookup(item, h)
|
| 542 |
+
return found
|
| 543 |
+
|
| 544 |
+
def discard(self, item):
|
| 545 |
+
context = self._context
|
| 546 |
+
builder = self._builder
|
| 547 |
+
|
| 548 |
+
payload = self.payload
|
| 549 |
+
h = get_hash_value(context, builder, self._ty.dtype, item)
|
| 550 |
+
found = self._remove_key(payload, item, h)
|
| 551 |
+
return found
|
| 552 |
+
|
| 553 |
+
    def pop(self):
        # Remove and return an arbitrary element.  Caller must ensure
        # the set is non-empty (see _next_entry()).
        context = self._context
        builder = self._builder

        lty = context.get_value_type(self._ty.dtype)
        key = cgutils.alloca_once(builder, lty)

        payload = self.payload
        with payload._next_entry() as entry:
            builder.store(entry.key, key)
            # since the value is returned don't decref in _remove_entry()
            self._remove_entry(payload, entry, do_decref=False)

        return builder.load(key)
|
| 567 |
+
|
| 568 |
+
def clear(self):
|
| 569 |
+
context = self._context
|
| 570 |
+
builder = self._builder
|
| 571 |
+
|
| 572 |
+
intp_t = context.get_value_type(types.intp)
|
| 573 |
+
minsize = ir.Constant(intp_t, MINSIZE)
|
| 574 |
+
self._replace_payload(minsize)
|
| 575 |
+
self.set_dirty(True)
|
| 576 |
+
|
| 577 |
+
    def copy(self):
        """
        Return a copy of this set.
        """
        context = self._context
        builder = self._builder

        payload = self.payload
        used = payload.used
        fill = payload.fill

        other = type(self)(context, builder, self._ty, None)

        # used == fill means no DELETED slots, so the raw table layout
        # can be copied verbatim.
        no_deleted_entries = builder.icmp_unsigned('==', used, fill)
        with builder.if_else(no_deleted_entries, likely=True) \
            as (if_no_deleted, if_deleted):
            with if_no_deleted:
                # No deleted entries => raw copy the payload
                ok = other._copy_payload(payload)
                with builder.if_then(builder.not_(ok), likely=False):
                    context.call_conv.return_user_exc(builder, MemoryError,
                                                      ("cannot copy set",))

            with if_deleted:
                # Deleted entries => re-insert entries one by one
                nentries = self.choose_alloc_size(context, builder, used)
                ok = other._allocate_payload(nentries)
                with builder.if_then(builder.not_(ok), likely=False):
                    context.call_conv.return_user_exc(builder, MemoryError,
                                                      ("cannot copy set",))

                # Hashes are already computed; reuse them on re-insert.
                other_payload = other.payload
                with payload._iterate() as loop:
                    entry = loop.entry
                    other._add_key(other_payload, entry.key, entry.hash,
                                   do_resize=False)

        return other
|
| 615 |
+
|
| 616 |
+
    def intersect(self, other):
        """
        In-place intersection with *other* set.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload

        # Drop every element of self that is absent from other.
        with payload._iterate() as loop:
            entry = loop.entry
            found, _ = other_payload._lookup(entry.key, entry.hash)
            with builder.if_then(builder.not_(found)):
                self._remove_entry(payload, entry, do_resize=False)

        # Final downsize
        self.downsize(payload.used)
|
| 633 |
+
|
| 634 |
+
    def difference(self, other):
        """
        In-place difference with *other* set.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload

        # Drop every element of other from self (hashes are reused).
        with other_payload._iterate() as loop:
            entry = loop.entry
            self._remove_key(payload, entry.key, entry.hash, do_resize=False)

        # Final downsize
        self.downsize(payload.used)
|
| 649 |
+
|
| 650 |
+
    def symmetric_difference(self, other):
        """
        In-place symmetric difference with *other* set.
        """
        context = self._context
        builder = self._builder
        other_payload = other.payload

        with other_payload._iterate() as loop:
            key = loop.entry.key
            h = loop.entry.hash
            # We must reload our payload as it may be resized during the loop
            payload = self.payload
            found, i = payload._lookup(key, h, for_insert=True)
            entry = payload.get_entry(i)
            with builder.if_else(found) as (if_common, if_not_common):
                with if_common:
                    # Present in both => remove from self.
                    self._remove_entry(payload, entry, do_resize=False)
                with if_not_common:
                    # Only in other => add to self.
                    self._add_entry(payload, entry, key, h)

        # Final downsize
        self.downsize(self.payload.used)
|
| 673 |
+
|
| 674 |
+
    def issubset(self, other, strict=False):
        # Return an i1 telling whether self is a (strict, if requested)
        # subset of *other*.
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload

        cmp_op = '<' if strict else '<='

        res = cgutils.alloca_once_value(builder, cgutils.true_bit)
        with builder.if_else(
            builder.icmp_unsigned(cmp_op, payload.used, other_payload.used)
            ) as (if_smaller, if_larger):
            with if_larger:
                # self larger than other => self cannot possibly a subset
                builder.store(cgutils.false_bit, res)
            with if_smaller:
                # check whether each key of self is in other
                with payload._iterate() as loop:
                    entry = loop.entry
                    found, _ = other_payload._lookup(entry.key, entry.hash)
                    with builder.if_then(builder.not_(found)):
                        builder.store(cgutils.false_bit, res)
                        loop.do_break()

        return builder.load(res)
|
| 699 |
+
|
| 700 |
+
    def isdisjoint(self, other):
        # Return an i1 telling whether self and *other* share no element.
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload

        res = cgutils.alloca_once_value(builder, cgutils.true_bit)

        def check(smaller, larger):
            # Loop over the smaller of the two, and search in the larger
            with smaller._iterate() as loop:
                entry = loop.entry
                found, _ = larger._lookup(entry.key, entry.hash)
                with builder.if_then(found):
                    builder.store(cgutils.false_bit, res)
                    loop.do_break()

        with builder.if_else(
            builder.icmp_unsigned('>', payload.used, other_payload.used)
            ) as (if_larger, otherwise):

            with if_larger:
                # len(self) > len(other)
                check(other_payload, payload)

            with otherwise:
                # len(self) <= len(other)
                check(payload, other_payload)

        return builder.load(res)
|
| 730 |
+
|
| 731 |
+
    def equals(self, other):
        """
        Return an i1 that is true if *self* and *other* contain exactly
        the same elements.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload

        res = cgutils.alloca_once_value(builder, cgutils.true_bit)
        with builder.if_else(
            builder.icmp_unsigned('==', payload.used, other_payload.used)
            ) as (if_same_size, otherwise):
            with if_same_size:
                # same sizes => check whether each key of self is in other
                with payload._iterate() as loop:
                    entry = loop.entry
                    found, _ = other_payload._lookup(entry.key, entry.hash)
                    with builder.if_then(builder.not_(found)):
                        builder.store(cgutils.false_bit, res)
                        loop.do_break()
            with otherwise:
                # different sizes => cannot possibly be equal
                builder.store(cgutils.false_bit, res)

        return builder.load(res)
|
| 755 |
+
    @classmethod
    def allocate_ex(cls, context, builder, set_type, nitems=None):
        """
        Allocate a SetInstance with its storage.
        Return a (ok, instance) tuple where *ok* is a LLVM boolean and
        *instance* is a SetInstance object (the object's contents are
        only valid when *ok* is true).
        """
        intp_t = context.get_value_type(types.intp)

        if nitems is None:
            # No size hint: start from the minimal table size
            nentries = ir.Constant(intp_t, MINSIZE)
        else:
            if isinstance(nitems, int):
                # Compile-time Python int -> LLVM constant
                nitems = ir.Constant(intp_t, nitems)
            nentries = cls.choose_alloc_size(context, builder, nitems)

        self = cls(context, builder, set_type, None)
        ok = self._allocate_payload(nentries)
        return ok, self
|
| 776 |
+
    @classmethod
    def allocate(cls, context, builder, set_type, nitems=None):
        """
        Allocate a SetInstance with its storage.  Same as allocate_ex(),
        but return an initialized *instance*.  If allocation failed,
        control is transferred to the caller using the target's current
        call convention.
        """
        ok, self = cls.allocate_ex(context, builder, set_type, nitems)
        with builder.if_then(builder.not_(ok), likely=False):
            # Allocation failure -> raise MemoryError in the caller
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot allocate set",))
        return self
|
| 790 |
+
    @classmethod
    def from_meminfo(cls, context, builder, set_type, meminfo):
        """
        Allocate a new set instance pointing to an existing payload
        (a meminfo pointer).
        Note the parent field has to be filled by the caller.
        """
        self = cls(context, builder, set_type, None)
        self._set.meminfo = meminfo
        self._set.parent = context.get_constant_null(types.pyobject)
        # Take a reference on behalf of the new instance
        context.nrt.incref(builder, set_type, self.value)
        # Payload is part of the meminfo, no need to touch it
        return self
|
| 804 |
+
    @classmethod
    def choose_alloc_size(cls, context, builder, nitems):
        """
        Choose a suitable number of entries for the given number of items:
        the smallest power of two >= MINSIZE that is at least 2 * nitems
        (keeping the load factor at or below 0.5).
        """
        intp_t = nitems.type
        one = ir.Constant(intp_t, 1)
        minsize = ir.Constant(intp_t, MINSIZE)

        # Ensure number of entries >= 2 * used
        min_entries = builder.shl(nitems, one)
        # Find out first suitable power of 2, starting from MINSIZE
        size_p = cgutils.alloca_once_value(builder, minsize)

        bb_body = builder.append_basic_block("calcsize.body")
        bb_end = builder.append_basic_block("calcsize.end")

        builder.branch(bb_body)

        with builder.goto_block(bb_body):
            size = builder.load(size_p)
            is_large_enough = builder.icmp_unsigned('>=', size, min_entries)
            with builder.if_then(is_large_enough, likely=False):
                builder.branch(bb_end)
            # Not large enough yet: double and loop again
            next_size = builder.shl(size, one)
            builder.store(next_size, size_p)
            builder.branch(bb_body)

        builder.position_at_end(bb_end)
        return builder.load(size_p)
|
| 835 |
+
    def upsize(self, nitems):
        """
        When adding to the set, ensure it is properly sized for the given
        number of used entries.

        Grows the table (by factors of 4) whenever 2 * nitems would reach
        the current size.
        """
        context = self._context
        builder = self._builder
        intp_t = nitems.type

        one = ir.Constant(intp_t, 1)
        two = ir.Constant(intp_t, 2)

        payload = self.payload

        # Ensure number of entries >= 2 * used
        min_entries = builder.shl(nitems, one)
        # mask == size - 1 (size is always a power of two)
        size = builder.add(payload.mask, one)
        need_resize = builder.icmp_unsigned('>=', min_entries, size)

        with builder.if_then(need_resize, likely=False):
            # Find out next suitable size
            new_size_p = cgutils.alloca_once_value(builder, size)

            bb_body = builder.append_basic_block("calcsize.body")
            bb_end = builder.append_basic_block("calcsize.end")

            builder.branch(bb_body)

            with builder.goto_block(bb_body):
                # Multiply by 4 (ensuring size remains a power of two)
                new_size = builder.load(new_size_p)
                new_size = builder.shl(new_size, two)
                builder.store(new_size, new_size_p)
                is_too_small = builder.icmp_unsigned('>=', min_entries, new_size)
                builder.cbranch(is_too_small, bb_body, bb_end)

            builder.position_at_end(bb_end)

            new_size = builder.load(new_size_p)
            if DEBUG_ALLOCS:
                context.printf(builder,
                               "upsize to %zd items: current size = %zd, "
                               "min entries = %zd, new size = %zd\n",
                               nitems, size, min_entries, new_size)
            self._resize(payload, new_size, "cannot grow set")
|
| 881 |
+
    def downsize(self, nitems):
        """
        When removing from the set, ensure it is properly sized for the given
        number of used entries.

        Shrinks (by halving) only when the table is at least 4x larger than
        needed and above MINSIZE, to avoid resize thrashing.
        """
        context = self._context
        builder = self._builder
        intp_t = nitems.type

        one = ir.Constant(intp_t, 1)
        two = ir.Constant(intp_t, 2)
        minsize = ir.Constant(intp_t, MINSIZE)

        payload = self.payload

        # Ensure entries >= max(2 * used, MINSIZE)
        min_entries = builder.shl(nitems, one)
        min_entries = builder.select(builder.icmp_unsigned('>=', min_entries, minsize),
                                     min_entries, minsize)
        # Shrink only if size >= 4 * min_entries && size > MINSIZE
        max_size = builder.shl(min_entries, two)
        size = builder.add(payload.mask, one)
        need_resize = builder.and_(
            builder.icmp_unsigned('<=', max_size, size),
            builder.icmp_unsigned('<', minsize, size))

        with builder.if_then(need_resize, likely=False):
            # Find out next suitable size
            new_size_p = cgutils.alloca_once_value(builder, size)

            bb_body = builder.append_basic_block("calcsize.body")
            bb_end = builder.append_basic_block("calcsize.end")

            builder.branch(bb_body)

            with builder.goto_block(bb_body):
                # Divide by 2 (ensuring size remains a power of two)
                new_size = builder.load(new_size_p)
                new_size = builder.lshr(new_size, one)
                # Keep current size if new size would be < min_entries
                is_too_small = builder.icmp_unsigned('>', min_entries, new_size)
                with builder.if_then(is_too_small):
                    # Exit without storing the too-small candidate
                    builder.branch(bb_end)
                builder.store(new_size, new_size_p)
                builder.branch(bb_body)

            builder.position_at_end(bb_end)

            # Ensure new_size >= MINSIZE
            new_size = builder.load(new_size_p)
            # At this point, new_size should be < size if the factors
            # above were chosen carefully!

            if DEBUG_ALLOCS:
                context.printf(builder,
                               "downsize to %zd items: current size = %zd, "
                               "min entries = %zd, new size = %zd\n",
                               nitems, size, min_entries, new_size)
            self._resize(payload, new_size, "cannot shrink set")
|
| 941 |
+
    def _resize(self, payload, nentries, errmsg):
        """
        Resize the payload to the given number of entries, rehashing all
        existing entries into the new table.  Raises MemoryError (with
        *errmsg*) in the caller on allocation failure.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder

        # Allocate new entries
        old_payload = payload

        ok = self._allocate_payload(nentries, realloc=True)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              (errmsg,))

        # Re-insert old entries
        # No incref since they already were the first time they were inserted
        payload = self.payload
        with old_payload._iterate() as loop:
            entry = loop.entry
            self._add_key(payload, entry.key, entry.hash,
                          do_resize=False, do_incref=False)

        self._free_payload(old_payload.ptr)
|
| 968 |
+
    def _replace_payload(self, nentries):
        """
        Replace the payload with a new empty payload with the given number
        of entries, decref'ing and discarding all current elements.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder

        # decref all of the previous entries
        with self.payload._iterate() as loop:
            entry = loop.entry
            self.decref_value(entry.key)

        # Free old payload
        self._free_payload(self.payload.ptr)

        ok = self._allocate_payload(nentries, realloc=True)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot reallocate set",))
|
| 991 |
+
    def _allocate_payload(self, nentries, realloc=False):
        """
        Allocate and initialize payload for the given number of entries.
        If *realloc* is True, the existing meminfo is reused.
        Returns an i1 success flag.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder

        ok = cgutils.alloca_once_value(builder, cgutils.true_bit)

        intp_t = context.get_value_type(types.intp)
        zero = ir.Constant(intp_t, 0)
        one = ir.Constant(intp_t, 1)

        payload_type = context.get_data_type(types.SetPayload(self._ty))
        payload_size = context.get_abi_sizeof(payload_type)
        entry_size = self._entrysize
        # Account for the fact that the payload struct already contains an entry
        payload_size -= entry_size

        # Total allocation size = <payload header size> + nentries * entry_size
        allocsize, ovf = cgutils.muladd_with_overflow(builder, nentries,
                                                      ir.Constant(intp_t, entry_size),
                                                      ir.Constant(intp_t, payload_size))
        with builder.if_then(ovf, likely=False):
            # Size computation overflowed -> fail the allocation
            builder.store(cgutils.false_bit, ok)

        with builder.if_then(builder.load(ok), likely=True):
            if realloc:
                meminfo = self._set.meminfo
                ptr = context.nrt.meminfo_varsize_alloc_unchecked(builder,
                                                                  meminfo,
                                                                  size=allocsize)
                # NOTE(review): despite its name, alloc_ok is true on *failure*
                # (a NULL result) -- it selects the error branch below.
                alloc_ok = cgutils.is_null(builder, ptr)
            else:
                # create destructor to be called upon set destruction
                dtor = self._imp_dtor(context, builder.module)
                meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked(
                    builder, allocsize, builder.bitcast(dtor, cgutils.voidptr_t))
                alloc_ok = cgutils.is_null(builder, meminfo)

            with builder.if_else(alloc_ok,
                                 likely=False) as (if_error, if_ok):
                with if_error:
                    builder.store(cgutils.false_bit, ok)
                with if_ok:
                    if not realloc:
                        self._set.meminfo = meminfo
                        self._set.parent = context.get_constant_null(types.pyobject)
                    payload = self.payload
                    # Initialize entries to 0xff (EMPTY)
                    cgutils.memset(builder, payload.ptr, allocsize, 0xFF)
                    payload.used = zero
                    payload.fill = zero
                    payload.finger = zero
                    new_mask = builder.sub(nentries, one)
                    payload.mask = new_mask

                    if DEBUG_ALLOCS:
                        context.printf(builder,
                                       "allocated %zd bytes for set at %p: mask = %zd\n",
                                       allocsize, payload.ptr, new_mask)

        return builder.load(ok)
|
| 1058 |
+
    def _free_payload(self, ptr):
        """
        Free an allocated old payload at *ptr* through the NRT's
        variable-size free on this set's meminfo.
        """
        self._context.nrt.meminfo_varsize_free(self._builder, self.meminfo, ptr)
|
| 1064 |
+
    def _copy_payload(self, src_payload):
        """
        Raw-copy the given payload into self (same table size, no rehash),
        incref'ing each element.  Returns an i1 success flag.
        """
        context = self._context
        builder = self._builder

        ok = cgutils.alloca_once_value(builder, cgutils.true_bit)

        intp_t = context.get_value_type(types.intp)
        zero = ir.Constant(intp_t, 0)
        one = ir.Constant(intp_t, 1)

        payload_type = context.get_data_type(types.SetPayload(self._ty))
        payload_size = context.get_abi_sizeof(payload_type)
        entry_size = self._entrysize
        # Account for the fact that the payload struct already contains an entry
        payload_size -= entry_size

        mask = src_payload.mask
        nentries = builder.add(one, mask)

        # Total allocation size = <payload header size> + nentries * entry_size
        # (note there can't be any overflow since we're reusing an existing
        #  payload's parameters)
        allocsize = builder.add(ir.Constant(intp_t, payload_size),
                                builder.mul(ir.Constant(intp_t, entry_size),
                                            nentries))

        # *ok* is still true here; this mirrors _allocate_payload's structure
        with builder.if_then(builder.load(ok), likely=True):
            # create destructor for new meminfo
            dtor = self._imp_dtor(context, builder.module)
            meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked(
                builder, allocsize, builder.bitcast(dtor, cgutils.voidptr_t))
            # NOTE(review): alloc_ok is true on *failure* (NULL meminfo)
            alloc_ok = cgutils.is_null(builder, meminfo)

            with builder.if_else(alloc_ok, likely=False) as (if_error, if_ok):
                with if_error:
                    builder.store(cgutils.false_bit, ok)
                with if_ok:
                    self._set.meminfo = meminfo
                    payload = self.payload
                    payload.used = src_payload.used
                    payload.fill = src_payload.fill
                    payload.finger = zero
                    payload.mask = mask

                    # instead of using `_add_key` for every entry, since the
                    # size of the new set is the same, we can just copy the
                    # data directly without having to re-compute the hash
                    cgutils.raw_memcpy(builder, payload.entries,
                                       src_payload.entries, nentries,
                                       entry_size)
                    # increment the refcounts to simulate `_add_key` for each
                    # element
                    with src_payload._iterate() as loop:
                        self.incref_value(loop.entry.key)

                    if DEBUG_ALLOCS:
                        context.printf(builder,
                                       "allocated %zd bytes for set at %p: mask = %zd\n",
                                       allocsize, payload.ptr, mask)

        return builder.load(ok)
|
| 1129 |
+
    def _imp_dtor(self, context, module):
        """Define the dtor for set: decref every key still stored in the
        payload when the meminfo is destroyed.
        """
        llvoidptr = cgutils.voidptr_t
        llsize_t = context.get_value_type(types.size_t)
        # create a dtor function that takes (void* set, size_t size, void* dtor_info)
        fnty = ir.FunctionType(
            ir.VoidType(),
            [llvoidptr, llsize_t, llvoidptr],
        )
        # create type-specific name
        fname = f".dtor.set.{self._ty.dtype}"

        fn = cgutils.get_or_insert_function(module, fnty, name=fname)

        if fn.is_declaration:
            # Set linkage (deduplicated across modules for the same dtype)
            fn.linkage = 'linkonce_odr'
            # Define
            builder = ir.IRBuilder(fn.append_basic_block())
            payload = _SetPayload(context, builder, self._ty, fn.args[0])
            with payload._iterate() as loop:
                entry = loop.entry
                context.nrt.decref(builder, self._ty.dtype, entry.key)
            builder.ret_void()

        return fn
|
| 1157 |
+
def incref_value(self, val):
|
| 1158 |
+
"""Incref an element value
|
| 1159 |
+
"""
|
| 1160 |
+
self._context.nrt.incref(self._builder, self._ty.dtype, val)
|
| 1161 |
+
|
| 1162 |
+
def decref_value(self, val):
|
| 1163 |
+
"""Decref an element value
|
| 1164 |
+
"""
|
| 1165 |
+
self._context.nrt.decref(self._builder, self._ty.dtype, val)
|
| 1166 |
+
|
| 1167 |
+
|
| 1168 |
+
class SetIterInstance(object):
    """
    Helper wrapping a native set-iterator structure (stack-allocated
    index + the set's meminfo) during lowering.
    """

    def __init__(self, context, builder, iter_type, iter_val):
        self._context = context
        self._builder = builder
        self._ty = iter_type
        self._iter = context.make_helper(builder, iter_type, iter_val)
        # The iterator shares the set's payload through its meminfo
        ptr = self._context.nrt.meminfo_data(builder, self.meminfo)
        self._payload = _SetPayload(context, builder, self._ty.container, ptr)

    @classmethod
    def from_set(cls, context, builder, iter_type, set_val):
        """Create an iterator positioned at the start of *set_val*."""
        set_inst = SetInstance(context, builder, iter_type.container, set_val)
        self = cls(context, builder, iter_type, None)
        index = context.get_constant(types.intp, 0)
        self._iter.index = cgutils.alloca_once_value(builder, index)
        self._iter.meminfo = set_inst.meminfo
        return self

    @property
    def value(self):
        # LLVM value of the iterator structure
        return self._iter._getvalue()

    @property
    def meminfo(self):
        return self._iter.meminfo

    @property
    def index(self):
        # Current position in the entries table (loaded from the stack slot)
        return self._builder.load(self._iter.index)

    @index.setter
    def index(self, value):
        self._builder.store(value, self._iter.index)

    def iternext(self, result):
        """
        Fill *result* with the next key, or mark it exhausted.
        """
        index = self.index
        payload = self._payload
        one = ir.Constant(index.type, 1)

        result.set_exhausted()

        with payload._iterate(start=index) as loop:
            # An entry was found
            entry = loop.entry
            result.set_valid()
            result.yield_(entry.key)
            # Resume just after this entry on the next call
            self.index = self._builder.add(loop.index, one)
            loop.do_break()
|
| 1219 |
+
#-------------------------------------------------------------------------------
|
| 1220 |
+
# Constructors
|
| 1221 |
+
|
| 1222 |
+
def build_set(context, builder, set_type, items):
    """
    Build a set of the given type, containing the given items
    (a compile-time sequence of LLVM values, e.g. from a set literal).
    """
    nitems = len(items)
    inst = SetInstance.allocate(context, builder, set_type, nitems)

    if nitems > 0:

        # Populate set. Inlining the insertion code for each item would be very
        # costly, instead we create a LLVM array and iterate over it.
        array = cgutils.pack_array(builder, items)
        array_ptr = cgutils.alloca_once_value(builder, array)

        count = context.get_constant(types.intp, nitems)
        with cgutils.for_range(builder, count) as loop:
            item = builder.load(cgutils.gep(builder, array_ptr, 0, loop.index))
            inst.add(item)

    return impl_ret_new_ref(context, builder, set_type, inst.value)
|
| 1243 |
+
|
| 1244 |
+
@lower_builtin(set)
def set_empty_constructor(context, builder, sig, args):
    """Lower `set()` with no arguments: allocate a fresh empty set."""
    inst = SetInstance.allocate(context, builder, sig.return_type)
    return impl_ret_new_ref(context, builder, sig.return_type, inst.value)
|
| 1250 |
+
@lower_builtin(set, types.IterableType)
def set_constructor(context, builder, sig, args):
    """
    Lower `set(iterable)`.
    """
    set_type = sig.return_type
    items_type, = sig.args
    items, = args

    # If the argument has a len(), preallocate the set so as to
    # avoid resizes.
    # `for_iter` increfs each item in the set, so a `decref` is required each
    # iteration to balance. Because the `incref` from `.add` is dependent on
    # the item not already existing in the set, just removing its incref is not
    # enough to guarantee all memory is freed
    n = call_len(context, builder, items_type, items)
    inst = SetInstance.allocate(context, builder, set_type, n)
    with for_iter(context, builder, items_type, items) as loop:
        inst.add(loop.value)
        context.nrt.decref(builder, set_type.dtype, loop.value)

    return impl_ret_new_ref(context, builder, set_type, inst.value)
|
| 1270 |
+
|
| 1271 |
+
#-------------------------------------------------------------------------------
|
| 1272 |
+
# Various operations
|
| 1273 |
+
|
| 1274 |
+
@lower_builtin(len, types.Set)
def set_len(context, builder, sig, args):
    """Lower `len(set)` by reading the payload's used-entries counter."""
    return SetInstance(context, builder, sig.args[0], args[0]).get_size()
|
| 1279 |
+
@lower_builtin(operator.contains, types.Set, types.Any)
def in_set(context, builder, sig, args):
    """Lower `item in set` as a hash-table membership test."""
    target = SetInstance(context, builder, sig.args[0], args[0])
    return target.contains(args[1])
|
| 1284 |
+
@lower_builtin('getiter', types.Set)
def getiter_set(context, builder, sig, args):
    """Create a set iterator; it borrows the set's payload meminfo."""
    it = SetIterInstance.from_set(context, builder, sig.return_type, args[0])
    return impl_ret_borrowed(context, builder, sig.return_type, it.value)
|
| 1289 |
+
@lower_builtin('iternext', types.SetIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
    # NOTE(review): despite the "listiter" name (apparently historical),
    # this lowers iternext for *set* iterators.
    inst = SetIterInstance(context, builder, sig.args[0], args[0])
    inst.iternext(result)
|
| 1295 |
+
|
| 1296 |
+
#-------------------------------------------------------------------------------
|
| 1297 |
+
# Methods
|
| 1298 |
+
|
| 1299 |
+
# One-item-at-a-time operations
|
| 1300 |
+
|
| 1301 |
+
@lower_builtin("set.add", types.Set, types.Any)
def set_add(context, builder, sig, args):
    """Lower `set.add(item)`; returns a dummy (None) value."""
    target = SetInstance(context, builder, sig.args[0], args[0])
    target.add(args[1])
    return context.get_dummy_value()
|
| 1309 |
+
|
| 1310 |
+
@intrinsic
def _set_discard(typingctx, s, item):
    """Typed intrinsic implementing set.discard."""
    sig = types.none(s, item)

    def codegen(context, builder, sig, args):
        target = SetInstance(context, builder, sig.args[0], args[0])
        target.discard(args[1])
        return context.get_dummy_value()

    return sig, codegen
|
| 1323 |
+
|
| 1324 |
+
@overload_method(types.Set, "discard")
def ol_set_discard(s, item):
    """set.discard(item): remove if present, never raises."""
    def impl(s, item):
        return _set_discard(s, item)
    return impl
|
| 1328 |
+
|
| 1329 |
+
@intrinsic
def _set_pop(typingctx, s):
    """Typed intrinsic implementing set.pop."""
    sig = s.dtype(s)

    def codegen(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        # Popping an empty set raises KeyError, as in CPython
        is_empty = cgutils.is_null(builder, inst.payload.used)
        with builder.if_then(is_empty, likely=False):
            context.call_conv.return_user_exc(builder, KeyError,
                                              ("set.pop(): empty set",))
        return inst.pop()

    return sig, codegen
|
| 1344 |
+
|
| 1345 |
+
@overload_method(types.Set, "pop")
def ol_set_pop(s):
    """set.pop(): remove and return an arbitrary element."""
    def impl(s):
        return _set_pop(s)
    return impl
|
| 1349 |
+
|
| 1350 |
+
@intrinsic
def _set_remove(typingctx, s, item):
    """Typed intrinsic implementing set.remove."""
    sig = types.none(s, item)

    def codegen(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        was_present = inst.discard(args[1])
        # Unlike discard, remove raises when the key was absent
        with builder.if_then(builder.not_(was_present), likely=False):
            context.call_conv.return_user_exc(builder, KeyError,
                                              ("set.remove(): key not in set",))
        return context.get_dummy_value()

    return sig, codegen
|
| 1366 |
+
|
| 1367 |
+
@overload_method(types.Set, "remove")
def ol_set_remove(s, item):
    """set.remove(item): only typed when *item* matches the set's dtype."""
    if s.dtype != item:
        # No implementation -> typing error for a mismatched item type
        return None

    def impl(s, item):
        return _set_remove(s, item)
    return impl
|
| 1372 |
+
|
| 1373 |
+
# Mutating set operations
|
| 1374 |
+
|
| 1375 |
+
@intrinsic
def _set_clear(typingctx, s):
    """Typed intrinsic implementing set.clear."""
    sig = types.none(s)

    def codegen(context, builder, sig, args):
        target = SetInstance(context, builder, sig.args[0], args[0])
        target.clear()
        return context.get_dummy_value()

    return sig, codegen
|
| 1386 |
+
|
| 1387 |
+
@overload_method(types.Set, "clear")
def ol_set_clear(s):
    """set.clear(): remove all elements."""
    def impl(s):
        return _set_clear(s)
    return impl
|
| 1391 |
+
|
| 1392 |
+
@intrinsic
def _set_copy(typingctx, s):
    """Typed intrinsic implementing set.copy."""
    sig = s(s)

    def codegen(context, builder, sig, args):
        src = SetInstance(context, builder, sig.args[0], args[0])
        dup = src.copy()
        return impl_ret_new_ref(context, builder, sig.return_type, dup.value)

    return sig, codegen
|
| 1403 |
+
|
| 1404 |
+
@overload_method(types.Set, "copy")
def ol_set_copy(s):
    """set.copy(): shallow copy preserving table layout."""
    def impl(s):
        return _set_copy(s)
    return impl
|
| 1408 |
+
|
| 1409 |
+
def set_difference_update(context, builder, sig, args):
    """Lower in-place difference: drop *other*'s elements from the target."""
    target = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])
    target.difference(other)
    return context.get_dummy_value()
|
| 1417 |
+
|
| 1418 |
+
@intrinsic
def _set_difference_update(typingctx, a, b):
    """Intrinsic wrapper around the set_difference_update lowering."""
    return types.none(a, b), set_difference_update
|
| 1423 |
+
|
| 1424 |
+
@overload_method(types.Set, "difference_update")
def set_difference_update_impl(a, b):
    """set.difference_update(other) -- both operands must be sets."""
    check_all_set(a, b)

    def impl(a, b):
        return _set_difference_update(a, b)
    return impl
|
| 1429 |
+
|
| 1430 |
+
def set_intersection_update(context, builder, sig, args):
    """Lower in-place intersection: keep only elements also in *other*."""
    target = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])
    target.intersect(other)
    return context.get_dummy_value()
|
| 1436 |
+
|
| 1437 |
+
@intrinsic
def _set_intersection_update(typingctx, a, b):
    """Intrinsic wrapper around the set_intersection_update lowering."""
    return types.none(a, b), set_intersection_update
|
| 1442 |
+
|
| 1443 |
+
@overload_method(types.Set, "intersection_update")
def set_intersection_update_impl(a, b):
    """set.intersection_update(other) -- both operands must be sets."""
    check_all_set(a, b)

    def impl(a, b):
        return _set_intersection_update(a, b)
    return impl
|
| 1448 |
+
|
| 1449 |
+
def set_symmetric_difference_update(context, builder, sig, args):
    """Lower in-place symmetric difference (XOR of the two element sets)."""
    target = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])
    target.symmetric_difference(other)
    return context.get_dummy_value()
|
| 1455 |
+
|
| 1456 |
+
@intrinsic
def _set_symmetric_difference_update(typingctx, a, b):
    """Intrinsic wrapper around the set_symmetric_difference_update lowering."""
    return types.none(a, b), set_symmetric_difference_update
|
| 1461 |
+
|
| 1462 |
+
@overload_method(types.Set, "symmetric_difference_update")
def set_symmetric_difference_update_impl(a, b):
    """set.symmetric_difference_update(other) -- both operands must be sets."""
    check_all_set(a, b)

    def impl(a, b):
        return _set_symmetric_difference_update(a, b)
    return impl
|
| 1467 |
+
|
| 1468 |
+
@lower_builtin("set.update", types.Set, types.IterableType)
def set_update(context, builder, sig, args):
    """
    Lower `set.update(iterable)`: insert every item of *iterable*
    (cast to the set's dtype) into the set.
    """
    inst = SetInstance(context, builder, sig.args[0], args[0])
    items_type = sig.args[1]
    items = args[1]

    # If the argument has a len(), assume there are few collisions and
    # presize to len(set) + len(items)
    n = call_len(context, builder, items_type, items)
    if n is not None:
        new_size = builder.add(inst.payload.used, n)
        inst.upsize(new_size)

    with for_iter(context, builder, items_type, items) as loop:
        # make sure that the items being added are of the same dtype as the
        # set instance
        casted = context.cast(builder, loop.value, items_type.dtype, inst.dtype)
        inst.add(casted)
        # decref each item to counter balance the incref from `for_iter`
        # `.add` will conditionally incref when the item does not already exist
        # in the set, therefore removing its incref is not enough to guarantee
        # all memory is freed
        context.nrt.decref(builder, items_type.dtype, loop.value)

    if n is not None:
        # If we pre-grew the set, downsize in case there were many collisions
        inst.downsize(inst.payload.used)

    return context.get_dummy_value()
|
| 1498 |
+
def gen_operator_impl(op, impl):
|
| 1499 |
+
@intrinsic
|
| 1500 |
+
def _set_operator_intr(typingctx, a, b):
|
| 1501 |
+
sig = a(a, b)
|
| 1502 |
+
def codegen(context, builder, sig, args):
|
| 1503 |
+
assert sig.return_type == sig.args[0]
|
| 1504 |
+
impl(context, builder, sig, args)
|
| 1505 |
+
return impl_ret_borrowed(context, builder, sig.args[0], args[0])
|
| 1506 |
+
return sig, codegen
|
| 1507 |
+
|
| 1508 |
+
@overload(op)
|
| 1509 |
+
def _ol_set_operator(a, b):
|
| 1510 |
+
check_all_set(a, b)
|
| 1511 |
+
return lambda a, b: _set_operator_intr(a, b)
|
| 1512 |
+
|
| 1513 |
+
|
| 1514 |
+
for op_, op_impl in [
|
| 1515 |
+
(operator.iand, set_intersection_update),
|
| 1516 |
+
(operator.ior, set_update),
|
| 1517 |
+
(operator.isub, set_difference_update),
|
| 1518 |
+
(operator.ixor, set_symmetric_difference_update),
|
| 1519 |
+
]:
|
| 1520 |
+
gen_operator_impl(op_, op_impl)
|
| 1521 |
+
|
| 1522 |
+
|
| 1523 |
+
# Set operations creating a new set
|
| 1524 |
+
|
| 1525 |
+
@overload(operator.sub)
|
| 1526 |
+
@overload_method(types.Set, "difference")
|
| 1527 |
+
def impl_set_difference(a, b):
|
| 1528 |
+
check_all_set(a, b)
|
| 1529 |
+
|
| 1530 |
+
def difference_impl(a, b):
|
| 1531 |
+
s = a.copy()
|
| 1532 |
+
s.difference_update(b)
|
| 1533 |
+
return s
|
| 1534 |
+
|
| 1535 |
+
return difference_impl
|
| 1536 |
+
|
| 1537 |
+
@overload(operator.and_)
|
| 1538 |
+
@overload_method(types.Set, "intersection")
|
| 1539 |
+
def set_intersection(a, b):
|
| 1540 |
+
check_all_set(a, b)
|
| 1541 |
+
|
| 1542 |
+
def intersection_impl(a, b):
|
| 1543 |
+
if len(a) < len(b):
|
| 1544 |
+
s = a.copy()
|
| 1545 |
+
s.intersection_update(b)
|
| 1546 |
+
return s
|
| 1547 |
+
else:
|
| 1548 |
+
s = b.copy()
|
| 1549 |
+
s.intersection_update(a)
|
| 1550 |
+
return s
|
| 1551 |
+
|
| 1552 |
+
return intersection_impl
|
| 1553 |
+
|
| 1554 |
+
@overload(operator.xor)
|
| 1555 |
+
@overload_method(types.Set, "symmetric_difference")
|
| 1556 |
+
def set_symmetric_difference(a, b):
|
| 1557 |
+
check_all_set(a, b)
|
| 1558 |
+
|
| 1559 |
+
def symmetric_difference_impl(a, b):
|
| 1560 |
+
if len(a) > len(b):
|
| 1561 |
+
s = a.copy()
|
| 1562 |
+
s.symmetric_difference_update(b)
|
| 1563 |
+
return s
|
| 1564 |
+
else:
|
| 1565 |
+
s = b.copy()
|
| 1566 |
+
s.symmetric_difference_update(a)
|
| 1567 |
+
return s
|
| 1568 |
+
|
| 1569 |
+
return symmetric_difference_impl
|
| 1570 |
+
|
| 1571 |
+
@overload(operator.or_)
|
| 1572 |
+
@overload_method(types.Set, "union")
|
| 1573 |
+
def set_union(a, b):
|
| 1574 |
+
check_all_set(a, b)
|
| 1575 |
+
|
| 1576 |
+
def union_impl(a, b):
|
| 1577 |
+
if len(a) > len(b):
|
| 1578 |
+
s = a.copy()
|
| 1579 |
+
s.update(b)
|
| 1580 |
+
return s
|
| 1581 |
+
else:
|
| 1582 |
+
s = b.copy()
|
| 1583 |
+
s.update(a)
|
| 1584 |
+
return s
|
| 1585 |
+
|
| 1586 |
+
return union_impl
|
| 1587 |
+
|
| 1588 |
+
|
| 1589 |
+
# Predicates
|
| 1590 |
+
|
| 1591 |
+
@intrinsic
|
| 1592 |
+
def _set_isdisjoint(typingctx, a, b):
|
| 1593 |
+
sig = types.boolean(a, b)
|
| 1594 |
+
|
| 1595 |
+
def codegen(context, builder, sig, args):
|
| 1596 |
+
inst = SetInstance(context, builder, sig.args[0], args[0])
|
| 1597 |
+
other = SetInstance(context, builder, sig.args[1], args[1])
|
| 1598 |
+
|
| 1599 |
+
return inst.isdisjoint(other)
|
| 1600 |
+
|
| 1601 |
+
return sig, codegen
|
| 1602 |
+
|
| 1603 |
+
|
| 1604 |
+
@overload_method(types.Set, "isdisjoint")
|
| 1605 |
+
def set_isdisjoint(a, b):
|
| 1606 |
+
check_all_set(a, b)
|
| 1607 |
+
|
| 1608 |
+
return lambda a, b: _set_isdisjoint(a, b)
|
| 1609 |
+
|
| 1610 |
+
|
| 1611 |
+
@intrinsic
|
| 1612 |
+
def _set_issubset(typingctx, a, b):
|
| 1613 |
+
sig = types.boolean(a, b)
|
| 1614 |
+
|
| 1615 |
+
def codegen(context, builder, sig, args):
|
| 1616 |
+
inst = SetInstance(context, builder, sig.args[0], args[0])
|
| 1617 |
+
other = SetInstance(context, builder, sig.args[1], args[1])
|
| 1618 |
+
|
| 1619 |
+
return inst.issubset(other)
|
| 1620 |
+
|
| 1621 |
+
return sig, codegen
|
| 1622 |
+
|
| 1623 |
+
@overload(operator.le)
|
| 1624 |
+
@overload_method(types.Set, "issubset")
|
| 1625 |
+
def set_issubset(a, b):
|
| 1626 |
+
check_all_set(a, b)
|
| 1627 |
+
|
| 1628 |
+
return lambda a, b: _set_issubset(a, b)
|
| 1629 |
+
|
| 1630 |
+
|
| 1631 |
+
@overload(operator.ge)
|
| 1632 |
+
@overload_method(types.Set, "issuperset")
|
| 1633 |
+
def set_issuperset(a, b):
|
| 1634 |
+
check_all_set(a, b)
|
| 1635 |
+
|
| 1636 |
+
def superset_impl(a, b):
|
| 1637 |
+
return b.issubset(a)
|
| 1638 |
+
|
| 1639 |
+
return superset_impl
|
| 1640 |
+
|
| 1641 |
+
@intrinsic
|
| 1642 |
+
def _set_eq(typingctx, a, b):
|
| 1643 |
+
sig = types.boolean(a, b)
|
| 1644 |
+
|
| 1645 |
+
def codegen(context, builder, sig, args):
|
| 1646 |
+
inst = SetInstance(context, builder, sig.args[0], args[0])
|
| 1647 |
+
other = SetInstance(context, builder, sig.args[1], args[1])
|
| 1648 |
+
|
| 1649 |
+
return inst.equals(other)
|
| 1650 |
+
|
| 1651 |
+
return sig, codegen
|
| 1652 |
+
|
| 1653 |
+
@overload(operator.eq)
|
| 1654 |
+
def set_eq(a, b):
|
| 1655 |
+
check_all_set(a, b)
|
| 1656 |
+
|
| 1657 |
+
return lambda a, b: _set_eq(a, b)
|
| 1658 |
+
|
| 1659 |
+
@overload(operator.ne)
|
| 1660 |
+
def set_ne(a, b):
|
| 1661 |
+
check_all_set(a, b)
|
| 1662 |
+
|
| 1663 |
+
def ne_impl(a, b):
|
| 1664 |
+
return not a == b
|
| 1665 |
+
|
| 1666 |
+
return ne_impl
|
| 1667 |
+
|
| 1668 |
+
@intrinsic
|
| 1669 |
+
def _set_lt(typingctx, a, b):
|
| 1670 |
+
sig = types.boolean(a, b)
|
| 1671 |
+
|
| 1672 |
+
def codegen(context, builder, sig, args):
|
| 1673 |
+
inst = SetInstance(context, builder, sig.args[0], args[0])
|
| 1674 |
+
other = SetInstance(context, builder, sig.args[1], args[1])
|
| 1675 |
+
|
| 1676 |
+
return inst.issubset(other, strict=True)
|
| 1677 |
+
|
| 1678 |
+
return sig, codegen
|
| 1679 |
+
|
| 1680 |
+
@overload(operator.lt)
|
| 1681 |
+
def set_lt(a, b):
|
| 1682 |
+
check_all_set(a, b)
|
| 1683 |
+
|
| 1684 |
+
return lambda a, b: _set_lt(a, b)
|
| 1685 |
+
|
| 1686 |
+
@overload(operator.gt)
|
| 1687 |
+
def set_gt(a, b):
|
| 1688 |
+
check_all_set(a, b)
|
| 1689 |
+
|
| 1690 |
+
def gt_impl(a, b):
|
| 1691 |
+
return b < a
|
| 1692 |
+
|
| 1693 |
+
return gt_impl
|
| 1694 |
+
|
| 1695 |
+
@lower_builtin(operator.is_, types.Set, types.Set)
|
| 1696 |
+
def set_is(context, builder, sig, args):
|
| 1697 |
+
a = SetInstance(context, builder, sig.args[0], args[0])
|
| 1698 |
+
b = SetInstance(context, builder, sig.args[1], args[1])
|
| 1699 |
+
ma = builder.ptrtoint(a.meminfo, cgutils.intp_t)
|
| 1700 |
+
mb = builder.ptrtoint(b.meminfo, cgutils.intp_t)
|
| 1701 |
+
return builder.icmp_signed('==', ma, mb)
|
| 1702 |
+
|
| 1703 |
+
|
| 1704 |
+
# -----------------------------------------------------------------------------
|
| 1705 |
+
# Implicit casting
|
| 1706 |
+
|
| 1707 |
+
@lower_cast(types.Set, types.Set)
|
| 1708 |
+
def set_to_set(context, builder, fromty, toty, val):
|
| 1709 |
+
# Casting from non-reflected to reflected
|
| 1710 |
+
assert fromty.dtype == toty.dtype
|
| 1711 |
+
return val
|
lib/python3.10/site-packages/numba/cpython/unicode_support.py
ADDED
|
@@ -0,0 +1,768 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains support functions for more advanced unicode operations.
|
| 3 |
+
This is not a public API and is for Numba internal use only. Most of the
|
| 4 |
+
functions are relatively straightforward translations of the functions with the
|
| 5 |
+
same name in CPython.
|
| 6 |
+
"""
|
| 7 |
+
from collections import namedtuple
|
| 8 |
+
from enum import IntEnum
|
| 9 |
+
|
| 10 |
+
import llvmlite.ir
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from numba.core import types, cgutils, config
|
| 14 |
+
from numba.core.imputils import (impl_ret_untracked)
|
| 15 |
+
|
| 16 |
+
from numba.core.extending import overload, intrinsic, register_jitable
|
| 17 |
+
from numba.core.errors import TypingError
|
| 18 |
+
|
| 19 |
+
# This is equivalent to the struct `_PyUnicode_TypeRecord defined in CPython's
|
| 20 |
+
# Objects/unicodectype.c
|
| 21 |
+
typerecord = namedtuple('typerecord',
|
| 22 |
+
'upper lower title decimal digit flags')
|
| 23 |
+
|
| 24 |
+
# The Py_UCS4 type from CPython:
|
| 25 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/unicodeobject.h#L112 # noqa: E501
|
| 26 |
+
if config.USE_LEGACY_TYPE_SYSTEM:
|
| 27 |
+
_Py_UCS4 = types.uint32
|
| 28 |
+
else:
|
| 29 |
+
_Py_UCS4 = types.c_uint32
|
| 30 |
+
|
| 31 |
+
# ------------------------------------------------------------------------------
|
| 32 |
+
# Start code related to/from CPython's unicodectype impl
|
| 33 |
+
#
|
| 34 |
+
# NOTE: the original source at:
|
| 35 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c # noqa: E501
|
| 36 |
+
# contains this statement:
|
| 37 |
+
#
|
| 38 |
+
# /*
|
| 39 |
+
# Unicode character type helpers.
|
| 40 |
+
#
|
| 41 |
+
# Written by Marc-Andre Lemburg (mal@lemburg.com).
|
| 42 |
+
# Modified for Python 2.0 by Fredrik Lundh (fredrik@pythonware.com)
|
| 43 |
+
#
|
| 44 |
+
# Copyright (c) Corporation for National Research Initiatives.
|
| 45 |
+
#
|
| 46 |
+
# */
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# This enum contains the values defined in CPython's Objects/unicodectype.c that
|
| 50 |
+
# provide masks for use against the various members of the typerecord
|
| 51 |
+
#
|
| 52 |
+
# See: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L13-L27 # noqa: E501
|
| 53 |
+
#
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
_Py_TAB = 0x9
|
| 57 |
+
_Py_LINEFEED = 0xa
|
| 58 |
+
_Py_CARRIAGE_RETURN = 0xd
|
| 59 |
+
_Py_SPACE = 0x20
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class _PyUnicode_TyperecordMasks(IntEnum):
|
| 63 |
+
ALPHA_MASK = 0x01
|
| 64 |
+
DECIMAL_MASK = 0x02
|
| 65 |
+
DIGIT_MASK = 0x04
|
| 66 |
+
LOWER_MASK = 0x08
|
| 67 |
+
LINEBREAK_MASK = 0x10
|
| 68 |
+
SPACE_MASK = 0x20
|
| 69 |
+
TITLE_MASK = 0x40
|
| 70 |
+
UPPER_MASK = 0x80
|
| 71 |
+
XID_START_MASK = 0x100
|
| 72 |
+
XID_CONTINUE_MASK = 0x200
|
| 73 |
+
PRINTABLE_MASK = 0x400
|
| 74 |
+
NUMERIC_MASK = 0x800
|
| 75 |
+
CASE_IGNORABLE_MASK = 0x1000
|
| 76 |
+
CASED_MASK = 0x2000
|
| 77 |
+
EXTENDED_CASE_MASK = 0x4000
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _PyUnicode_gettyperecord(a):
|
| 81 |
+
raise RuntimeError("Calling the Python definition is invalid")
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@intrinsic
|
| 85 |
+
def _gettyperecord_impl(typingctx, codepoint):
|
| 86 |
+
"""
|
| 87 |
+
Provides the binding to numba_gettyperecord, returns a `typerecord`
|
| 88 |
+
namedtuple of properties from the codepoint.
|
| 89 |
+
"""
|
| 90 |
+
if not isinstance(codepoint, types.Integer):
|
| 91 |
+
raise TypingError("codepoint must be an integer")
|
| 92 |
+
|
| 93 |
+
def details(context, builder, signature, args):
|
| 94 |
+
ll_void = context.get_value_type(types.void)
|
| 95 |
+
ll_Py_UCS4 = context.get_value_type(_Py_UCS4)
|
| 96 |
+
ll_intc = context.get_value_type(types.intc)
|
| 97 |
+
ll_intc_ptr = ll_intc.as_pointer()
|
| 98 |
+
ll_uchar = context.get_value_type(types.uchar)
|
| 99 |
+
ll_uchar_ptr = ll_uchar.as_pointer()
|
| 100 |
+
ll_ushort = context.get_value_type(types.ushort)
|
| 101 |
+
ll_ushort_ptr = ll_ushort.as_pointer()
|
| 102 |
+
fnty = llvmlite.ir.FunctionType(ll_void, [
|
| 103 |
+
ll_Py_UCS4, # code
|
| 104 |
+
ll_intc_ptr, # upper
|
| 105 |
+
ll_intc_ptr, # lower
|
| 106 |
+
ll_intc_ptr, # title
|
| 107 |
+
ll_uchar_ptr, # decimal
|
| 108 |
+
ll_uchar_ptr, # digit
|
| 109 |
+
ll_ushort_ptr, # flags
|
| 110 |
+
])
|
| 111 |
+
fn = cgutils.get_or_insert_function(
|
| 112 |
+
builder.module,
|
| 113 |
+
fnty, name="numba_gettyperecord")
|
| 114 |
+
upper = cgutils.alloca_once(builder, ll_intc, name='upper')
|
| 115 |
+
lower = cgutils.alloca_once(builder, ll_intc, name='lower')
|
| 116 |
+
title = cgutils.alloca_once(builder, ll_intc, name='title')
|
| 117 |
+
decimal = cgutils.alloca_once(builder, ll_uchar, name='decimal')
|
| 118 |
+
digit = cgutils.alloca_once(builder, ll_uchar, name='digit')
|
| 119 |
+
flags = cgutils.alloca_once(builder, ll_ushort, name='flags')
|
| 120 |
+
|
| 121 |
+
byref = [ upper, lower, title, decimal, digit, flags]
|
| 122 |
+
builder.call(fn, [args[0]] + byref)
|
| 123 |
+
buf = []
|
| 124 |
+
for x in byref:
|
| 125 |
+
buf.append(builder.load(x))
|
| 126 |
+
|
| 127 |
+
res = context.make_tuple(builder, signature.return_type, tuple(buf))
|
| 128 |
+
return impl_ret_untracked(context, builder, signature.return_type, res)
|
| 129 |
+
|
| 130 |
+
tupty = types.NamedTuple([types.intc, types.intc, types.intc, types.uchar,
|
| 131 |
+
types.uchar, types.ushort], typerecord)
|
| 132 |
+
sig = tupty(_Py_UCS4)
|
| 133 |
+
return sig, details
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@overload(_PyUnicode_gettyperecord)
|
| 137 |
+
def gettyperecord_impl(a):
|
| 138 |
+
"""
|
| 139 |
+
Provides a _PyUnicode_gettyperecord binding, for convenience it will accept
|
| 140 |
+
single character strings and code points.
|
| 141 |
+
"""
|
| 142 |
+
if isinstance(a, types.UnicodeType):
|
| 143 |
+
from numba.cpython.unicode import _get_code_point
|
| 144 |
+
|
| 145 |
+
def impl(a):
|
| 146 |
+
if len(a) > 1:
|
| 147 |
+
msg = "gettyperecord takes a single unicode character"
|
| 148 |
+
raise ValueError(msg)
|
| 149 |
+
code_point = _get_code_point(a, 0)
|
| 150 |
+
data = _gettyperecord_impl(_Py_UCS4(code_point))
|
| 151 |
+
return data
|
| 152 |
+
return impl
|
| 153 |
+
if isinstance(a, types.Integer):
|
| 154 |
+
return lambda a: _gettyperecord_impl(_Py_UCS4(a))
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
# whilst it's possible to grab the _PyUnicode_ExtendedCase symbol as it's global
|
| 158 |
+
# it is safer to use a defined api:
|
| 159 |
+
@intrinsic
|
| 160 |
+
def _PyUnicode_ExtendedCase(typingctx, index):
|
| 161 |
+
"""
|
| 162 |
+
Accessor function for the _PyUnicode_ExtendedCase array, binds to
|
| 163 |
+
numba_get_PyUnicode_ExtendedCase which wraps the array and does the lookup
|
| 164 |
+
"""
|
| 165 |
+
if not isinstance(index, types.Integer):
|
| 166 |
+
raise TypingError("Expected an index")
|
| 167 |
+
|
| 168 |
+
def details(context, builder, signature, args):
|
| 169 |
+
ll_Py_UCS4 = context.get_value_type(_Py_UCS4)
|
| 170 |
+
ll_intc = context.get_value_type(types.intc)
|
| 171 |
+
fnty = llvmlite.ir.FunctionType(ll_Py_UCS4, [ll_intc])
|
| 172 |
+
fn = cgutils.get_or_insert_function(
|
| 173 |
+
builder.module,
|
| 174 |
+
fnty, name="numba_get_PyUnicode_ExtendedCase")
|
| 175 |
+
return builder.call(fn, [args[0]])
|
| 176 |
+
|
| 177 |
+
sig = _Py_UCS4(types.intc)
|
| 178 |
+
return sig, details
|
| 179 |
+
|
| 180 |
+
# The following functions are replications of the functions with the same name
|
| 181 |
+
# in CPython's Objects/unicodectype.c
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L64-L71 # noqa: E501
|
| 185 |
+
@register_jitable
|
| 186 |
+
def _PyUnicode_ToTitlecase(ch):
|
| 187 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 188 |
+
if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK):
|
| 189 |
+
return _PyUnicode_ExtendedCase(ctype.title & 0xFFFF)
|
| 190 |
+
return ch + ctype.title
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L76-L81 # noqa: E501
|
| 194 |
+
@register_jitable
|
| 195 |
+
def _PyUnicode_IsTitlecase(ch):
|
| 196 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 197 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.TITLE_MASK != 0
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L86-L91 # noqa: E501
|
| 201 |
+
@register_jitable
|
| 202 |
+
def _PyUnicode_IsXidStart(ch):
|
| 203 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 204 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.XID_START_MASK != 0
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L96-L101 # noqa: E501
|
| 208 |
+
@register_jitable
|
| 209 |
+
def _PyUnicode_IsXidContinue(ch):
|
| 210 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 211 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.XID_CONTINUE_MASK != 0
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
@register_jitable
|
| 215 |
+
def _PyUnicode_ToDecimalDigit(ch):
|
| 216 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 217 |
+
if ctype.flags & _PyUnicode_TyperecordMasks.DECIMAL_MASK:
|
| 218 |
+
return ctype.decimal
|
| 219 |
+
return -1
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L123-L1128 # noqa: E501
|
| 223 |
+
@register_jitable
|
| 224 |
+
def _PyUnicode_ToDigit(ch):
|
| 225 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 226 |
+
if ctype.flags & _PyUnicode_TyperecordMasks.DIGIT_MASK:
|
| 227 |
+
return ctype.digit
|
| 228 |
+
return -1
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L140-L145 # noqa: E501
|
| 232 |
+
@register_jitable
|
| 233 |
+
def _PyUnicode_IsNumeric(ch):
|
| 234 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 235 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.NUMERIC_MASK != 0
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L160-L165 # noqa: E501
|
| 239 |
+
@register_jitable
|
| 240 |
+
def _PyUnicode_IsPrintable(ch):
|
| 241 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 242 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.PRINTABLE_MASK != 0
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L170-L175 # noqa: E501
|
| 246 |
+
@register_jitable
|
| 247 |
+
def _PyUnicode_IsLowercase(ch):
|
| 248 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 249 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.LOWER_MASK != 0
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L180-L185 # noqa: E501
|
| 253 |
+
@register_jitable
|
| 254 |
+
def _PyUnicode_IsUppercase(ch):
|
| 255 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 256 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.UPPER_MASK != 0
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
@register_jitable
|
| 260 |
+
def _PyUnicode_IsLineBreak(ch):
|
| 261 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 262 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.LINEBREAK_MASK != 0
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
@register_jitable
|
| 266 |
+
def _PyUnicode_ToUppercase(ch):
|
| 267 |
+
raise NotImplementedError
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
@register_jitable
|
| 271 |
+
def _PyUnicode_ToLowercase(ch):
|
| 272 |
+
raise NotImplementedError
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# From: https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Objects/unicodectype.c#L211-L225 # noqa: E501
|
| 276 |
+
@register_jitable
|
| 277 |
+
def _PyUnicode_ToLowerFull(ch, res):
|
| 278 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 279 |
+
if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK):
|
| 280 |
+
index = ctype.lower & 0xFFFF
|
| 281 |
+
n = ctype.lower >> 24
|
| 282 |
+
for i in range(n):
|
| 283 |
+
res[i] = _PyUnicode_ExtendedCase(index + i)
|
| 284 |
+
return n
|
| 285 |
+
res[0] = ch + ctype.lower
|
| 286 |
+
return 1
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# From: https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Objects/unicodectype.c#L227-L241 # noqa: E501
|
| 290 |
+
@register_jitable
|
| 291 |
+
def _PyUnicode_ToTitleFull(ch, res):
|
| 292 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 293 |
+
if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK):
|
| 294 |
+
index = ctype.title & 0xFFFF
|
| 295 |
+
n = ctype.title >> 24
|
| 296 |
+
for i in range(n):
|
| 297 |
+
res[i] = _PyUnicode_ExtendedCase(index + i)
|
| 298 |
+
return n
|
| 299 |
+
res[0] = ch + ctype.title
|
| 300 |
+
return 1
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L243-L257 # noqa: E501
|
| 304 |
+
@register_jitable
|
| 305 |
+
def _PyUnicode_ToUpperFull(ch, res):
|
| 306 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 307 |
+
if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK):
|
| 308 |
+
index = ctype.upper & 0xFFFF
|
| 309 |
+
n = ctype.upper >> 24
|
| 310 |
+
for i in range(n):
|
| 311 |
+
# Perhaps needed to use unicode._set_code_point() here
|
| 312 |
+
res[i] = _PyUnicode_ExtendedCase(index + i)
|
| 313 |
+
return n
|
| 314 |
+
res[0] = ch + ctype.upper
|
| 315 |
+
return 1
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L259-L272 # noqa: E501
|
| 319 |
+
@register_jitable
|
| 320 |
+
def _PyUnicode_ToFoldedFull(ch, res):
|
| 321 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 322 |
+
extended_case_mask = _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK
|
| 323 |
+
if ctype.flags & extended_case_mask and (ctype.lower >> 20) & 7:
|
| 324 |
+
index = (ctype.lower & 0xFFFF) + (ctype.lower >> 24)
|
| 325 |
+
n = (ctype.lower >> 20) & 7
|
| 326 |
+
for i in range(n):
|
| 327 |
+
res[i] = _PyUnicode_ExtendedCase(index + i)
|
| 328 |
+
return n
|
| 329 |
+
return _PyUnicode_ToLowerFull(ch, res)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L274-L279 # noqa: E501
|
| 333 |
+
@register_jitable
|
| 334 |
+
def _PyUnicode_IsCased(ch):
|
| 335 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 336 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.CASED_MASK != 0
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L281-L286 # noqa: E501
|
| 340 |
+
@register_jitable
|
| 341 |
+
def _PyUnicode_IsCaseIgnorable(ch):
|
| 342 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 343 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.CASE_IGNORABLE_MASK != 0
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L123-L135 # noqa: E501
|
| 347 |
+
@register_jitable
|
| 348 |
+
def _PyUnicode_IsDigit(ch):
|
| 349 |
+
if _PyUnicode_ToDigit(ch) < 0:
|
| 350 |
+
return 0
|
| 351 |
+
return 1
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L106-L118 # noqa: E501
|
| 355 |
+
@register_jitable
|
| 356 |
+
def _PyUnicode_IsDecimalDigit(ch):
|
| 357 |
+
if _PyUnicode_ToDecimalDigit(ch) < 0:
|
| 358 |
+
return 0
|
| 359 |
+
return 1
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L291-L296 # noqa: E501
|
| 363 |
+
@register_jitable
|
| 364 |
+
def _PyUnicode_IsSpace(ch):
|
| 365 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 366 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.SPACE_MASK != 0
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
@register_jitable
|
| 370 |
+
def _PyUnicode_IsAlpha(ch):
|
| 371 |
+
ctype = _PyUnicode_gettyperecord(ch)
|
| 372 |
+
return ctype.flags & _PyUnicode_TyperecordMasks.ALPHA_MASK != 0
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
# End code related to/from CPython's unicodectype impl
|
| 376 |
+
# ------------------------------------------------------------------------------
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
# ------------------------------------------------------------------------------
|
| 380 |
+
# Start code related to/from CPython's pyctype
|
| 381 |
+
|
| 382 |
+
# From the definition in CPython's Include/pyctype.h
|
| 383 |
+
# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L5-L11 # noqa: E501
|
| 384 |
+
class _PY_CTF(IntEnum):
|
| 385 |
+
LOWER = 0x01
|
| 386 |
+
UPPER = 0x02
|
| 387 |
+
ALPHA = 0x01 | 0x02
|
| 388 |
+
DIGIT = 0x04
|
| 389 |
+
ALNUM = 0x01 | 0x02 | 0x04
|
| 390 |
+
SPACE = 0x08
|
| 391 |
+
XDIGIT = 0x10
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
# From the definition in CPython's Python/pyctype.c
|
| 395 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L5 # noqa: E501
|
| 396 |
+
# The 256-entry character classification table is built programmatically
# rather than written out longhand; the resulting values are identical to
# CPython's hand-written table (0 for every unclassified byte, including
# the whole 0x80-0xff range).
_Py_ctype_table = np.zeros(256, dtype=np.intc)
_Py_ctype_table[0x09:0x0e] = _PY_CTF.SPACE                    # '\t' '\n' '\v' '\f' '\r'
_Py_ctype_table[0x20] = _PY_CTF.SPACE                         # ' '
_Py_ctype_table[0x30:0x3a] = _PY_CTF.DIGIT | _PY_CTF.XDIGIT   # '0'-'9'
_Py_ctype_table[0x41:0x47] = _PY_CTF.UPPER | _PY_CTF.XDIGIT   # 'A'-'F'
_Py_ctype_table[0x47:0x5b] = _PY_CTF.UPPER                    # 'G'-'Z'
_Py_ctype_table[0x61:0x67] = _PY_CTF.LOWER | _PY_CTF.XDIGIT   # 'a'-'f'
_Py_ctype_table[0x67:0x7b] = _PY_CTF.LOWER                    # 'g'-'z'
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
# From the definition in CPython's Python/pyctype.c
|
| 537 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L145 # noqa: E501
|
| 538 |
+
_Py_ctype_tolower = np.array([
|
| 539 |
+
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
| 540 |
+
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
|
| 541 |
+
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
|
| 542 |
+
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
|
| 543 |
+
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
|
| 544 |
+
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
|
| 545 |
+
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
|
| 546 |
+
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
|
| 547 |
+
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
|
| 548 |
+
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
|
| 549 |
+
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
|
| 550 |
+
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
|
| 551 |
+
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
|
| 552 |
+
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
|
| 553 |
+
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
|
| 554 |
+
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
|
| 555 |
+
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
|
| 556 |
+
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
|
| 557 |
+
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
|
| 558 |
+
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
|
| 559 |
+
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
|
| 560 |
+
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
|
| 561 |
+
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
|
| 562 |
+
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
|
| 563 |
+
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
|
| 564 |
+
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
|
| 565 |
+
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
|
| 566 |
+
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
|
| 567 |
+
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
|
| 568 |
+
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
|
| 569 |
+
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
|
| 570 |
+
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
|
| 571 |
+
], dtype=np.uint8)
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
# From the definition in CPython's Python/pyctype.c
|
| 575 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L180
|
| 576 |
+
_Py_ctype_toupper = np.array([
|
| 577 |
+
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
| 578 |
+
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
|
| 579 |
+
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
|
| 580 |
+
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
|
| 581 |
+
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
|
| 582 |
+
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
|
| 583 |
+
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
|
| 584 |
+
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
|
| 585 |
+
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
|
| 586 |
+
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
|
| 587 |
+
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
|
| 588 |
+
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
|
| 589 |
+
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
|
| 590 |
+
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
|
| 591 |
+
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
|
| 592 |
+
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
|
| 593 |
+
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
|
| 594 |
+
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
|
| 595 |
+
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
|
| 596 |
+
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
|
| 597 |
+
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
|
| 598 |
+
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
|
| 599 |
+
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
|
| 600 |
+
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
|
| 601 |
+
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
|
| 602 |
+
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
|
| 603 |
+
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
|
| 604 |
+
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
|
| 605 |
+
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
|
| 606 |
+
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
|
| 607 |
+
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
|
| 608 |
+
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
|
| 609 |
+
], dtype=np.uint8)
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
class _PY_CTF_LB(IntEnum):
|
| 613 |
+
LINE_BREAK = 0x01
|
| 614 |
+
LINE_FEED = 0x02
|
| 615 |
+
CARRIAGE_RETURN = 0x04
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
# Line-break classification table, built programmatically; the resulting
# 256 values are identical to the original longhand table. The break set
# is LF, VT, FF, CR, FS, GS, RS and NEL (0x85); LF additionally carries
# the LINE_FEED flag and CR the CARRIAGE_RETURN flag.
_Py_ctype_islinebreak = np.zeros(256, dtype=np.intc)
_Py_ctype_islinebreak[[0x0a, 0x0b, 0x0c, 0x0d,
                       0x1c, 0x1d, 0x1e, 0x85]] = _PY_CTF_LB.LINE_BREAK
_Py_ctype_islinebreak[0x0a] |= _PY_CTF_LB.LINE_FEED
_Py_ctype_islinebreak[0x0d] |= _PY_CTF_LB.CARRIAGE_RETURN
|
| 644 |
+
|
| 645 |
+
|
| 646 |
+
# Translation of:
|
| 647 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pymacro.h#L25 # noqa: E501
|
| 648 |
+
@register_jitable
def _Py_CHARMASK(ch):
    """
    Equivalent to the CPython macro `Py_CHARMASK()`: masks off all but the
    lowest 8 bits of ch, i.e. maps any integer into the range [0, 255].
    """
    return types.uint8(ch) & types.uint8(0xff)
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
# Translation of:
|
| 658 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L30 # noqa: E501
|
| 659 |
+
@register_jitable
def _Py_TOUPPER(ch):
    """Numba translation of the CPython macro `Py_TOUPPER()`.

    Maps an ASCII lowercase code point to its uppercase equivalent; every
    other byte value is returned unchanged.
    """
    index = _Py_CHARMASK(ch)
    return _Py_ctype_toupper[index]
|
| 666 |
+
|
| 667 |
+
|
| 668 |
+
# Translation of:
|
| 669 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L29 # noqa: E501
|
| 670 |
+
@register_jitable
def _Py_TOLOWER(ch):
    """Numba translation of the CPython macro `Py_TOLOWER()`.

    Maps an ASCII uppercase code point to its lowercase equivalent; every
    other byte value is returned unchanged.
    """
    index = _Py_CHARMASK(ch)
    return _Py_ctype_tolower[index]
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
# Translation of:
|
| 680 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L18 # noqa: E501
|
| 681 |
+
@register_jitable
def _Py_ISLOWER(ch):
    """Numba translation of the CPython macro `Py_ISLOWER()`.

    Nonzero iff the masked byte is an ASCII lowercase letter.
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.LOWER
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
# Translation of:
|
| 690 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L19 # noqa: E501
|
| 691 |
+
@register_jitable
def _Py_ISUPPER(ch):
    """Numba translation of the CPython macro `Py_ISUPPER()`.

    Nonzero iff the masked byte is an ASCII uppercase letter.
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.UPPER
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
# Translation of:
|
| 700 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L20 # noqa: E501
|
| 701 |
+
@register_jitable
def _Py_ISALPHA(ch):
    """Numba translation of the CPython macro `Py_ISALPHA()`.

    Nonzero iff the masked byte is an ASCII letter (upper or lower case).
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.ALPHA
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
# Translation of:
|
| 710 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L21 # noqa: E501
|
| 711 |
+
@register_jitable
def _Py_ISDIGIT(ch):
    """Numba translation of the CPython macro `Py_ISDIGIT()`.

    Nonzero iff the masked byte is an ASCII decimal digit '0'-'9'.
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.DIGIT
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
# Translation of:
|
| 720 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L22 # noqa: E501
|
| 721 |
+
@register_jitable
def _Py_ISXDIGIT(ch):
    """Numba translation of the CPython macro `Py_ISXDIGIT()`.

    Nonzero iff the masked byte is a hexadecimal digit (0-9, a-f, A-F).
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.XDIGIT
|
| 727 |
+
|
| 728 |
+
|
| 729 |
+
# Translation of:
|
| 730 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L23 # noqa: E501
|
| 731 |
+
@register_jitable
def _Py_ISALNUM(ch):
    """Numba translation of the CPython macro `Py_ISALNUM()`.

    Nonzero iff the masked byte is an ASCII letter or decimal digit.
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.ALNUM
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
# Translation of:
|
| 740 |
+
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L24 # noqa: E501
|
| 741 |
+
@register_jitable
def _Py_ISSPACE(ch):
    """Numba translation of the CPython macro `Py_ISSPACE()`.

    Nonzero iff the masked byte is ASCII whitespace (tab, LF, VT, FF, CR
    or space).
    """
    flags = _Py_ctype_table[_Py_CHARMASK(ch)]
    return flags & _PY_CTF.SPACE
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
@register_jitable
def _Py_ISLINEBREAK(ch):
    """Return nonzero when the masked byte is classified as a line break
    in ``_Py_ctype_islinebreak``.
    """
    flags = _Py_ctype_islinebreak[_Py_CHARMASK(ch)]
    return flags & _PY_CTF_LB.LINE_BREAK
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
@register_jitable
def _Py_ISLINEFEED(ch):
    """Return nonzero when the masked byte is the line feed character
    (LF, 0x0a).
    """
    flags = _Py_ctype_islinebreak[_Py_CHARMASK(ch)]
    return flags & _PY_CTF_LB.LINE_FEED
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
@register_jitable
def _Py_ISCARRIAGERETURN(ch):
    """Return nonzero when the masked byte is the carriage return character
    (CR, 0x0d).
    """
    flags = _Py_ctype_islinebreak[_Py_CHARMASK(ch)]
    return flags & _PY_CTF_LB.CARRIAGE_RETURN
|
| 765 |
+
|
| 766 |
+
|
| 767 |
+
# End code related to/from CPython's pyctype
|
| 768 |
+
# ------------------------------------------------------------------------------
|
lib/python3.10/site-packages/numba/cuda/cudadrv/devices.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Expose each GPU devices directly.
|
| 3 |
+
|
| 4 |
+
This module implements a API that is like the "CUDA runtime" context manager
|
| 5 |
+
for managing CUDA context stack and clean up. It relies on thread-local globals
|
| 6 |
+
to separate the context stack management of each thread. Contexts are also
|
| 7 |
+
shareable among threads. Only the main thread can destroy Contexts.
|
| 8 |
+
|
| 9 |
+
Note:
|
| 10 |
+
- This module must be imported by the main-thread.
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
import functools
|
| 14 |
+
import threading
|
| 15 |
+
from contextlib import contextmanager
|
| 16 |
+
|
| 17 |
+
from .driver import driver, USE_NV_BINDING
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class _DeviceList(object):
|
| 21 |
+
def __getattr__(self, attr):
|
| 22 |
+
# First time looking at "lst" attribute.
|
| 23 |
+
if attr == "lst":
|
| 24 |
+
# Device list is not initialized.
|
| 25 |
+
# Query all CUDA devices.
|
| 26 |
+
numdev = driver.get_device_count()
|
| 27 |
+
gpus = [_DeviceContextManager(driver.get_device(devid))
|
| 28 |
+
for devid in range(numdev)]
|
| 29 |
+
# Define "lst" to avoid re-initialization
|
| 30 |
+
self.lst = gpus
|
| 31 |
+
return gpus
|
| 32 |
+
|
| 33 |
+
# Other attributes
|
| 34 |
+
return super(_DeviceList, self).__getattr__(attr)
|
| 35 |
+
|
| 36 |
+
def __getitem__(self, devnum):
|
| 37 |
+
'''
|
| 38 |
+
Returns the context manager for device *devnum*.
|
| 39 |
+
'''
|
| 40 |
+
return self.lst[devnum]
|
| 41 |
+
|
| 42 |
+
def __str__(self):
|
| 43 |
+
return ', '.join([str(d) for d in self.lst])
|
| 44 |
+
|
| 45 |
+
def __iter__(self):
|
| 46 |
+
return iter(self.lst)
|
| 47 |
+
|
| 48 |
+
def __len__(self):
|
| 49 |
+
return len(self.lst)
|
| 50 |
+
|
| 51 |
+
@property
|
| 52 |
+
def current(self):
|
| 53 |
+
"""Returns the active device or None if there's no active device
|
| 54 |
+
"""
|
| 55 |
+
with driver.get_active_context() as ac:
|
| 56 |
+
devnum = ac.devnum
|
| 57 |
+
if devnum is not None:
|
| 58 |
+
return self[devnum]
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class _DeviceContextManager(object):
|
| 62 |
+
"""
|
| 63 |
+
Provides a context manager for executing in the context of the chosen
|
| 64 |
+
device. The normal use of instances of this type is from
|
| 65 |
+
``numba.cuda.gpus``. For example, to execute on device 2::
|
| 66 |
+
|
| 67 |
+
with numba.cuda.gpus[2]:
|
| 68 |
+
d_a = numba.cuda.to_device(a)
|
| 69 |
+
|
| 70 |
+
to copy the array *a* onto device 2, referred to by *d_a*.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
def __init__(self, device):
|
| 74 |
+
self._device = device
|
| 75 |
+
|
| 76 |
+
def __getattr__(self, item):
|
| 77 |
+
return getattr(self._device, item)
|
| 78 |
+
|
| 79 |
+
def __enter__(self):
|
| 80 |
+
_runtime.get_or_create_context(self._device.id)
|
| 81 |
+
|
| 82 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 83 |
+
# this will verify that we are popping the right device context.
|
| 84 |
+
self._device.get_primary_context().pop()
|
| 85 |
+
|
| 86 |
+
def __str__(self):
|
| 87 |
+
return "<Managed Device {self.id}>".format(self=self)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class _Runtime(object):
    """Emulate the CUDA runtime context management.

    It owns all Devices and Contexts.
    Keeps at most one Context per Device
    """

    def __init__(self):
        # Lazy list of all devices; does not touch the driver yet.
        self.gpus = _DeviceList()

        # For caching the attached CUDA Context
        self._tls = threading.local()

        # Remember the main thread
        # Only the main thread can *actually* destroy
        self._mainthread = threading.current_thread()

        # Avoid mutation of runtime state in multithreaded programs
        self._lock = threading.RLock()

    @contextmanager
    def ensure_context(self):
        """Ensure a CUDA context is available inside the context.

        On entrance, queries the CUDA driver for an active CUDA context and
        attaches it in TLS for subsequent calls so they do not need to query
        the CUDA driver again. On exit, detach the CUDA context from the TLS.

        This will allow us to pickup thirdparty activated CUDA context in
        any top-level Numba CUDA API.
        """
        with driver.get_active_context():
            # Save the previously attached context (if any) so nesting works.
            oldctx = self._get_attached_context()
            newctx = self.get_or_create_context(None)
            self._set_attached_context(newctx)
        try:
            yield
        finally:
            # Restore whatever context was attached before entry.
            self._set_attached_context(oldctx)

    def get_or_create_context(self, devnum):
        """Returns the primary context and push+create it if needed
        for *devnum*. If *devnum* is None, use the active CUDA context (must
        be primary) or create a new one with ``devnum=0``.
        """
        if devnum is None:
            # Fast path: reuse the context cached in TLS by ensure_context().
            attached_ctx = self._get_attached_context()
            if attached_ctx is None:
                return self._get_or_create_context_uncached(devnum)
            else:
                return attached_ctx
        else:
            if USE_NV_BINDING:
                # The NVIDIA binding expects a plain int device ordinal.
                devnum = int(devnum)
            return self._activate_context_for(devnum)

    def _get_or_create_context_uncached(self, devnum):
        """See also ``get_or_create_context(devnum)``.
        This version does not read the cache.
        """
        with self._lock:
            # Try to get the active context in the CUDA stack or
            # activate GPU-0 with the primary context
            with driver.get_active_context() as ac:
                if not ac:
                    return self._activate_context_for(0)
                else:
                    # Get primary context for the active device
                    ctx = self.gpus[ac.devnum].get_primary_context()
                    # Is active context the primary context?
                    # Handle representation differs between the ctypes and
                    # NVIDIA-binding paths.
                    if USE_NV_BINDING:
                        ctx_handle = int(ctx.handle)
                        ac_ctx_handle = int(ac.context_handle)
                    else:
                        ctx_handle = ctx.handle.value
                        ac_ctx_handle = ac.context_handle.value
                    if ctx_handle != ac_ctx_handle:
                        msg = ('Numba cannot operate on non-primary'
                               ' CUDA context {:x}')
                        raise RuntimeError(msg.format(ac_ctx_handle))
                    # Ensure the context is ready
                    ctx.prepare_for_use()
                return ctx

    def _activate_context_for(self, devnum):
        """Push and return the primary context of device *devnum*.

        Raises RuntimeError if a different context is already attached in
        this thread (context switching is not supported).
        """
        with self._lock:
            gpu = self.gpus[devnum]
            newctx = gpu.get_primary_context()
            # Detect unexpected context switch
            cached_ctx = self._get_attached_context()
            if cached_ctx is not None and cached_ctx is not newctx:
                raise RuntimeError('Cannot switch CUDA-context.')
            newctx.push()
            return newctx

    def _get_attached_context(self):
        # Returns the context cached in thread-local storage, or None.
        return getattr(self._tls, 'attached_context', None)

    def _set_attached_context(self, ctx):
        # Cache *ctx* in thread-local storage for fast later lookup.
        self._tls.attached_context = ctx

    def reset(self):
        """Clear all contexts in the thread. Destroy the context if and only
        if we are in the main thread.
        """
        # Pop all active context.
        while driver.pop_active_context() is not None:
            pass

        # If it is the main thread
        if threading.current_thread() == self._mainthread:
            self._destroy_all_contexts()

    def _destroy_all_contexts(self):
        # Reset all devices
        for gpu in self.gpus:
            gpu.reset()
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
# Module-level singleton runtime, created at import time. The module must
# be imported by the main thread (see the module docstring) so that
# _Runtime records the correct "main thread" for context destruction.
_runtime = _Runtime()

# ================================ PUBLIC API ================================

# Public, lazily-initialized list of all CUDA devices.
gpus = _runtime.gpus
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def get_context(devnum=None):
    """Get the current device or use a device by device number, and
    return the CUDA context.

    With ``devnum=None`` the currently active context is reused (or one is
    created on device 0); otherwise the primary context of device *devnum*
    is pushed and returned.
    """
    return _runtime.get_or_create_context(devnum)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def require_context(fn):
    """Decorator ensuring a CUDA context is present whenever *fn* runs.

    Note: the wrapped function *fn* must not switch the CUDA context
    itself.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kws):
        with _runtime.ensure_context():
            return fn(*args, **kws)

    return wrapper
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def reset():
    """Reset the CUDA subsystem for the current thread.

    In the main thread:
        This removes all CUDA contexts. Only use this at shutdown or for
        cleaning up between tests.

    In non-main threads:
        This clear the CUDA context stack only.

    """
    # Delegates to the module-level runtime singleton.
    _runtime.reset()
|
lib/python3.10/site-packages/numba/cuda/cudadrv/libs.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CUDA Toolkit libraries lookup utilities.
|
| 2 |
+
|
| 3 |
+
CUDA Toolkit libraries can be available via either:
|
| 4 |
+
|
| 5 |
+
- the `cuda-nvcc` and `cuda-nvrtc` conda packages for CUDA 12,
|
| 6 |
+
- the `cudatoolkit` conda package for CUDA 11,
|
| 7 |
+
- a user supplied location from CUDA_HOME,
|
| 8 |
+
- a system wide location,
|
| 9 |
+
- package-specific locations (e.g. the Debian NVIDIA packages),
|
| 10 |
+
- or can be discovered by the system loader.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import ctypes
|
| 16 |
+
|
| 17 |
+
from numba.misc.findlib import find_lib
|
| 18 |
+
from numba.cuda.cuda_paths import get_cuda_paths
|
| 19 |
+
from numba.cuda.cudadrv.driver import locate_driver_and_loader, load_driver
|
| 20 |
+
from numba.cuda.cudadrv.error import CudaSupportError
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if sys.platform == 'win32':
|
| 24 |
+
_dllnamepattern = '%s.dll'
|
| 25 |
+
_staticnamepattern = '%s.lib'
|
| 26 |
+
elif sys.platform == 'darwin':
|
| 27 |
+
_dllnamepattern = 'lib%s.dylib'
|
| 28 |
+
_staticnamepattern = 'lib%s.a'
|
| 29 |
+
else:
|
| 30 |
+
_dllnamepattern = 'lib%s.so'
|
| 31 |
+
_staticnamepattern = 'lib%s.a'
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_libdevice():
    """Return the discovered path of the libdevice bitcode file (the
    ``info`` field of the ``libdevice`` entry from ``get_cuda_paths()``).
    """
    return get_cuda_paths()['libdevice'].info
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def open_libdevice():
    """Read the libdevice bitcode file and return its contents as bytes."""
    path = get_libdevice()
    with open(path, 'rb') as bcfile:
        return bcfile.read()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def get_cudalib(lib, static=False):
    """
    Find the path of a CUDA library based on a search of known locations. If
    the search fails, return a generic filename for the library (e.g.
    'libnvvm.so' for 'nvvm') so that we may attempt to load it using the
    system loader's search mechanism.
    """
    # NVVM has its own dedicated entry in the CUDA paths table.
    if lib == 'nvvm':
        return get_cuda_paths()['nvvm'].info or _dllnamepattern % 'nvvm'

    dir_type = 'static_cudalib_dir' if static else 'cudalib_dir'
    libdir = get_cuda_paths()[dir_type].info

    candidates = find_lib(lib, libdir, static=static)
    if candidates:
        # Pick the lexicographically greatest candidate (highest version).
        return max(candidates)

    # Nothing found: fall back to a generic name for the system loader.
    namepattern = _staticnamepattern if static else _dllnamepattern
    return namepattern % lib
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def open_cudalib(lib):
    """Locate CUDA library *lib*, load it with ctypes and return the
    resulting ``CDLL`` handle.
    """
    return ctypes.CDLL(get_cudalib(lib))
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def check_static_lib(path):
    """Raise ``FileNotFoundError`` if *path* is not an existing regular
    file.
    """
    if os.path.isfile(path):
        return
    raise FileNotFoundError(f'{path} not found')
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _get_source_variable(lib, static=False):
    """Return the configuration source (the ``by`` field) that determined
    the search location used for *lib*.
    """
    paths = get_cuda_paths()
    # 'nvvm' and 'libdevice' have dedicated entries of their own.
    if lib in ('nvvm', 'libdevice'):
        return paths[lib].by
    dir_type = 'static_cudalib_dir' if static else 'cudalib_dir'
    return paths[dir_type].by
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def test():
    """Test library lookup. Path info is printed to stdout.

    Returns True if all checks passed, False otherwise.
    """
    failed = False

    # Check for the driver
    try:
        dlloader, candidates = locate_driver_and_loader()
        print('Finding driver from candidates:')
        for location in candidates:
            print(f'\t{location}')
        print(f'Using loader {dlloader}')
        print('\tTrying to load driver', end='...')
        dll, path = load_driver(dlloader, candidates)
        print('\tok')
        print(f'\t\tLoaded from {path}')
    except CudaSupportError as e:
        print(f'\tERROR: failed to open driver: {e}')
        failed = True

    # Find the absolute location of the driver on Linux. Various driver-related
    # issues have been reported by WSL2 users, and it is almost always due to a
    # Linux (i.e. not- WSL2) driver being installed in a WSL2 system.
    # Providing the absolute location of the driver indicates its version
    # number in the soname (e.g. "libcuda.so.530.30.02"), which can be used to
    # look up whether the driver was intended for "native" Linux.
    if sys.platform == 'linux' and not failed:
        pid = os.getpid()
        mapsfile = os.path.join(os.path.sep, 'proc', f'{pid}', 'maps')
        # It's difficult to predict all that might go wrong reading the maps
        # file - in case various error conditions ensue (the file is not found,
        # not readable, etc.) we use OSError to hopefully catch any of them.
        try:
            with open(mapsfile) as f:
                maps = f.read()
        except OSError:
            # It's helpful to report that this went wrong to the user, but we
            # don't set failed to True because this doesn't have any connection
            # to actual CUDA functionality.
            print(f'\tERROR: Could not open {mapsfile} to determine absolute '
                  'path to libcuda.so')
        else:
            # In this case we could read the maps, so we can report the
            # relevant ones to the user
            locations = set(s for s in maps.split() if 'libcuda.so' in s)
            print('\tMapped libcuda.so paths:')
            for location in locations:
                print(f'\t\t{location}')

    # Checks for dynamic libraries
    libs = 'nvvm nvrtc cudart'.split()
    for lib in libs:
        path = get_cudalib(lib)
        print('Finding {} from {}'.format(lib, _get_source_variable(lib)))
        print('\tLocated at', path)

        try:
            print('\tTrying to open library', end='...')
            open_cudalib(lib)
            print('\tok')
        except OSError as e:
            print('\tERROR: failed to open %s:\n%s' % (lib, e))
            failed = True

    # Check for cudadevrt (the only static library)
    lib = 'cudadevrt'
    path = get_cudalib(lib, static=True)
    print('Finding {} from {}'.format(lib, _get_source_variable(lib,
                                                                static=True)))
    print('\tLocated at', path)

    try:
        print('\tChecking library', end='...')
        check_static_lib(path)
        print('\tok')
    except FileNotFoundError as e:
        print('\tERROR: failed to find %s:\n%s' % (lib, e))
        failed = True

    # Check for libdevice
    where = _get_source_variable('libdevice')
    print(f'Finding libdevice from {where}')
    path = get_libdevice()
    print('\tLocated at', path)

    try:
        print('\tChecking library', end='...')
        check_static_lib(path)
        print('\tok')
    except FileNotFoundError as e:
        # Bug fix: this previously reported the stale `lib` name
        # ('cudadevrt', left over from the section above) instead of
        # 'libdevice'.
        print('\tERROR: failed to find %s:\n%s' % ('libdevice', e))
        failed = True

    return not failed
|
lib/python3.10/site-packages/numba/cuda/cudadrv/ndarray.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.cudadrv import devices, driver
|
| 2 |
+
from numba.core.registry import cpu_target
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def _calc_array_sizeof(ndim):
    """
    Use the ABI size in the CPU target
    """
    context = cpu_target.target_context
    return context.calc_array_sizeof(ndim)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def ndarray_device_allocate_data(ary):
    """
    Allocate gpu data buffer
    """
    nbytes = driver.host_memory_size(ary)
    # allocate from the current CUDA context
    return devices.get_context().memalloc(nbytes)
|
lib/python3.10/site-packages/numba/cuda/cudadrv/nvvm.py
ADDED
|
@@ -0,0 +1,707 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This is a direct translation of nvvm.h
|
| 3 |
+
"""
|
| 4 |
+
import logging
|
| 5 |
+
import re
|
| 6 |
+
import sys
|
| 7 |
+
import warnings
|
| 8 |
+
from ctypes import (c_void_p, c_int, POINTER, c_char_p, c_size_t, byref,
|
| 9 |
+
c_char)
|
| 10 |
+
|
| 11 |
+
import threading
|
| 12 |
+
|
| 13 |
+
from llvmlite import ir
|
| 14 |
+
|
| 15 |
+
from .error import NvvmError, NvvmSupportError, NvvmWarning
|
| 16 |
+
from .libs import get_libdevice, open_libdevice, open_cudalib
|
| 17 |
+
from numba.core import cgutils, config
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
ADDRSPACE_GENERIC = 0
|
| 23 |
+
ADDRSPACE_GLOBAL = 1
|
| 24 |
+
ADDRSPACE_SHARED = 3
|
| 25 |
+
ADDRSPACE_CONSTANT = 4
|
| 26 |
+
ADDRSPACE_LOCAL = 5
|
| 27 |
+
|
| 28 |
+
# Opaque handle for compilation unit
|
| 29 |
+
nvvm_program = c_void_p
|
| 30 |
+
|
| 31 |
+
# Result code
|
| 32 |
+
nvvm_result = c_int
|
| 33 |
+
|
| 34 |
+
RESULT_CODE_NAMES = '''
|
| 35 |
+
NVVM_SUCCESS
|
| 36 |
+
NVVM_ERROR_OUT_OF_MEMORY
|
| 37 |
+
NVVM_ERROR_PROGRAM_CREATION_FAILURE
|
| 38 |
+
NVVM_ERROR_IR_VERSION_MISMATCH
|
| 39 |
+
NVVM_ERROR_INVALID_INPUT
|
| 40 |
+
NVVM_ERROR_INVALID_PROGRAM
|
| 41 |
+
NVVM_ERROR_INVALID_IR
|
| 42 |
+
NVVM_ERROR_INVALID_OPTION
|
| 43 |
+
NVVM_ERROR_NO_MODULE_IN_PROGRAM
|
| 44 |
+
NVVM_ERROR_COMPILATION
|
| 45 |
+
'''.split()
|
| 46 |
+
|
| 47 |
+
for i, k in enumerate(RESULT_CODE_NAMES):
|
| 48 |
+
setattr(sys.modules[__name__], k, i)
|
| 49 |
+
|
| 50 |
+
# Data layouts. NVVM IR 1.8 (CUDA 11.6) introduced 128-bit integer support.
|
| 51 |
+
|
| 52 |
+
_datalayout_original = ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-'
|
| 53 |
+
'i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-'
|
| 54 |
+
'v64:64:64-v128:128:128-n16:32:64')
|
| 55 |
+
_datalayout_i128 = ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-'
|
| 56 |
+
'i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-'
|
| 57 |
+
'v64:64:64-v128:128:128-n16:32:64')
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def is_available():
    """
    Return if libNVVM is available
    """
    try:
        NVVM()
    except NvvmSupportError:
        return False
    return True
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
_nvvm_lock = threading.Lock()
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class NVVM(object):
    '''Process-wide singleton wrapping the libNVVM shared library.

    The library handle and its ctypes-bound entry points are created once
    (under a lock) in ``__new__``; ``__init__`` then queries version info
    on every construction of the (shared) instance.
    '''
    # Map of libNVVM function name -> (restype, *argtypes) used to bind
    # each entry point via ctypes.
    _PROTOTYPES = {

        # nvvmResult nvvmVersion(int *major, int *minor)
        'nvvmVersion': (nvvm_result, POINTER(c_int), POINTER(c_int)),

        # nvvmResult nvvmCreateProgram(nvvmProgram *cu)
        'nvvmCreateProgram': (nvvm_result, POINTER(nvvm_program)),

        # nvvmResult nvvmDestroyProgram(nvvmProgram *cu)
        'nvvmDestroyProgram': (nvvm_result, POINTER(nvvm_program)),

        # nvvmResult nvvmAddModuleToProgram(nvvmProgram cu, const char *buffer,
        #                                   size_t size, const char *name)
        'nvvmAddModuleToProgram': (
            nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p),

        # nvvmResult nvvmLazyAddModuleToProgram(nvvmProgram cu,
        #                                       const char* buffer,
        #                                       size_t size,
        #                                       const char *name)
        'nvvmLazyAddModuleToProgram': (
            nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p),

        # nvvmResult nvvmCompileProgram(nvvmProgram cu, int numOptions,
        #                               const char **options)
        'nvvmCompileProgram': (
            nvvm_result, nvvm_program, c_int, POINTER(c_char_p)),

        # nvvmResult nvvmGetCompiledResultSize(nvvmProgram cu,
        #                                      size_t *bufferSizeRet)
        'nvvmGetCompiledResultSize': (
            nvvm_result, nvvm_program, POINTER(c_size_t)),

        # nvvmResult nvvmGetCompiledResult(nvvmProgram cu, char *buffer)
        'nvvmGetCompiledResult': (nvvm_result, nvvm_program, c_char_p),

        # nvvmResult nvvmGetProgramLogSize(nvvmProgram cu,
        #                                  size_t *bufferSizeRet)
        'nvvmGetProgramLogSize': (nvvm_result, nvvm_program, POINTER(c_size_t)),

        # nvvmResult nvvmGetProgramLog(nvvmProgram cu, char *buffer)
        'nvvmGetProgramLog': (nvvm_result, nvvm_program, c_char_p),

        # nvvmResult nvvmIRVersion (int* majorIR, int* minorIR, int* majorDbg,
        #                           int* minorDbg )
        'nvvmIRVersion': (nvvm_result, POINTER(c_int), POINTER(c_int),
                          POINTER(c_int), POINTER(c_int)),
        # nvvmResult nvvmVerifyProgram (nvvmProgram prog, int numOptions,
        #                               const char** options)
        'nvvmVerifyProgram': (nvvm_result, nvvm_program, c_int,
                              POINTER(c_char_p))
    }

    # Singleton reference
    __INSTANCE = None

    def __new__(cls):
        # Double-checked construction under a module lock so only one
        # thread loads libNVVM and binds its functions.
        with _nvvm_lock:
            if cls.__INSTANCE is None:
                cls.__INSTANCE = inst = object.__new__(cls)
                try:
                    inst.driver = open_cudalib('nvvm')
                except OSError as e:
                    # Reset so a later attempt can retry the load.
                    cls.__INSTANCE = None
                    errmsg = ("libNVVM cannot be found. Do `conda install "
                              "cudatoolkit`:\n%s")
                    raise NvvmSupportError(errmsg % e)

                # Find & populate functions
                for name, proto in inst._PROTOTYPES.items():
                    func = getattr(inst.driver, name)
                    func.restype = proto[0]
                    func.argtypes = proto[1:]
                    setattr(inst, name, func)

        return cls.__INSTANCE

    def __init__(self):
        # Runs on every NVVM() call (the instance is shared); re-queries
        # the IR version and supported compute capabilities.
        ir_versions = self.get_ir_version()
        self._majorIR = ir_versions[0]
        self._minorIR = ir_versions[1]
        self._majorDbg = ir_versions[2]
        self._minorDbg = ir_versions[3]
        self._supported_ccs = get_supported_ccs()

    @property
    def data_layout(self):
        # NVVM IR 1.8 (CUDA 11.6) introduced 128-bit integer support, which
        # requires the i128-aware data layout string.
        if (self._majorIR, self._minorIR) < (1, 8):
            return _datalayout_original
        else:
            return _datalayout_i128

    @property
    def supported_ccs(self):
        # Tuple of (major, minor) compute capabilities, cached by __init__.
        return self._supported_ccs

    def get_version(self):
        """Return the (major, minor) version of libNVVM."""
        major = c_int()
        minor = c_int()
        err = self.nvvmVersion(byref(major), byref(minor))
        self.check_error(err, 'Failed to get version.')
        return major.value, minor.value

    def get_ir_version(self):
        """Return (majorIR, minorIR, majorDbg, minorDbg) for the NVVM IR."""
        majorIR = c_int()
        minorIR = c_int()
        majorDbg = c_int()
        minorDbg = c_int()
        err = self.nvvmIRVersion(byref(majorIR), byref(minorIR),
                                 byref(majorDbg), byref(minorDbg))
        self.check_error(err, 'Failed to get IR version.')
        return majorIR.value, minorIR.value, majorDbg.value, minorDbg.value

    def check_error(self, error, msg, exit=False):
        """Raise NvvmError for a nonzero NVVM result code.

        With ``exit=True`` the error is printed and the process exits
        instead (used where raising is unsafe, e.g. from ``__del__``).
        """
        if error:
            exc = NvvmError(msg, RESULT_CODE_NAMES[error])
            if exit:
                print(exc)
                sys.exit(1)
            else:
                raise exc
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class CompilationUnit(object):
    """Wraps an NVVM program (compilation unit) handle.

    NVVM IR modules are added with :meth:`add_module` /
    :meth:`lazy_add_module` and compiled with :meth:`compile`.
    """

    def __init__(self):
        self.driver = NVVM()
        self._handle = nvvm_program()
        err = self.driver.nvvmCreateProgram(byref(self._handle))
        self.driver.check_error(err, 'Failed to create CU')

    def __del__(self):
        driver = NVVM()
        err = driver.nvvmDestroyProgram(byref(self._handle))
        # exit=True: raising from __del__ is unreliable, so on failure the
        # error is printed and the process exits instead.
        driver.check_error(err, 'Failed to destroy CU', exit=True)

    def add_module(self, buffer):
        """
        Add a module level NVVM IR to a compilation unit.
        - The buffer should contain an NVVM module IR either in the bitcode
          representation (LLVM3.0) or in the text representation.
        """
        err = self.driver.nvvmAddModuleToProgram(self._handle, buffer,
                                                 len(buffer), None)
        self.driver.check_error(err, 'Failed to add module')

    def lazy_add_module(self, buffer):
        """
        Lazily add an NVVM IR module to a compilation unit.
        The buffer should contain NVVM module IR either in the bitcode
        representation or in the text representation.
        """
        err = self.driver.nvvmLazyAddModuleToProgram(self._handle, buffer,
                                                     len(buffer), None)
        self.driver.check_error(err, 'Failed to add module')

    def compile(self, **options):
        """Perform Compilation.

        Compilation options are accepted as keyword arguments, with the
        following considerations:

        - Underscores (`_`) in option names are converted to dashes (`-`), to
          match NVVM's option name format.
        - Options that take a value will be emitted in the form
          "-<name>=<value>".
        - Booleans passed as option values will be converted to integers.
        - Options which take no value (such as `-gen-lto`) should have a value
          of `None` passed in and will be emitted in the form "-<name>".

        For documentation on NVVM compilation options, see the CUDA Toolkit
        Documentation:

        https://docs.nvidia.com/cuda/libnvvm-api/index.html#_CPPv418nvvmCompileProgram11nvvmProgramiPPKc
        """

        def stringify_option(k, v):
            # Convert a keyword argument into NVVM's "-name[=value]" form.
            k = k.replace('_', '-')

            if v is None:
                return f'-{k}'

            if isinstance(v, bool):
                v = int(v)

            return f'-{k}={v}'

        options = [stringify_option(k, v) for k, v in options.items()]

        c_opts = (c_char_p * len(options))(*[c_char_p(x.encode('utf8'))
                                             for x in options])
        # verify
        err = self.driver.nvvmVerifyProgram(self._handle, len(options), c_opts)
        self._try_error(err, 'Failed to verify\n')

        # compile
        err = self.driver.nvvmCompileProgram(self._handle, len(options), c_opts)
        self._try_error(err, 'Failed to compile\n')

        # get result
        reslen = c_size_t()
        err = self.driver.nvvmGetCompiledResultSize(self._handle, byref(reslen))

        self._try_error(err, 'Failed to get size of compiled result.')

        output_buffer = (c_char * reslen.value)()
        err = self.driver.nvvmGetCompiledResult(self._handle, output_buffer)
        self._try_error(err, 'Failed to get compiled result.')

        # get log
        self.log = self.get_log()
        if self.log:
            warnings.warn(self.log, category=NvvmWarning)

        return output_buffer[:]

    def _try_error(self, err, msg):
        # Append the NVVM program log to the error message for context.
        self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))

    def get_log(self):
        """Return the NVVM program log as a string ('' when empty)."""
        reslen = c_size_t()
        err = self.driver.nvvmGetProgramLogSize(self._handle, byref(reslen))
        self.driver.check_error(err, 'Failed to get compilation log size.')

        if reslen.value > 1:
            logbuf = (c_char * reslen.value)()
            err = self.driver.nvvmGetProgramLog(self._handle, logbuf)
            self.driver.check_error(err, 'Failed to get compilation log.')

            return logbuf.value.decode('utf8')  # populate log attribute

        return ''
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
COMPUTE_CAPABILITIES = (
|
| 312 |
+
(3, 5), (3, 7),
|
| 313 |
+
(5, 0), (5, 2), (5, 3),
|
| 314 |
+
(6, 0), (6, 1), (6, 2),
|
| 315 |
+
(7, 0), (7, 2), (7, 5),
|
| 316 |
+
(8, 0), (8, 6), (8, 7), (8, 9),
|
| 317 |
+
(9, 0)
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
# Maps CTK version -> (min supported cc, max supported cc) inclusive
|
| 321 |
+
CTK_SUPPORTED = {
|
| 322 |
+
(11, 2): ((3, 5), (8, 6)),
|
| 323 |
+
(11, 3): ((3, 5), (8, 6)),
|
| 324 |
+
(11, 4): ((3, 5), (8, 7)),
|
| 325 |
+
(11, 5): ((3, 5), (8, 7)),
|
| 326 |
+
(11, 6): ((3, 5), (8, 7)),
|
| 327 |
+
(11, 7): ((3, 5), (8, 7)),
|
| 328 |
+
(11, 8): ((3, 5), (9, 0)),
|
| 329 |
+
(12, 0): ((5, 0), (9, 0)),
|
| 330 |
+
(12, 1): ((5, 0), (9, 0)),
|
| 331 |
+
(12, 2): ((5, 0), (9, 0)),
|
| 332 |
+
(12, 3): ((5, 0), (9, 0)),
|
| 333 |
+
(12, 4): ((5, 0), (9, 0)),
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def ccs_supported_by_ctk(ctk_version):
    """Return the tuple of compute capabilities supported by the given
    CUDA toolkit version."""
    bounds = CTK_SUPPORTED.get(ctk_version)
    if bounds is not None:
        # For supported versions, we look up the range of supported CCs
        lo, hi = bounds
        return tuple(cc for cc in COMPUTE_CAPABILITIES if lo <= cc <= hi)
    # For unsupported CUDA toolkit versions, all we can do is assume all
    # non-deprecated versions we are aware of are supported.
    return tuple(cc for cc in COMPUTE_CAPABILITIES
                 if cc >= config.CUDA_DEFAULT_PTX_CC)
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def get_supported_ccs():
    """Return the compute capabilities supported by the installed CUDA
    runtime, or an empty tuple when none can be determined."""
    try:
        from numba.cuda.cudadrv.runtime import runtime
        cudart_version = runtime.get_version()
    except:  # noqa: E722
        # We can't support anything if there's an error getting the runtime
        # version (e.g. if it's not present or there's another issue)
        return ()

    # Ensure the minimum CTK version requirement is met
    min_cudart = min(CTK_SUPPORTED)
    if cudart_version < min_cudart:
        ctk_ver = f"{cudart_version[0]}.{cudart_version[1]}"
        unsupported_ver = (f"CUDA Toolkit {ctk_ver} is unsupported by Numba - "
                           f"{min_cudart[0]}.{min_cudart[1]} is the minimum "
                           "required version.")
        warnings.warn(unsupported_ver)
        return ()

    return ccs_supported_by_ctk(cudart_version)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def find_closest_arch(mycc):
    """
    Given a compute capability, return the closest compute capability supported
    by the CUDA toolkit.

    :param mycc: Compute capability as a tuple ``(MAJOR, MINOR)``
    :return: Closest supported CC as a tuple ``(MAJOR, MINOR)``
    :raises NvvmSupportError: if no CCs are supported, or ``mycc`` is below
        the minimum supported CC.
    """
    supported_ccs = NVVM().supported_ccs

    if not supported_ccs:
        msg = "No supported GPU compute capabilities found. " \
              "Please check your cudatoolkit version matches your CUDA version."
        raise NvvmSupportError(msg)

    for i, cc in enumerate(supported_ccs):
        if cc == mycc:
            # Matches
            return cc
        elif cc > mycc:
            # Exceeded
            if i == 0:
                # CC lower than supported
                # Bug fix: the string-literal join previously lacked a space,
                # producing "...not supported(requires >=...)".
                msg = "GPU compute capability %d.%d is not supported " \
                      "(requires >=%d.%d)" % (mycc + cc)
                raise NvvmSupportError(msg)
            else:
                # return the previous CC
                return supported_ccs[i - 1]

    # CC higher than supported
    return supported_ccs[-1]  # Choose the highest
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def get_arch_option(major, minor):
    """Matches with the closest architecture option
    """
    # An explicitly forced CC takes precedence over the lookup.
    if config.FORCE_CUDA_CC:
        cc = config.FORCE_CUDA_CC
    else:
        cc = find_closest_arch((major, minor))
    return 'compute_%d%d' % cc
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
MISSING_LIBDEVICE_FILE_MSG = '''Missing libdevice file.
|
| 420 |
+
Please ensure you have a CUDA Toolkit 11.2 or higher.
|
| 421 |
+
For CUDA 12, ``cuda-nvcc`` and ``cuda-nvrtc`` are required:
|
| 422 |
+
|
| 423 |
+
$ conda install -c conda-forge cuda-nvcc cuda-nvrtc "cuda-version>=12.0"
|
| 424 |
+
|
| 425 |
+
For CUDA 11, ``cudatoolkit`` is required:
|
| 426 |
+
|
| 427 |
+
$ conda install -c conda-forge cudatoolkit "cuda-version>=11.2,<12.0"
|
| 428 |
+
'''
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class LibDevice(object):
    """Provides the libdevice bitcode, read from disk at most once per
    process."""
    # Class-level cache of the libdevice bitcode bytes.
    _cache_ = None

    def __init__(self):
        if self._cache_ is None:
            if get_libdevice() is None:
                raise RuntimeError(MISSING_LIBDEVICE_FILE_MSG)
            # Bug fix: assign the cache on the class, not the instance.
            # The original `self._cache_ = ...` created an instance
            # attribute, so the class-level cache was never populated and
            # every new LibDevice instance re-read the file.
            type(self)._cache_ = open_libdevice()

        self.bc = self._cache_

    def get(self):
        """Return the libdevice bitcode as bytes."""
        return self.bc
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
cas_nvvm = """
|
| 447 |
+
%cas_success = cmpxchg volatile {Ti}* %iptr, {Ti} %old, {Ti} %new monotonic monotonic
|
| 448 |
+
%cas = extractvalue {{ {Ti}, i1 }} %cas_success, 0
|
| 449 |
+
""" # noqa: E501
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
# Translation of code from CUDA Programming Guide v6.5, section B.12
|
| 453 |
+
ir_numba_atomic_binary_template = """
|
| 454 |
+
define internal {T} @___numba_atomic_{T}_{FUNC}({T}* %ptr, {T} %val) alwaysinline {{
|
| 455 |
+
entry:
|
| 456 |
+
%iptr = bitcast {T}* %ptr to {Ti}*
|
| 457 |
+
%old2 = load volatile {Ti}, {Ti}* %iptr
|
| 458 |
+
br label %attempt
|
| 459 |
+
|
| 460 |
+
attempt:
|
| 461 |
+
%old = phi {Ti} [ %old2, %entry ], [ %cas, %attempt ]
|
| 462 |
+
%dold = bitcast {Ti} %old to {T}
|
| 463 |
+
%dnew = {OP} {T} %dold, %val
|
| 464 |
+
%new = bitcast {T} %dnew to {Ti}
|
| 465 |
+
{CAS}
|
| 466 |
+
%repeat = icmp ne {Ti} %cas, %old
|
| 467 |
+
br i1 %repeat, label %attempt, label %done
|
| 468 |
+
|
| 469 |
+
done:
|
| 470 |
+
%result = bitcast {Ti} %old to {T}
|
| 471 |
+
ret {T} %result
|
| 472 |
+
}}
|
| 473 |
+
""" # noqa: E501
|
| 474 |
+
|
| 475 |
+
ir_numba_atomic_inc_template = """
|
| 476 |
+
define internal {T} @___numba_atomic_{Tu}_inc({T}* %iptr, {T} %val) alwaysinline {{
|
| 477 |
+
entry:
|
| 478 |
+
%old2 = load volatile {T}, {T}* %iptr
|
| 479 |
+
br label %attempt
|
| 480 |
+
|
| 481 |
+
attempt:
|
| 482 |
+
%old = phi {T} [ %old2, %entry ], [ %cas, %attempt ]
|
| 483 |
+
%bndchk = icmp ult {T} %old, %val
|
| 484 |
+
%inc = add {T} %old, 1
|
| 485 |
+
%new = select i1 %bndchk, {T} %inc, {T} 0
|
| 486 |
+
{CAS}
|
| 487 |
+
%repeat = icmp ne {T} %cas, %old
|
| 488 |
+
br i1 %repeat, label %attempt, label %done
|
| 489 |
+
|
| 490 |
+
done:
|
| 491 |
+
ret {T} %old
|
| 492 |
+
}}
|
| 493 |
+
""" # noqa: E501
|
| 494 |
+
|
| 495 |
+
ir_numba_atomic_dec_template = """
|
| 496 |
+
define internal {T} @___numba_atomic_{Tu}_dec({T}* %iptr, {T} %val) alwaysinline {{
|
| 497 |
+
entry:
|
| 498 |
+
%old2 = load volatile {T}, {T}* %iptr
|
| 499 |
+
br label %attempt
|
| 500 |
+
|
| 501 |
+
attempt:
|
| 502 |
+
%old = phi {T} [ %old2, %entry ], [ %cas, %attempt ]
|
| 503 |
+
%dec = add {T} %old, -1
|
| 504 |
+
%bndchk = icmp ult {T} %dec, %val
|
| 505 |
+
%new = select i1 %bndchk, {T} %dec, {T} %val
|
| 506 |
+
{CAS}
|
| 507 |
+
%repeat = icmp ne {T} %cas, %old
|
| 508 |
+
br i1 %repeat, label %attempt, label %done
|
| 509 |
+
|
| 510 |
+
done:
|
| 511 |
+
ret {T} %old
|
| 512 |
+
}}
|
| 513 |
+
""" # noqa: E501
|
| 514 |
+
|
| 515 |
+
ir_numba_atomic_minmax_template = """
|
| 516 |
+
define internal {T} @___numba_atomic_{T}_{NAN}{FUNC}({T}* %ptr, {T} %val) alwaysinline {{
|
| 517 |
+
entry:
|
| 518 |
+
%ptrval = load volatile {T}, {T}* %ptr
|
| 519 |
+
; Return early when:
|
| 520 |
+
; - For nanmin / nanmax when val is a NaN
|
| 521 |
+
; - For min / max when val or ptr is a NaN
|
| 522 |
+
%early_return = fcmp uno {T} %val, %{PTR_OR_VAL}val
|
| 523 |
+
br i1 %early_return, label %done, label %lt_check
|
| 524 |
+
|
| 525 |
+
lt_check:
|
| 526 |
+
%dold = phi {T} [ %ptrval, %entry ], [ %dcas, %attempt ]
|
| 527 |
+
; Continue attempts if dold less or greater than val (depending on whether min or max)
|
| 528 |
+
; or if dold is NaN (for nanmin / nanmax)
|
| 529 |
+
%cmp = fcmp {OP} {T} %dold, %val
|
| 530 |
+
br i1 %cmp, label %attempt, label %done
|
| 531 |
+
|
| 532 |
+
attempt:
|
| 533 |
+
; Attempt to swap in the value
|
| 534 |
+
%old = bitcast {T} %dold to {Ti}
|
| 535 |
+
%iptr = bitcast {T}* %ptr to {Ti}*
|
| 536 |
+
%new = bitcast {T} %val to {Ti}
|
| 537 |
+
{CAS}
|
| 538 |
+
%dcas = bitcast {Ti} %cas to {T}
|
| 539 |
+
br label %lt_check
|
| 540 |
+
|
| 541 |
+
done:
|
| 542 |
+
ret {T} %ptrval
|
| 543 |
+
}}
|
| 544 |
+
""" # noqa: E501
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def ir_cas(Ti):
    """Render the NVVM cmpxchg snippet specialized for integer type *Ti*."""
    return cas_nvvm.format(Ti=Ti)
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def ir_numba_atomic_binary(T, Ti, OP, FUNC):
    """Render the binary atomic CAS-loop template for the given types/op."""
    return ir_numba_atomic_binary_template.format(
        T=T, Ti=Ti, OP=OP, FUNC=FUNC, CAS=ir_cas(Ti))
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
def ir_numba_atomic_minmax(T, Ti, NAN, OP, PTR_OR_VAL, FUNC):
    """Render the atomic min/max template for the given configuration."""
    subs = dict(T=T, Ti=Ti, NAN=NAN, OP=OP, PTR_OR_VAL=PTR_OR_VAL,
                FUNC=FUNC, CAS=ir_cas(Ti))
    return ir_numba_atomic_minmax_template.format(**subs)
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def ir_numba_atomic_inc(T, Tu):
    """Render the atomic-increment IR template for types *T* / *Tu*."""
    return ir_numba_atomic_inc_template.format(T=T, Tu=Tu, CAS=ir_cas(T))
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
def ir_numba_atomic_dec(T, Tu):
    """Render the atomic-decrement IR template for types *T* / *Tu*."""
    return ir_numba_atomic_dec_template.format(T=T, Tu=Tu, CAS=ir_cas(T))
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
def llvm_replace(llvmir):
    """Replace declarations of Numba's atomic helper functions with their
    CAS-loop IR implementations, then downgrade the IR for LLVM 7.0.

    The declaration strings must match llvmlite's output byte-for-byte;
    any declaration not present in *llvmir* is simply left untouched by
    ``str.replace``. The final ``('immarg', '')`` entry strips an argument
    attribute unknown to older LLVM.

    :param llvmir: module IR as a string.
    :return: the patched IR string.
    """
    replacements = [
        ('declare double @"___numba_atomic_double_add"(double* %".1", double %".2")', # noqa: E501
         ir_numba_atomic_binary(T='double', Ti='i64', OP='fadd', FUNC='add')),
        ('declare float @"___numba_atomic_float_sub"(float* %".1", float %".2")', # noqa: E501
         ir_numba_atomic_binary(T='float', Ti='i32', OP='fsub', FUNC='sub')),
        ('declare double @"___numba_atomic_double_sub"(double* %".1", double %".2")', # noqa: E501
         ir_numba_atomic_binary(T='double', Ti='i64', OP='fsub', FUNC='sub')),
        ('declare i64 @"___numba_atomic_u64_inc"(i64* %".1", i64 %".2")',
         ir_numba_atomic_inc(T='i64', Tu='u64')),
        ('declare i64 @"___numba_atomic_u64_dec"(i64* %".1", i64 %".2")',
         ir_numba_atomic_dec(T='i64', Tu='u64')),
        ('declare float @"___numba_atomic_float_max"(float* %".1", float %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='float', Ti='i32', NAN='', OP='nnan olt',
                                PTR_OR_VAL='ptr', FUNC='max')),
        ('declare double @"___numba_atomic_double_max"(double* %".1", double %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='double', Ti='i64', NAN='', OP='nnan olt',
                                PTR_OR_VAL='ptr', FUNC='max')),
        ('declare float @"___numba_atomic_float_min"(float* %".1", float %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='float', Ti='i32', NAN='', OP='nnan ogt',
                                PTR_OR_VAL='ptr', FUNC='min')),
        ('declare double @"___numba_atomic_double_min"(double* %".1", double %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='double', Ti='i64', NAN='', OP='nnan ogt',
                                PTR_OR_VAL='ptr', FUNC='min')),
        ('declare float @"___numba_atomic_float_nanmax"(float* %".1", float %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='float', Ti='i32', NAN='nan', OP='ult',
                                PTR_OR_VAL='', FUNC='max')),
        ('declare double @"___numba_atomic_double_nanmax"(double* %".1", double %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='double', Ti='i64', NAN='nan', OP='ult',
                                PTR_OR_VAL='', FUNC='max')),
        ('declare float @"___numba_atomic_float_nanmin"(float* %".1", float %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='float', Ti='i32', NAN='nan', OP='ugt',
                                PTR_OR_VAL='', FUNC='min')),
        ('declare double @"___numba_atomic_double_nanmin"(double* %".1", double %".2")', # noqa: E501
         ir_numba_atomic_minmax(T='double', Ti='i64', NAN='nan', OP='ugt',
                                PTR_OR_VAL='', FUNC='min')),
        ('immarg', '')
    ]

    for decl, fn in replacements:
        llvmir = llvmir.replace(decl, fn)

    llvmir = llvm140_to_70_ir(llvmir)

    return llvmir
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def compile_ir(llvmir, **opts):
    """Compile one or more NVVM IR modules into a single compilation unit.

    :param llvmir: a single IR module as a string, or an iterable of them.
    :param opts: options forwarded to ``CompilationUnit.compile``. The
        convenience flag ``fastmath`` expands into the individual NVVM
        fast-math options (``ftz``, ``fma``, ``prec_div``, ``prec_sqrt``).
    :return: the result of ``CompilationUnit.compile``.
    """
    modules = [llvmir] if isinstance(llvmir, str) else llvmir

    # Expand the aggregate fast-math switch into NVVM's individual knobs.
    if opts.pop('fastmath', False):
        opts['ftz'] = True
        opts['fma'] = True
        opts['prec_div'] = False
        opts['prec_sqrt'] = False

    cu = CompilationUnit()
    libdevice = LibDevice()

    for source in modules:
        patched = llvm_replace(source)
        cu.add_module(patched.encode('utf8'))
    # libdevice is linked lazily so only referenced functions are pulled in.
    cu.lazy_add_module(libdevice.get())

    return cu.compile(**opts)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
re_attributes_def = re.compile(r"^attributes #\d+ = \{ ([\w\s]+)\ }")


def llvm140_to_70_ir(ir):
    """
    Convert LLVM 14.0 IR for LLVM 7.0.

    Presently this only strips the ``willreturn`` function attribute,
    which LLVM 7.0 (and therefore NVVM) does not understand. All other
    lines are passed through verbatim.

    :param ir: module IR as a string.
    :return: the converted IR as a string.
    """
    buf = []
    for line in ir.splitlines():
        if line.startswith('attributes #'):
            # Remove function attributes unsupported by LLVM 7.0
            m = re_attributes_def.match(line)
            if m is not None:
                # Guard: attribute lines the pattern does not cover (e.g.
                # string-valued attributes such as "frame-pointer"="all")
                # previously raised AttributeError here; leave them as-is.
                attrs = m.group(1).split()
                attrs = ' '.join(a for a in attrs if a != 'willreturn')
                line = line.replace(m.group(1), attrs)

        buf.append(line)

    return '\n'.join(buf)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def set_cuda_kernel(function):
    """
    Mark a function as a CUDA kernel. Kernels have the following requirements:

    - Metadata that marks them as a kernel.
    - Addition to the @llvm.used list, so that they will not be discarded.
    - The noinline attribute is not permitted, because this causes NVVM to emit
      a warning, which counts as failing IR verification.

    Presently it is assumed that there is one kernel per module, which holds
    for Numba-jitted functions. If this changes in future or this function is
    to be used externally, this function may need modification to add to the
    @llvm.used list rather than creating it.
    """
    module = function.module

    # Add kernel metadata: an nvvm.annotations entry of the form
    # (function, "kernel", 1), which is how NVVM identifies kernels.
    mdstr = ir.MetaDataString(module, "kernel")
    mdvalue = ir.Constant(ir.IntType(32), 1)
    md = module.add_metadata((function, mdstr, mdvalue))

    nmd = cgutils.get_or_insert_named_metadata(module, 'nvvm.annotations')
    nmd.add(md)

    # Create the used list (an i8* array holding the kernel pointer), so
    # optimization passes cannot discard the kernel as dead code.
    ptrty = ir.IntType(8).as_pointer()
    usedty = ir.ArrayType(ptrty, 1)

    fnptr = function.bitcast(ptrty)

    llvm_used = ir.GlobalVariable(module, usedty, 'llvm.used')
    llvm_used.linkage = 'appending'
    llvm_used.section = 'llvm.metadata'
    llvm_used.initializer = ir.Constant(usedty, [fnptr])

    # Remove 'noinline' if it is present.
    function.attributes.discard('noinline')
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
def add_ir_version(mod):
    """Add NVVM IR version to module.

    :param mod: an llvmlite IR module to annotate in place.
    """
    # We specify the IR version to match the current NVVM's IR version
    i32 = ir.IntType(32)
    ir_versions = [i32(v) for v in NVVM().get_ir_version()]
    md_ver = mod.add_metadata(ir_versions)
    mod.add_named_metadata('nvvmir.version', md_ver)
|
lib/python3.10/site-packages/numba/cuda/cudadrv/rtapi.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Declarations of the Runtime API functions.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from ctypes import c_int, POINTER
|
| 6 |
+
|
| 7 |
+
# Maps CUDA Runtime API function name -> (restype, *argtypes) for use when
# binding the function via ctypes.
API_PROTOTYPES = {
    # cudaError_t cudaRuntimeGetVersion ( int* runtimeVersion )
    'cudaRuntimeGetVersion': (c_int, POINTER(c_int)),
}
|
lib/python3.10/site-packages/numba/cuda/kernels/__init__.py
ADDED
|
File without changes
|
lib/python3.10/site-packages/numba/cuda/kernels/reduction.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A library written in CUDA Python for generating reduction kernels
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from numba.np.numpy_support import from_dtype
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
_WARPSIZE = 32
|
| 9 |
+
_NUMWARPS = 4
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _gpu_reduce_factory(fn, nbtype):
    """Build and return a CUDA reduction kernel specialized for the binary
    function *fn* operating on elements of Numba type *nbtype*.

    The returned kernel performs a block-strided partial reduction; see
    ``gpu_reduce_block_strided`` below for the launch contract.
    """
    from numba import cuda

    reduce_op = cuda.jit(device=True)(fn)
    inner_sm_size = _WARPSIZE + 1  # plus one to avoid SM collision
    max_blocksize = _NUMWARPS * _WARPSIZE

    @cuda.jit(device=True)
    def inner_warp_reduction(sm_partials, init):
        """
        Compute reduction within a single warp
        """
        tid = cuda.threadIdx.x
        warpid = tid // _WARPSIZE
        laneid = tid % _WARPSIZE

        sm_this = sm_partials[warpid, :]
        sm_this[laneid] = init
        cuda.syncwarp()

        # Pairwise tree reduction: halve the active width each step.
        width = _WARPSIZE // 2
        while width:
            if laneid < width:
                old = sm_this[laneid]
                sm_this[laneid] = reduce_op(old, sm_this[laneid + width])
            cuda.syncwarp()
            width //= 2

    @cuda.jit(device=True)
    def device_reduce_full_block(arr, partials, sm_partials):
        """
        Partially reduce `arr` into `partials` using `sm_partials` as working
        space. The algorithm goes like:

            array chunks of 128:  |   0 | 128 | 256 | 384 | 512 |
                        block-0:  |   x |     |     |   x |     |
                        block-1:  |     |   x |     |     |   x |
                        block-2:  |     |     |   x |     |     |

        The array is divided into chunks of 128 (size of a threadblock).
        The threadblocks consumes the chunks in roundrobin scheduling.
        First, a threadblock loads a chunk into temp memory. Then, all
        subsequent chunks are combined into the temp memory.

        Once all chunks are processed. Inner-block reduction is performed
        on the temp memory. So that, there will just be one scalar result
        per block. The result from each block is stored to `partials` at
        the dedicated slot.
        """
        tid = cuda.threadIdx.x
        blkid = cuda.blockIdx.x
        blksz = cuda.blockDim.x
        gridsz = cuda.gridDim.x

        # block strided loop to compute the reduction
        start = tid + blksz * blkid
        stop = arr.size
        step = blksz * gridsz

        # load first value
        tmp = arr[start]
        # loop over all values in block-stride
        for i in range(start + step, stop, step):
            tmp = reduce_op(tmp, arr[i])

        cuda.syncthreads()
        # inner-warp reduction
        inner_warp_reduction(sm_partials, tmp)

        cuda.syncthreads()
        # at this point, only the first slot for each warp in sm_partials
        # is valid.

        # finish up block reduction
        # warning: this is assuming 4 warps.
        # assert numwarps == 4
        if tid < 2:
            sm_partials[tid, 0] = reduce_op(sm_partials[tid, 0],
                                            sm_partials[tid + 2, 0])
        cuda.syncwarp()
        if tid == 0:
            partials[blkid] = reduce_op(sm_partials[0, 0], sm_partials[1, 0])

    @cuda.jit(device=True)
    def device_reduce_partial_block(arr, partials, sm_partials):
        """
        This computes reduction on `arr`.
        This device function must be used by 1 threadblock only.
        The blocksize must match `arr.size` and must not be greater than 128.
        """
        tid = cuda.threadIdx.x
        blkid = cuda.blockIdx.x
        blksz = cuda.blockDim.x
        warpid = tid // _WARPSIZE
        laneid = tid % _WARPSIZE

        size = arr.size
        # load first value
        tid = cuda.threadIdx.x
        value = arr[tid]
        sm_partials[warpid, laneid] = value

        cuda.syncthreads()

        if (warpid + 1) * _WARPSIZE < size:
            # fully populated warps
            inner_warp_reduction(sm_partials, value)
        else:
            # partially populated warps
            # NOTE: this uses a very inefficient sequential algorithm
            if laneid == 0:
                sm_this = sm_partials[warpid, :]
                base = warpid * _WARPSIZE
                for i in range(1, size - base):
                    sm_this[0] = reduce_op(sm_this[0], sm_this[i])

        cuda.syncthreads()
        # finish up: thread 0 folds the per-warp results together.
        if tid == 0:
            num_active_warps = (blksz + _WARPSIZE - 1) // _WARPSIZE

            result = sm_partials[0, 0]
            for i in range(1, num_active_warps):
                result = reduce_op(result, sm_partials[i, 0])

            partials[blkid] = result

    def gpu_reduce_block_strided(arr, partials, init, use_init):
        """
        Perform reductions on *arr* and writing out partial reduction result
        into *partials*. The length of *partials* is determined by the
        number of threadblocks. The initial value is set with *init*.

        Launch config:

        Blocksize must be multiple of warpsize and it is limited to 4 warps.
        """
        tid = cuda.threadIdx.x

        sm_partials = cuda.shared.array((_NUMWARPS, inner_sm_size),
                                        dtype=nbtype)
        if cuda.blockDim.x == max_blocksize:
            device_reduce_full_block(arr, partials, sm_partials)
        else:
            device_reduce_partial_block(arr, partials, sm_partials)
        # deal with the initializer: folded in exactly once, by thread 0
        # of block 0.
        if use_init and tid == 0 and cuda.blockIdx.x == 0:
            partials[0] = reduce_op(partials[0], init)

    return cuda.jit(gpu_reduce_block_strided)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class Reduce(object):
    """Create a reduction object that reduces values using a given binary
    function. The binary function is compiled once and cached inside this
    object. Keeping this object alive will prevent re-compilation.
    """

    # Shared across ALL Reduce instances: maps (functor, dtype) -> compiled
    # kernel, so identical reductions are compiled once per process.
    _cache = {}

    def __init__(self, functor):
        """
        :param functor: A function implementing a binary operation for
                        reduction. It will be compiled as a CUDA device
                        function using ``cuda.jit(device=True)``.
        """
        self._functor = functor

    def _compile(self, dtype):
        # Fetch (or build and cache) the kernel specialized for this
        # functor and NumPy dtype.
        key = self._functor, dtype
        if key in self._cache:
            kernel = self._cache[key]
        else:
            kernel = _gpu_reduce_factory(self._functor, from_dtype(dtype))
            self._cache[key] = kernel
        return kernel

    def __call__(self, arr, size=None, res=None, init=0, stream=0):
        """Performs a full reduction.

        :param arr: A host or device array.
        :param size: Optional integer specifying the number of elements in
            ``arr`` to reduce. If this parameter is not specified, the
            entire array is reduced.
        :param res: Optional device array into which to write the reduction
            result to. The result is written into the first element of
            this array. If this parameter is specified, then no
            communication of the reduction output takes place from the
            device to the host.
        :param init: Optional initial value for the reduction, the type of which
            must match ``arr.dtype``.
        :param stream: Optional CUDA stream in which to perform the reduction.
            If no stream is specified, the default stream of 0 is
            used.
        :return: If ``res`` is specified, ``None`` is returned. Otherwise, the
            result of the reduction is returned.
        """
        from numba import cuda

        # ensure 1d array
        if arr.ndim != 1:
            raise TypeError("only support 1D array")

        # adjust array size
        if size is not None:
            arr = arr[:size]

        init = arr.dtype.type(init)  # ensure the right type

        # return `init` if `arr` is empty
        if arr.size < 1:
            return init

        kernel = self._compile(arr.dtype)

        # Perform the reduction on the GPU. The array is split into a
        # "full" region (a multiple of the blocksize) and a remainder.
        blocksize = _NUMWARPS * _WARPSIZE
        size_full = (arr.size // blocksize) * blocksize
        size_partial = arr.size - size_full
        full_blockct = min(size_full // blocksize, _WARPSIZE * 2)

        # allocate size of partials array: one slot per full block, plus
        # one for the remainder region if present.
        partials_size = full_blockct
        if size_partial:
            partials_size += 1
        partials = cuda.device_array(shape=partials_size, dtype=arr.dtype)

        if size_full:
            # kernel for the fully populated threadblocks
            kernel[full_blockct, blocksize, stream](arr[:size_full],
                                                    partials[:full_blockct],
                                                    init,
                                                    True)

        if size_partial:
            # kernel for partially populated threadblocks; it folds in
            # `init` only if the full pass did not already do so.
            kernel[1, size_partial, stream](arr[size_full:],
                                            partials[full_blockct:],
                                            init,
                                            not full_blockct)

        if partials.size > 1:
            # finish up: reduce the partials down to partials[0]
            kernel[1, partials_size, stream](partials, partials, init, False)

        # handle return value
        if res is not None:
            res[:1].copy_to_device(partials[:1], stream=stream)
            return
        else:
            return partials[0]
|
lib/python3.10/site-packages/numba/cuda/kernels/transpose.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba import cuda
|
| 2 |
+
from numba.cuda.cudadrv.driver import driver
|
| 3 |
+
import math
|
| 4 |
+
from numba.np import numpy_support as nps
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def transpose(a, b=None):
    """Compute the transpose of 'a' and store it into 'b', if given,
    and return it. If 'b' is not given, allocate a new array
    and return that.

    This implements the algorithm documented in
    http://devblogs.nvidia.com/parallelforall/efficient-matrix-transpose-cuda-cc/

    :param a: an `np.ndarray` or a `DeviceNDArrayBase` subclass. If already on
        the device its stream will be used to perform the transpose (and to copy
        `b` to the device if necessary).
    :param b: optional output array; must have the transposed shape of `a`.
    :return: the transposed array (`b` if it was given).
    """

    # prefer `a`'s stream if it has one
    stream = getattr(a, 'stream', 0)

    if b is None:
        # Test `b is None` rather than `not b`: evaluating the truthiness
        # of an array argument is ambiguous and may raise.
        cols, rows = a.shape
        strides = a.dtype.itemsize * cols, a.dtype.itemsize
        b = cuda.cudadrv.devicearray.DeviceNDArray(
            (rows, cols),
            strides,
            dtype=a.dtype,
            stream=stream)

    dt = nps.from_dtype(a.dtype)

    tpb = driver.get_device().MAX_THREADS_PER_BLOCK
    # we need to factor available threads into x and y axis
    tile_width = int(math.pow(2, math.log(tpb, 2) / 2))
    tile_height = int(tpb / tile_width)

    # +1 column of padding to avoid shared-memory bank conflicts
    tile_shape = (tile_height, tile_width + 1)

    @cuda.jit
    def kernel(input, output):

        tile = cuda.shared.array(shape=tile_shape, dtype=dt)

        tx = cuda.threadIdx.x
        ty = cuda.threadIdx.y
        bx = cuda.blockIdx.x * cuda.blockDim.x
        by = cuda.blockIdx.y * cuda.blockDim.y
        x = by + tx
        y = bx + ty

        if by + ty < input.shape[0] and bx + tx < input.shape[1]:
            tile[ty, tx] = input[by + ty, bx + tx]
        cuda.syncthreads()
        if y < output.shape[0] and x < output.shape[1]:
            output[y, x] = tile[tx, ty]

    # one block per tile, plus one for remainders
    blocks = int(b.shape[0] / tile_height + 1), int(b.shape[1] / tile_width + 1)
    # one thread per tile element
    threads = tile_height, tile_width
    kernel[blocks, threads, stream](a, b)

    return b
|
lib/python3.10/site-packages/numba/cuda/simulator/__init__.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
from .api import *
|
| 4 |
+
from .vector_types import vector_types
|
| 5 |
+
from .reduction import Reduce
|
| 6 |
+
from .cudadrv.devicearray import (device_array, device_array_like, pinned,
|
| 7 |
+
pinned_array, pinned_array_like,
|
| 8 |
+
mapped_array, to_device, auto_device)
|
| 9 |
+
from .cudadrv import devicearray
|
| 10 |
+
from .cudadrv.devices import require_context, gpus
|
| 11 |
+
from .cudadrv.devices import get_context as current_context
|
| 12 |
+
from .cudadrv.runtime import runtime
|
| 13 |
+
from numba.core import config
|
| 14 |
+
# Expose the reduction factory under the same name as the real CUDA target.
reduce = Reduce

# Register simulated vector types as module level variables
for name, svty in vector_types.items():
    setattr(sys.modules[__name__], name, svty)
    for alias in svty.aliases:
        setattr(sys.modules[__name__], alias, svty)
# NOTE(review): the `del alias` assumes at least one vector type declares at
# least one alias; otherwise `alias` would be unbound here — confirm against
# the vector_types definitions.
del vector_types, name, svty, alias

# Ensure that any user code attempting to import cudadrv etc. gets the
# simulator's version and not the real version if the simulator is enabled.
if config.ENABLE_CUDASIM:
    import sys
    from numba.cuda.simulator import cudadrv
    sys.modules['numba.cuda.cudadrv'] = cudadrv
    sys.modules['numba.cuda.cudadrv.devicearray'] = cudadrv.devicearray
    sys.modules['numba.cuda.cudadrv.devices'] = cudadrv.devices
    sys.modules['numba.cuda.cudadrv.driver'] = cudadrv.driver
    sys.modules['numba.cuda.cudadrv.runtime'] = cudadrv.runtime
    sys.modules['numba.cuda.cudadrv.drvapi'] = cudadrv.drvapi
    sys.modules['numba.cuda.cudadrv.error'] = cudadrv.error
    sys.modules['numba.cuda.cudadrv.nvvm'] = cudadrv.nvvm

    from . import compiler
    sys.modules['numba.cuda.compiler'] = compiler
|
lib/python3.10/site-packages/numba/cuda/simulator/api.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
Contains CUDA API functions
|
| 3 |
+
'''
|
| 4 |
+
|
| 5 |
+
# Imports here bring together parts of the API from other modules, so some of
|
| 6 |
+
# them appear unused.
|
| 7 |
+
from contextlib import contextmanager
|
| 8 |
+
|
| 9 |
+
from .cudadrv.devices import require_context, reset, gpus # noqa: F401
|
| 10 |
+
from .kernel import FakeCUDAKernel
|
| 11 |
+
from numba.core.sigutils import is_signature
|
| 12 |
+
from warnings import warn
|
| 13 |
+
from ..args import In, Out, InOut # noqa: F401
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def select_device(dev=0):
    """Simulate device selection; only device 0 exists in the simulator.

    NOTE(review): ``assert`` is stripped under ``python -O``, so this check
    silently disappears there; kept as-is to preserve behaviour.
    """
    assert dev == 0, 'Only a single device supported by the simulator'
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def is_float16_supported():
    """The simulator always reports float16 as supported."""
    return True
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class stream(object):
    '''
    Simulated CUDA stream. All simulator execution happens synchronously,
    so both synchronization entry points are no-ops.
    '''

    def synchronize(self):
        '''No-op: simulated work has already completed.'''
        pass

    @contextmanager
    def auto_synchronize(self):
        '''Context manager that "synchronizes" on exit (a no-op here).'''
        yield
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def synchronize():
    """Module-level synchronize; a no-op in the simulator."""
    pass
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def close():
    """Mark the simulated device list as closed."""
    gpus.closed = True
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def declare_device(*args, **kwargs):
    """Accept and ignore declarations of external device functions."""
    pass
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def detect():
    """Print a device summary mimicking the real ``cuda.detect`` output."""
    summary = [
        'Found 1 CUDA devices',
        'id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]'),
        '%40s: 5.0' % 'compute capability',
    ]
    for line in summary:
        print(line)
|
| 54 |
+
|
| 55 |
+
def list_devices():
    """Return the simulated device list (a single fake GPU)."""
    return gpus
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Events
|
| 60 |
+
|
| 61 |
+
class Event(object):
    '''
    Simulated CUDA event. Events can be recorded and waited on for API
    compatibility, but since all simulated execution is synchronous, no
    real timing information is captured.
    '''

    def record(self, stream=0):
        '''No-op: there is nothing asynchronous to record.'''
        pass

    def wait(self, stream=0):
        '''No-op: all prior simulated work has already finished.'''
        pass

    def synchronize(self):
        '''No-op: see ``wait``.'''
        pass

    def elapsed_time(self, event):
        '''Return a dummy elapsed time of 0.0 ms, after warning that the
        value is meaningless under simulation.'''
        warn('Simulator timings are bogus')
        return 0.0
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
event = Event
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def jit(func_or_sig=None, device=False, debug=False, argtypes=None,
        inline=False, restype=None, fastmath=False, link=None,
        boundscheck=None, opt=True, cache=None
        ):
    """Simulated version of ``numba.cuda.jit``.

    Several compilation-related arguments (*argtypes*, *inline*, *restype*,
    *opt*, *cache*) are accepted only for API compatibility and ignored.

    :raises NotImplementedError: if *boundscheck* or *link* is requested;
        the simulator supports neither.
    """
    # Here for API compatibility
    if boundscheck:
        raise NotImplementedError("bounds checking is not supported for CUDA")

    if link is not None:
        raise NotImplementedError('Cannot link PTX in the simulator')

    # Check for first argument specifying types - in that case the
    # decorator is not being passed a function
    if (func_or_sig is None or is_signature(func_or_sig)
            or isinstance(func_or_sig, list)):
        def jitwrapper(fn):
            return FakeCUDAKernel(fn,
                                  device=device,
                                  fastmath=fastmath,
                                  debug=debug)
        return jitwrapper
    # Pass fastmath through in the direct-decoration case too, for
    # consistency with the signature-first path above.
    return FakeCUDAKernel(func_or_sig, device=device, debug=debug,
                          fastmath=fastmath)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@contextmanager
def defer_cleanup():
    """Context manager matching ``cuda.defer_cleanup``; a no-op here."""
    yield
|
lib/python3.10/site-packages/numba/cuda/simulator/compiler.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
The compiler is not implemented in the simulator. This module provides a stub
|
| 3 |
+
to allow tests to import successfully.
|
| 4 |
+
'''
|
| 5 |
+
|
| 6 |
+
# All compiler entry points are unavailable under simulation; they are bound
# to None so that imports of these names succeed, while any attempt to call
# them fails loudly. (`compile` intentionally mirrors the real module's
# public name, although it shadows the builtin within this module.)
compile = None
compile_for_current_device = None
compile_ptx = None
compile_ptx_for_current_device = None
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.simulator.cudadrv import (devicearray, devices, driver, drvapi,
|
| 2 |
+
error, nvvm)
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devicearray.py
ADDED
|
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
The Device Array API is not implemented in the simulator. This module provides
|
| 3 |
+
stubs to allow tests to import correctly.
|
| 4 |
+
'''
|
| 5 |
+
from contextlib import contextmanager
|
| 6 |
+
from numba.np.numpy_support import numpy_version
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
DeviceRecord = None
|
| 12 |
+
from_record_like = None
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
errmsg_contiguous_buffer = ("Array contains non-contiguous buffer and cannot "
|
| 16 |
+
"be transferred as a single memory region. Please "
|
| 17 |
+
"ensure contiguous buffer with numpy "
|
| 18 |
+
".ascontiguousarray()")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class FakeShape(tuple):
|
| 22 |
+
'''
|
| 23 |
+
The FakeShape class is used to provide a shape which does not allow negative
|
| 24 |
+
indexing, similar to the shape in CUDA Python. (Numpy shape arrays allow
|
| 25 |
+
negative indexing)
|
| 26 |
+
'''
|
| 27 |
+
|
| 28 |
+
def __getitem__(self, k):
|
| 29 |
+
if isinstance(k, int) and k < 0:
|
| 30 |
+
raise IndexError('tuple index out of range')
|
| 31 |
+
return super(FakeShape, self).__getitem__(k)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class FakeWithinKernelCUDAArray(object):
|
| 35 |
+
'''
|
| 36 |
+
Created to emulate the behavior of arrays within kernels, where either
|
| 37 |
+
array.item or array['item'] is valid (that is, give all structured
|
| 38 |
+
arrays `numpy.recarray`-like semantics). This behaviour does not follow
|
| 39 |
+
the semantics of Python and NumPy with non-jitted code, and will be
|
| 40 |
+
deprecated and removed.
|
| 41 |
+
'''
|
| 42 |
+
|
| 43 |
+
def __init__(self, item):
|
| 44 |
+
assert isinstance(item, FakeCUDAArray)
|
| 45 |
+
self.__dict__['_item'] = item
|
| 46 |
+
|
| 47 |
+
def __wrap_if_fake(self, item):
|
| 48 |
+
if isinstance(item, FakeCUDAArray):
|
| 49 |
+
return FakeWithinKernelCUDAArray(item)
|
| 50 |
+
else:
|
| 51 |
+
return item
|
| 52 |
+
|
| 53 |
+
def __getattr__(self, attrname):
|
| 54 |
+
try:
|
| 55 |
+
if attrname in dir(self._item._ary): # For e.g. array size.
|
| 56 |
+
return self.__wrap_if_fake(getattr(self._item._ary, attrname))
|
| 57 |
+
else:
|
| 58 |
+
return self.__wrap_if_fake(self._item.__getitem__(attrname))
|
| 59 |
+
except Exception as e:
|
| 60 |
+
if not isinstance(e, AttributeError):
|
| 61 |
+
raise AttributeError(attrname) from e
|
| 62 |
+
|
| 63 |
+
def __setattr__(self, nm, val):
|
| 64 |
+
self._item.__setitem__(nm, val)
|
| 65 |
+
|
| 66 |
+
def __getitem__(self, idx):
|
| 67 |
+
return self.__wrap_if_fake(self._item.__getitem__(idx))
|
| 68 |
+
|
| 69 |
+
def __setitem__(self, idx, val):
|
| 70 |
+
self._item.__setitem__(idx, val)
|
| 71 |
+
|
| 72 |
+
def __len__(self):
|
| 73 |
+
return len(self._item)
|
| 74 |
+
|
| 75 |
+
def __array_ufunc__(self, ufunc, method, *args, **kwargs):
|
| 76 |
+
# ufuncs can only be called directly on instances of numpy.ndarray (not
|
| 77 |
+
# things that implement its interfaces, like the FakeCUDAArray or
|
| 78 |
+
# FakeWithinKernelCUDAArray). For other objects, __array_ufunc__ is
|
| 79 |
+
# called when they are arguments to ufuncs, to provide an opportunity
|
| 80 |
+
# to somehow implement the ufunc. Since the FakeWithinKernelCUDAArray
|
| 81 |
+
# is just a thin wrapper over an ndarray, we can implement all ufuncs
|
| 82 |
+
# by passing the underlying ndarrays to a call to the intended ufunc.
|
| 83 |
+
call = getattr(ufunc, method)
|
| 84 |
+
|
| 85 |
+
def convert_fakes(obj):
|
| 86 |
+
if isinstance(obj, FakeWithinKernelCUDAArray):
|
| 87 |
+
obj = obj._item._ary
|
| 88 |
+
|
| 89 |
+
return obj
|
| 90 |
+
|
| 91 |
+
out = kwargs.get('out')
|
| 92 |
+
if out:
|
| 93 |
+
kwargs['out'] = tuple(convert_fakes(o) for o in out)
|
| 94 |
+
args = tuple(convert_fakes(a) for a in args)
|
| 95 |
+
return call(*args, **kwargs)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class FakeCUDAArray(object):
|
| 99 |
+
'''
|
| 100 |
+
Implements the interface of a DeviceArray/DeviceRecord, but mostly just
|
| 101 |
+
wraps a NumPy array.
|
| 102 |
+
'''
|
| 103 |
+
|
| 104 |
+
__cuda_ndarray__ = True # There must be gpu_data attribute
|
| 105 |
+
|
| 106 |
+
def __init__(self, ary, stream=0):
|
| 107 |
+
self._ary = ary
|
| 108 |
+
self.stream = stream
|
| 109 |
+
|
| 110 |
+
@property
|
| 111 |
+
def alloc_size(self):
|
| 112 |
+
return self._ary.nbytes
|
| 113 |
+
|
| 114 |
+
@property
|
| 115 |
+
def nbytes(self):
|
| 116 |
+
# return nbytes -- FakeCUDAArray is a wrapper around NumPy
|
| 117 |
+
return self._ary.nbytes
|
| 118 |
+
|
| 119 |
+
def __getattr__(self, attrname):
|
| 120 |
+
try:
|
| 121 |
+
attr = getattr(self._ary, attrname)
|
| 122 |
+
return attr
|
| 123 |
+
except AttributeError as e:
|
| 124 |
+
msg = "Wrapped array has no attribute '%s'" % attrname
|
| 125 |
+
raise AttributeError(msg) from e
|
| 126 |
+
|
| 127 |
+
def bind(self, stream=0):
|
| 128 |
+
return FakeCUDAArray(self._ary, stream)
|
| 129 |
+
|
| 130 |
+
@property
|
| 131 |
+
def T(self):
|
| 132 |
+
return self.transpose()
|
| 133 |
+
|
| 134 |
+
def transpose(self, axes=None):
|
| 135 |
+
return FakeCUDAArray(np.transpose(self._ary, axes=axes))
|
| 136 |
+
|
| 137 |
+
def __getitem__(self, idx):
|
| 138 |
+
ret = self._ary.__getitem__(idx)
|
| 139 |
+
if type(ret) not in [np.ndarray, np.void]:
|
| 140 |
+
return ret
|
| 141 |
+
else:
|
| 142 |
+
return FakeCUDAArray(ret, stream=self.stream)
|
| 143 |
+
|
| 144 |
+
def __setitem__(self, idx, val):
|
| 145 |
+
return self._ary.__setitem__(idx, val)
|
| 146 |
+
|
| 147 |
+
def copy_to_host(self, ary=None, stream=0):
|
| 148 |
+
if ary is None:
|
| 149 |
+
ary = np.empty_like(self._ary)
|
| 150 |
+
else:
|
| 151 |
+
check_array_compatibility(self, ary)
|
| 152 |
+
np.copyto(ary, self._ary)
|
| 153 |
+
return ary
|
| 154 |
+
|
| 155 |
+
def copy_to_device(self, ary, stream=0):
|
| 156 |
+
'''
|
| 157 |
+
Copy from the provided array into this array.
|
| 158 |
+
|
| 159 |
+
This may be less forgiving than the CUDA Python implementation, which
|
| 160 |
+
will copy data up to the length of the smallest of the two arrays,
|
| 161 |
+
whereas this expects the size of the arrays to be equal.
|
| 162 |
+
'''
|
| 163 |
+
sentry_contiguous(self)
|
| 164 |
+
self_core, ary_core = array_core(self), array_core(ary)
|
| 165 |
+
if isinstance(ary, FakeCUDAArray):
|
| 166 |
+
sentry_contiguous(ary)
|
| 167 |
+
check_array_compatibility(self_core, ary_core)
|
| 168 |
+
else:
|
| 169 |
+
ary_core = np.array(
|
| 170 |
+
ary_core,
|
| 171 |
+
order='C' if self_core.flags['C_CONTIGUOUS'] else 'F',
|
| 172 |
+
subok=True,
|
| 173 |
+
copy=False if numpy_version < (2, 0) else None)
|
| 174 |
+
check_array_compatibility(self_core, ary_core)
|
| 175 |
+
np.copyto(self_core._ary, ary_core)
|
| 176 |
+
|
| 177 |
+
@property
|
| 178 |
+
def shape(self):
|
| 179 |
+
return FakeShape(self._ary.shape)
|
| 180 |
+
|
| 181 |
+
def ravel(self, *args, **kwargs):
|
| 182 |
+
return FakeCUDAArray(self._ary.ravel(*args, **kwargs))
|
| 183 |
+
|
| 184 |
+
def reshape(self, *args, **kwargs):
|
| 185 |
+
return FakeCUDAArray(self._ary.reshape(*args, **kwargs))
|
| 186 |
+
|
| 187 |
+
def view(self, *args, **kwargs):
|
| 188 |
+
return FakeCUDAArray(self._ary.view(*args, **kwargs))
|
| 189 |
+
|
| 190 |
+
def is_c_contiguous(self):
|
| 191 |
+
return self._ary.flags.c_contiguous
|
| 192 |
+
|
| 193 |
+
def is_f_contiguous(self):
|
| 194 |
+
return self._ary.flags.f_contiguous
|
| 195 |
+
|
| 196 |
+
def __str__(self):
|
| 197 |
+
return str(self._ary)
|
| 198 |
+
|
| 199 |
+
def __repr__(self):
|
| 200 |
+
return repr(self._ary)
|
| 201 |
+
|
| 202 |
+
def __len__(self):
|
| 203 |
+
return len(self._ary)
|
| 204 |
+
|
| 205 |
+
# TODO: Add inplace, bitwise, unary magic methods
|
| 206 |
+
# (or maybe inherit this class from numpy)?
|
| 207 |
+
def __eq__(self, other):
|
| 208 |
+
return FakeCUDAArray(self._ary == other)
|
| 209 |
+
|
| 210 |
+
def __ne__(self, other):
|
| 211 |
+
return FakeCUDAArray(self._ary != other)
|
| 212 |
+
|
| 213 |
+
def __lt__(self, other):
|
| 214 |
+
return FakeCUDAArray(self._ary < other)
|
| 215 |
+
|
| 216 |
+
def __le__(self, other):
|
| 217 |
+
return FakeCUDAArray(self._ary <= other)
|
| 218 |
+
|
| 219 |
+
def __gt__(self, other):
|
| 220 |
+
return FakeCUDAArray(self._ary > other)
|
| 221 |
+
|
| 222 |
+
def __ge__(self, other):
|
| 223 |
+
return FakeCUDAArray(self._ary >= other)
|
| 224 |
+
|
| 225 |
+
def __add__(self, other):
|
| 226 |
+
return FakeCUDAArray(self._ary + other)
|
| 227 |
+
|
| 228 |
+
def __sub__(self, other):
|
| 229 |
+
return FakeCUDAArray(self._ary - other)
|
| 230 |
+
|
| 231 |
+
def __mul__(self, other):
|
| 232 |
+
return FakeCUDAArray(self._ary * other)
|
| 233 |
+
|
| 234 |
+
def __floordiv__(self, other):
|
| 235 |
+
return FakeCUDAArray(self._ary // other)
|
| 236 |
+
|
| 237 |
+
def __truediv__(self, other):
|
| 238 |
+
return FakeCUDAArray(self._ary / other)
|
| 239 |
+
|
| 240 |
+
def __mod__(self, other):
|
| 241 |
+
return FakeCUDAArray(self._ary % other)
|
| 242 |
+
|
| 243 |
+
def __pow__(self, other):
|
| 244 |
+
return FakeCUDAArray(self._ary ** other)
|
| 245 |
+
|
| 246 |
+
def split(self, section, stream=0):
|
| 247 |
+
return [
|
| 248 |
+
FakeCUDAArray(a)
|
| 249 |
+
for a in np.split(self._ary, range(section, len(self), section))
|
| 250 |
+
]
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def array_core(ary):
|
| 254 |
+
"""
|
| 255 |
+
Extract the repeated core of a broadcast array.
|
| 256 |
+
|
| 257 |
+
Broadcast arrays are by definition non-contiguous due to repeated
|
| 258 |
+
dimensions, i.e., dimensions with stride 0. In order to ascertain memory
|
| 259 |
+
contiguity and copy the underlying data from such arrays, we must create
|
| 260 |
+
a view without the repeated dimensions.
|
| 261 |
+
|
| 262 |
+
"""
|
| 263 |
+
if not ary.strides or not ary.size:
|
| 264 |
+
return ary
|
| 265 |
+
core_index = []
|
| 266 |
+
for stride in ary.strides:
|
| 267 |
+
core_index.append(0 if stride == 0 else slice(None))
|
| 268 |
+
return ary[tuple(core_index)]
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def is_contiguous(ary):
|
| 272 |
+
"""
|
| 273 |
+
Returns True iff `ary` is C-style contiguous while ignoring
|
| 274 |
+
broadcasted and 1-sized dimensions.
|
| 275 |
+
As opposed to array_core(), it does not call require_context(),
|
| 276 |
+
which can be quite expensive.
|
| 277 |
+
"""
|
| 278 |
+
size = ary.dtype.itemsize
|
| 279 |
+
for shape, stride in zip(reversed(ary.shape), reversed(ary.strides)):
|
| 280 |
+
if shape > 1 and stride != 0:
|
| 281 |
+
if size != stride:
|
| 282 |
+
return False
|
| 283 |
+
size *= shape
|
| 284 |
+
return True
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def sentry_contiguous(ary):
|
| 288 |
+
core = array_core(ary)
|
| 289 |
+
if not core.flags['C_CONTIGUOUS'] and not core.flags['F_CONTIGUOUS']:
|
| 290 |
+
raise ValueError(errmsg_contiguous_buffer)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def check_array_compatibility(ary1, ary2):
|
| 294 |
+
ary1sq, ary2sq = ary1.squeeze(), ary2.squeeze()
|
| 295 |
+
if ary1.dtype != ary2.dtype:
|
| 296 |
+
raise TypeError('incompatible dtype: %s vs. %s' %
|
| 297 |
+
(ary1.dtype, ary2.dtype))
|
| 298 |
+
if ary1sq.shape != ary2sq.shape:
|
| 299 |
+
raise ValueError('incompatible shape: %s vs. %s' %
|
| 300 |
+
(ary1.shape, ary2.shape))
|
| 301 |
+
if ary1sq.strides != ary2sq.strides:
|
| 302 |
+
raise ValueError('incompatible strides: %s vs. %s' %
|
| 303 |
+
(ary1.strides, ary2.strides))
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def to_device(ary, stream=0, copy=True, to=None):
|
| 307 |
+
ary = np.array(ary,
|
| 308 |
+
copy=False if numpy_version < (2, 0) else None,
|
| 309 |
+
subok=True)
|
| 310 |
+
sentry_contiguous(ary)
|
| 311 |
+
if to is None:
|
| 312 |
+
buffer_dtype = np.int64 if ary.dtype.char in 'Mm' else ary.dtype
|
| 313 |
+
return FakeCUDAArray(
|
| 314 |
+
np.ndarray(
|
| 315 |
+
buffer=np.copy(array_core(ary)).view(buffer_dtype),
|
| 316 |
+
dtype=ary.dtype,
|
| 317 |
+
shape=ary.shape,
|
| 318 |
+
strides=ary.strides,
|
| 319 |
+
).view(type=type(ary)),
|
| 320 |
+
)
|
| 321 |
+
else:
|
| 322 |
+
to.copy_to_device(ary, stream=stream)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
@contextmanager
|
| 326 |
+
def pinned(arg):
|
| 327 |
+
yield
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def mapped_array(*args, **kwargs):
|
| 331 |
+
for unused_arg in ('portable', 'wc'):
|
| 332 |
+
if unused_arg in kwargs:
|
| 333 |
+
kwargs.pop(unused_arg)
|
| 334 |
+
return device_array(*args, **kwargs)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def pinned_array(shape, dtype=np.float64, strides=None, order='C'):
|
| 338 |
+
return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def managed_array(shape, dtype=np.float64, strides=None, order='C'):
|
| 342 |
+
return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order)
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def device_array(*args, **kwargs):
|
| 346 |
+
stream = kwargs.pop('stream') if 'stream' in kwargs else 0
|
| 347 |
+
return FakeCUDAArray(np.ndarray(*args, **kwargs), stream=stream)
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def _contiguous_strides_like_array(ary):
|
| 351 |
+
"""
|
| 352 |
+
Given an array, compute strides for a new contiguous array of the same
|
| 353 |
+
shape.
|
| 354 |
+
"""
|
| 355 |
+
# Don't recompute strides if the default strides will be sufficient to
|
| 356 |
+
# create a contiguous array.
|
| 357 |
+
if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
|
| 358 |
+
return None
|
| 359 |
+
|
| 360 |
+
# Otherwise, we need to compute new strides using an algorithm adapted from
|
| 361 |
+
# NumPy v1.17.4's PyArray_NewLikeArrayWithShape in
|
| 362 |
+
# core/src/multiarray/ctors.c. We permute the strides in ascending order
|
| 363 |
+
# then compute the stride for the dimensions with the same permutation.
|
| 364 |
+
|
| 365 |
+
# Stride permutation. E.g. a stride array (4, -2, 12) becomes
|
| 366 |
+
# [(1, -2), (0, 4), (2, 12)]
|
| 367 |
+
strideperm = [ x for x in enumerate(ary.strides) ]
|
| 368 |
+
strideperm.sort(key=lambda x: x[1])
|
| 369 |
+
|
| 370 |
+
# Compute new strides using permutation
|
| 371 |
+
strides = [0] * len(ary.strides)
|
| 372 |
+
stride = ary.dtype.itemsize
|
| 373 |
+
for i_perm, _ in strideperm:
|
| 374 |
+
strides[i_perm] = stride
|
| 375 |
+
stride *= ary.shape[i_perm]
|
| 376 |
+
return tuple(strides)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _order_like_array(ary):
|
| 380 |
+
if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
|
| 381 |
+
return 'F'
|
| 382 |
+
else:
|
| 383 |
+
return 'C'
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def device_array_like(ary, stream=0):
|
| 387 |
+
strides = _contiguous_strides_like_array(ary)
|
| 388 |
+
order = _order_like_array(ary)
|
| 389 |
+
return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
|
| 390 |
+
order=order)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def pinned_array_like(ary):
|
| 394 |
+
strides = _contiguous_strides_like_array(ary)
|
| 395 |
+
order = _order_like_array(ary)
|
| 396 |
+
return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
|
| 397 |
+
order=order)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def auto_device(ary, stream=0, copy=True):
|
| 401 |
+
if isinstance(ary, FakeCUDAArray):
|
| 402 |
+
return ary, False
|
| 403 |
+
|
| 404 |
+
if not isinstance(ary, np.void):
|
| 405 |
+
ary = np.array(
|
| 406 |
+
ary,
|
| 407 |
+
copy=False if numpy_version < (2, 0) else None,
|
| 408 |
+
subok=True)
|
| 409 |
+
return to_device(ary, stream, copy), True
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def is_cuda_ndarray(obj):
|
| 413 |
+
"Check if an object is a CUDA ndarray"
|
| 414 |
+
return getattr(obj, '__cuda_ndarray__', False)
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def verify_cuda_ndarray_interface(obj):
|
| 418 |
+
"Verify the CUDA ndarray interface for an obj"
|
| 419 |
+
require_cuda_ndarray(obj)
|
| 420 |
+
|
| 421 |
+
def requires_attr(attr, typ):
|
| 422 |
+
if not hasattr(obj, attr):
|
| 423 |
+
raise AttributeError(attr)
|
| 424 |
+
if not isinstance(getattr(obj, attr), typ):
|
| 425 |
+
raise AttributeError('%s must be of type %s' % (attr, typ))
|
| 426 |
+
|
| 427 |
+
requires_attr('shape', tuple)
|
| 428 |
+
requires_attr('strides', tuple)
|
| 429 |
+
requires_attr('dtype', np.dtype)
|
| 430 |
+
requires_attr('size', int)
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def require_cuda_ndarray(obj):
|
| 434 |
+
"Raises ValueError is is_cuda_ndarray(obj) evaluates False"
|
| 435 |
+
if not is_cuda_ndarray(obj):
|
| 436 |
+
raise ValueError('require an cuda ndarray object')
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devices.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from collections import namedtuple
|
| 3 |
+
|
| 4 |
+
_MemoryInfo = namedtuple("_MemoryInfo", "free,total")
|
| 5 |
+
|
| 6 |
+
_SIMULATOR_CC = (5, 2)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FakeCUDADevice:
|
| 10 |
+
def __init__(self):
|
| 11 |
+
self.uuid = 'GPU-00000000-0000-0000-0000-000000000000'
|
| 12 |
+
|
| 13 |
+
@property
|
| 14 |
+
def compute_capability(self):
|
| 15 |
+
return _SIMULATOR_CC
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class FakeCUDAContext:
|
| 19 |
+
'''
|
| 20 |
+
This stub implements functionality only for simulating a single GPU
|
| 21 |
+
at the moment.
|
| 22 |
+
'''
|
| 23 |
+
def __init__(self, device_id):
|
| 24 |
+
self._device_id = device_id
|
| 25 |
+
self._device = FakeCUDADevice()
|
| 26 |
+
|
| 27 |
+
def __enter__(self):
|
| 28 |
+
pass
|
| 29 |
+
|
| 30 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 31 |
+
pass
|
| 32 |
+
|
| 33 |
+
def __str__(self):
|
| 34 |
+
return "<Managed Device {self.id}>".format(self=self)
|
| 35 |
+
|
| 36 |
+
@property
|
| 37 |
+
def id(self):
|
| 38 |
+
return self._device_id
|
| 39 |
+
|
| 40 |
+
@property
|
| 41 |
+
def device(self):
|
| 42 |
+
return self._device
|
| 43 |
+
|
| 44 |
+
@property
|
| 45 |
+
def compute_capability(self):
|
| 46 |
+
return _SIMULATOR_CC
|
| 47 |
+
|
| 48 |
+
def reset(self):
|
| 49 |
+
pass
|
| 50 |
+
|
| 51 |
+
def get_memory_info(self):
|
| 52 |
+
"""
|
| 53 |
+
Cross-platform free / total host memory is hard without external
|
| 54 |
+
dependencies, e.g. `psutil` - so return infinite memory to maintain API
|
| 55 |
+
type compatibility
|
| 56 |
+
"""
|
| 57 |
+
return _MemoryInfo(float('inf'), float('inf'))
|
| 58 |
+
|
| 59 |
+
def memalloc(self, sz):
|
| 60 |
+
"""
|
| 61 |
+
Allocates memory on the simulated device
|
| 62 |
+
At present, there is no division between simulated
|
| 63 |
+
host memory and simulated device memory.
|
| 64 |
+
"""
|
| 65 |
+
return np.ndarray(sz, dtype='u1')
|
| 66 |
+
|
| 67 |
+
def memhostalloc(self, sz, mapped=False, portable=False, wc=False):
|
| 68 |
+
'''Allocates memory on the host'''
|
| 69 |
+
return self.memalloc(sz)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class FakeDeviceList:
|
| 73 |
+
'''
|
| 74 |
+
This stub implements a device list containing a single GPU. It also
|
| 75 |
+
keeps track of the GPU status, i.e. whether the context is closed or not,
|
| 76 |
+
which may have been set by the user calling reset()
|
| 77 |
+
'''
|
| 78 |
+
def __init__(self):
|
| 79 |
+
self.lst = (FakeCUDAContext(0),)
|
| 80 |
+
self.closed = False
|
| 81 |
+
|
| 82 |
+
def __getitem__(self, devnum):
|
| 83 |
+
self.closed = False
|
| 84 |
+
return self.lst[devnum]
|
| 85 |
+
|
| 86 |
+
def __str__(self):
|
| 87 |
+
return ', '.join([str(d) for d in self.lst])
|
| 88 |
+
|
| 89 |
+
def __iter__(self):
|
| 90 |
+
return iter(self.lst)
|
| 91 |
+
|
| 92 |
+
def __len__(self):
|
| 93 |
+
return len(self.lst)
|
| 94 |
+
|
| 95 |
+
@property
|
| 96 |
+
def current(self):
|
| 97 |
+
if self.closed:
|
| 98 |
+
return None
|
| 99 |
+
return self.lst[0]
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
gpus = FakeDeviceList()
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def reset():
|
| 106 |
+
gpus[0].closed = True
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_context(devnum=0):
|
| 110 |
+
return FakeCUDAContext(devnum)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def require_context(func):
|
| 114 |
+
'''
|
| 115 |
+
In the simulator, a context is always "available", so this is a no-op.
|
| 116 |
+
'''
|
| 117 |
+
return func
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/driver.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
Most of the driver API is unsupported in the simulator, but some stubs are
|
| 3 |
+
provided to allow tests to import correctly.
|
| 4 |
+
'''
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def device_memset(dst, val, size, stream=0):
|
| 8 |
+
dst.view('u1')[:size].fill(bytes([val])[0])
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def host_to_device(dst, src, size, stream=0):
|
| 12 |
+
dst.view('u1')[:size] = src.view('u1')[:size]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def device_to_host(dst, src, size, stream=0):
|
| 16 |
+
host_to_device(dst, src, size)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def device_memory_size(obj):
|
| 20 |
+
return obj.itemsize * obj.size
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def device_to_device(dst, src, size, stream=0):
|
| 24 |
+
host_to_device(dst, src, size)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class FakeDriver(object):
|
| 28 |
+
def get_device_count(self):
|
| 29 |
+
return 1
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
driver = FakeDriver()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class Linker:
|
| 36 |
+
@classmethod
|
| 37 |
+
def new(cls, max_registers=0, lineinfo=False, cc=None):
|
| 38 |
+
return Linker()
|
| 39 |
+
|
| 40 |
+
@property
|
| 41 |
+
def lto(self):
|
| 42 |
+
return False
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class LinkerError(RuntimeError):
|
| 46 |
+
pass
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class NvrtcError(RuntimeError):
|
| 50 |
+
pass
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class CudaAPIError(RuntimeError):
|
| 54 |
+
pass
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def launch_kernel(*args, **kwargs):
|
| 58 |
+
msg = 'Launching kernels directly is not supported in the simulator'
|
| 59 |
+
raise RuntimeError(msg)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
USE_NV_BINDING = False
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/drvapi.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
drvapi is not implemented in the simulator, but this module exists to allow
|
| 3 |
+
tests to import correctly.
|
| 4 |
+
'''
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/dummyarray.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dummy arrays are not implemented in the simulator. This file allows the dummy
|
| 2 |
+
# array tests to be imported, but they are skipped on the simulator.
|
| 3 |
+
|
| 4 |
+
Array = None
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/error.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class CudaSupportError(RuntimeError):
|
| 2 |
+
pass
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class NvrtcError(Exception):
|
| 6 |
+
pass
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/libs.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def check_static_lib(lib):
|
| 2 |
+
raise FileNotFoundError('Linking libraries not supported by cudasim')
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/nvvm.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
NVVM is not supported in the simulator, but stubs are provided to allow tests
|
| 3 |
+
to import correctly.
|
| 4 |
+
'''
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class NvvmSupportError(ImportError):
|
| 8 |
+
pass
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class NVVM(object):
|
| 12 |
+
def __init__(self):
|
| 13 |
+
raise NvvmSupportError('NVVM not supported in the simulator')
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
CompilationUnit = None
|
| 17 |
+
compile_ir = None
|
| 18 |
+
set_cuda_kernel = None
|
| 19 |
+
get_arch_option = None
|
| 20 |
+
LibDevice = None
|
| 21 |
+
NvvmError = None
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def is_available():
|
| 25 |
+
return False
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_supported_ccs():
|
| 29 |
+
return ()
|
lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/runtime.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
The runtime API is unsupported in the simulator, but some stubs are
|
| 3 |
+
provided to allow tests to import correctly.
|
| 4 |
+
'''
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class FakeRuntime(object):
|
| 8 |
+
def get_version(self):
|
| 9 |
+
return (-1, -1)
|
| 10 |
+
|
| 11 |
+
def is_supported_version(self):
|
| 12 |
+
return True
|
| 13 |
+
|
| 14 |
+
@property
|
| 15 |
+
def supported_versions(self):
|
| 16 |
+
return (-1, -1),
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
runtime = FakeRuntime()
|
lib/python3.10/site-packages/numba/cuda/simulator/kernel.py
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import contextmanager
|
| 2 |
+
import functools
|
| 3 |
+
import sys
|
| 4 |
+
import threading
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from .cudadrv.devicearray import FakeCUDAArray, FakeWithinKernelCUDAArray
|
| 9 |
+
from .kernelapi import Dim3, FakeCUDAModule, swapped_cuda_module
|
| 10 |
+
from ..errors import normalize_kernel_dimensions
|
| 11 |
+
from ..args import wrap_arg, ArgHint
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
Global variable to keep track of the current "kernel context", i.e the
|
| 16 |
+
FakeCUDAModule. We only support one kernel launch at a time.
|
| 17 |
+
No support for concurrent kernel launch.
|
| 18 |
+
"""
|
| 19 |
+
_kernel_context = None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@contextmanager
|
| 23 |
+
def _push_kernel_context(mod):
|
| 24 |
+
"""
|
| 25 |
+
Push the current kernel context.
|
| 26 |
+
"""
|
| 27 |
+
global _kernel_context
|
| 28 |
+
assert _kernel_context is None, "concurrent simulated kernel not supported"
|
| 29 |
+
_kernel_context = mod
|
| 30 |
+
try:
|
| 31 |
+
yield
|
| 32 |
+
finally:
|
| 33 |
+
_kernel_context = None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _get_kernel_context():
|
| 37 |
+
"""
|
| 38 |
+
Get the current kernel context. This is usually done by a device function.
|
| 39 |
+
"""
|
| 40 |
+
return _kernel_context
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class FakeOverload:
|
| 44 |
+
'''
|
| 45 |
+
Used only to provide the max_cooperative_grid_blocks method
|
| 46 |
+
'''
|
| 47 |
+
def max_cooperative_grid_blocks(self, blockdim):
|
| 48 |
+
# We can only run one block in a cooperative grid because we have no
|
| 49 |
+
# mechanism for synchronization between different blocks
|
| 50 |
+
return 1
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class FakeOverloadDict(dict):
|
| 54 |
+
def __getitem__(self, key):
|
| 55 |
+
# Always return a fake overload for any signature, as we don't keep
|
| 56 |
+
# track of overloads in the simulator.
|
| 57 |
+
return FakeOverload()
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class FakeCUDAKernel(object):
|
| 61 |
+
'''
|
| 62 |
+
Wraps a @cuda.jit-ed function.
|
| 63 |
+
'''
|
| 64 |
+
|
| 65 |
+
def __init__(self, fn, device, fastmath=False, extensions=[], debug=False):
|
| 66 |
+
self.fn = fn
|
| 67 |
+
self._device = device
|
| 68 |
+
self._fastmath = fastmath
|
| 69 |
+
self._debug = debug
|
| 70 |
+
self.extensions = list(extensions) # defensive copy
|
| 71 |
+
# Initial configuration: grid unconfigured, stream 0, no dynamic shared
|
| 72 |
+
# memory.
|
| 73 |
+
self.grid_dim = None
|
| 74 |
+
self.block_dim = None
|
| 75 |
+
self.stream = 0
|
| 76 |
+
self.dynshared_size = 0
|
| 77 |
+
functools.update_wrapper(self, fn)
|
| 78 |
+
|
| 79 |
+
def __call__(self, *args):
|
| 80 |
+
if self._device:
|
| 81 |
+
with swapped_cuda_module(self.fn, _get_kernel_context()):
|
| 82 |
+
return self.fn(*args)
|
| 83 |
+
|
| 84 |
+
# Ensure we've been given a valid grid configuration
|
| 85 |
+
grid_dim, block_dim = normalize_kernel_dimensions(self.grid_dim,
|
| 86 |
+
self.block_dim)
|
| 87 |
+
|
| 88 |
+
fake_cuda_module = FakeCUDAModule(grid_dim, block_dim,
|
| 89 |
+
self.dynshared_size)
|
| 90 |
+
with _push_kernel_context(fake_cuda_module):
|
| 91 |
+
# fake_args substitutes all numpy arrays for FakeCUDAArrays
|
| 92 |
+
# because they implement some semantics differently
|
| 93 |
+
retr = []
|
| 94 |
+
|
| 95 |
+
def fake_arg(arg):
|
| 96 |
+
# map the arguments using any extension you've registered
|
| 97 |
+
_, arg = functools.reduce(
|
| 98 |
+
lambda ty_val, extension: extension.prepare_args(
|
| 99 |
+
*ty_val,
|
| 100 |
+
stream=0,
|
| 101 |
+
retr=retr),
|
| 102 |
+
self.extensions,
|
| 103 |
+
(None, arg)
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
if isinstance(arg, np.ndarray) and arg.ndim > 0:
|
| 107 |
+
ret = wrap_arg(arg).to_device(retr)
|
| 108 |
+
elif isinstance(arg, ArgHint):
|
| 109 |
+
ret = arg.to_device(retr)
|
| 110 |
+
elif isinstance(arg, np.void):
|
| 111 |
+
ret = FakeCUDAArray(arg) # In case a np record comes in.
|
| 112 |
+
else:
|
| 113 |
+
ret = arg
|
| 114 |
+
if isinstance(ret, FakeCUDAArray):
|
| 115 |
+
return FakeWithinKernelCUDAArray(ret)
|
| 116 |
+
return ret
|
| 117 |
+
|
| 118 |
+
fake_args = [fake_arg(arg) for arg in args]
|
| 119 |
+
with swapped_cuda_module(self.fn, fake_cuda_module):
|
| 120 |
+
# Execute one block at a time
|
| 121 |
+
for grid_point in np.ndindex(*grid_dim):
|
| 122 |
+
bm = BlockManager(self.fn, grid_dim, block_dim, self._debug)
|
| 123 |
+
bm.run(grid_point, *fake_args)
|
| 124 |
+
|
| 125 |
+
for wb in retr:
|
| 126 |
+
wb()
|
| 127 |
+
|
| 128 |
+
def __getitem__(self, configuration):
|
| 129 |
+
self.grid_dim, self.block_dim = \
|
| 130 |
+
normalize_kernel_dimensions(*configuration[:2])
|
| 131 |
+
|
| 132 |
+
if len(configuration) == 4:
|
| 133 |
+
self.dynshared_size = configuration[3]
|
| 134 |
+
|
| 135 |
+
return self
|
| 136 |
+
|
| 137 |
+
def bind(self):
|
| 138 |
+
pass
|
| 139 |
+
|
| 140 |
+
def specialize(self, *args):
|
| 141 |
+
return self
|
| 142 |
+
|
| 143 |
+
def forall(self, ntasks, tpb=0, stream=0, sharedmem=0):
|
| 144 |
+
if ntasks < 0:
|
| 145 |
+
raise ValueError("Can't create ForAll with negative task count: %s"
|
| 146 |
+
% ntasks)
|
| 147 |
+
return self[ntasks, 1, stream, sharedmem]
|
| 148 |
+
|
| 149 |
+
    @property
    def overloads(self):
        # Stand-in mapping so code that inspects ``kernel.overloads`` keeps
        # working under the simulator; there are no real compiled overloads.
        return FakeOverloadDict()
|
| 152 |
+
|
| 153 |
+
    @property
    def py_func(self):
        """The original Python function wrapped by this fake kernel."""
        return self.fn
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
# Thread emulation
|
| 159 |
+
|
| 160 |
+
class BlockThread(threading.Thread):
    '''
    Manages the execution of a function for a single CUDA thread.

    Each instance corresponds to one (blockIdx, threadIdx) point; barriers
    are simulated by blocking on ``syncthreads_event`` until the managing
    BlockManager releases all threads together.
    '''
    def __init__(self, f, manager, blockIdx, threadIdx, debug):
        if debug:
            # In debug mode, promote division warnings to exceptions so the
            # failing thread reports them like a device-side error.
            def debug_wrapper(*args, **kwargs):
                np.seterr(divide='raise')
                f(*args, **kwargs)
            target = debug_wrapper
        else:
            target = f

        super(BlockThread, self).__init__(target=target)
        # Event/flag pair used by BlockManager to implement syncthreads().
        self.syncthreads_event = threading.Event()
        self.syncthreads_blocked = False
        self._manager = manager
        self.blockIdx = Dim3(*blockIdx)
        self.threadIdx = Dim3(*threadIdx)
        # Captured (exception, traceback) from run(), if any.
        self.exception = None
        self.daemon = True
        # Set by the manager to make blocked threads raise instead of resume.
        self.abort = False
        self.debug = debug
        blockDim = Dim3(*self._manager._block_dim)
        # Linear thread index within the block (x fastest-varying).
        self.thread_id = self.threadIdx.x + (blockDim.x * (self.threadIdx.y +
                                                           blockDim.y *
                                                           self.threadIdx.z))

    def run(self):
        # Run the target; capture any exception annotated with this thread's
        # tid/ctaid so the manager can re-raise it on the main thread.
        try:
            super(BlockThread, self).run()
        except Exception as e:
            tid = 'tid=%s' % list(self.threadIdx)
            ctaid = 'ctaid=%s' % list(self.blockIdx)
            if str(e) == '':
                msg = '%s %s' % (tid, ctaid)
            else:
                msg = '%s %s: %s' % (tid, ctaid, e)
            tb = sys.exc_info()[2]
            # Using `with_traceback` here would cause it to be mutated by
            # future raise statements, which may or may not matter.
            self.exception = (type(e)(msg), tb)

    def syncthreads(self):
        # Block until the manager observes that every live thread has reached
        # the barrier and sets our event. ``abort`` is checked both before
        # waiting and after waking, since the manager may abort either way.
        if self.abort:
            raise RuntimeError("abort flag set on syncthreads call")

        self.syncthreads_blocked = True
        self.syncthreads_event.wait()
        self.syncthreads_event.clear()

        if self.abort:
            raise RuntimeError("abort flag set on syncthreads clear")

    def syncthreads_count(self, value):
        # Publish this thread's predicate, barrier, read the reduction, then
        # barrier again so block_state is not overwritten while others read.
        idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
        self._manager.block_state[idx] = value
        self.syncthreads()
        count = np.count_nonzero(self._manager.block_state)
        self.syncthreads()
        return count

    def syncthreads_and(self, value):
        # Barrier with AND-reduction over all threads' predicates.
        idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
        self._manager.block_state[idx] = value
        self.syncthreads()
        test = np.all(self._manager.block_state)
        self.syncthreads()
        return 1 if test else 0

    def syncthreads_or(self, value):
        # Barrier with OR-reduction over all threads' predicates.
        idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
        self._manager.block_state[idx] = value
        self.syncthreads()
        test = np.any(self._manager.block_state)
        self.syncthreads()
        return 1 if test else 0

    def __str__(self):
        return 'Thread <<<%s, %s>>>' % (self.blockIdx, self.threadIdx)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class BlockManager(object):
    '''
    Manages the execution of a thread block.

    When run() is called, all threads are started. Each thread executes until it
    hits syncthreads(), at which point it sets its own syncthreads_blocked to
    True so that the BlockManager knows it is blocked. It then waits on its
    syncthreads_event.

    The BlockManager polls threads to determine if they are blocked in
    syncthreads(). If it finds a blocked thread, it adds it to the set of
    blocked threads. When all threads are blocked, it unblocks all the threads.
    The thread are unblocked by setting their syncthreads_blocked back to False
    and setting their syncthreads_event.

    The polling continues until no threads are alive, when execution is
    complete.
    '''
    def __init__(self, f, grid_dim, block_dim, debug):
        self._grid_dim = grid_dim
        self._block_dim = block_dim
        self._f = f
        self._debug = debug
        # Shared per-thread predicate storage for syncthreads_count/and/or.
        self.block_state = np.zeros(block_dim, dtype=np.bool_)

    def run(self, grid_point, *args):
        # Create all threads
        threads = set()
        livethreads = set()
        blockedthreads = set()
        for block_point in np.ndindex(*self._block_dim):
            def target():
                self._f(*args)
            t = BlockThread(target, self, grid_point, block_point, self._debug)
            t.start()
            threads.add(t)
            livethreads.add(t)

        # Potential optimisations:
        # 1. Continue the while loop immediately after finding a blocked thread
        # 2. Don't poll already-blocked threads
        while livethreads:
            for t in livethreads:
                if t.syncthreads_blocked:
                    blockedthreads.add(t)
                elif t.exception:

                    # Abort all other simulator threads on exception,
                    # do *not* join immediately to facilitate debugging.
                    for t_other in threads:
                        t_other.abort = True
                        t_other.syncthreads_blocked = False
                        t_other.syncthreads_event.set()

                    raise t.exception[0].with_traceback(t.exception[1])
            # Barrier release: every live thread has reached syncthreads().
            if livethreads == blockedthreads:
                for t in blockedthreads:
                    t.syncthreads_blocked = False
                    t.syncthreads_event.set()
                blockedthreads = set()
            livethreads = set([ t for t in livethreads if t.is_alive() ])
        # Final check for exceptions in case any were set prior to thread
        # finishing, before we could check it
        for t in threads:
            if t.exception:
                raise t.exception[0].with_traceback(t.exception[1])
|
lib/python3.10/site-packages/numba/cuda/simulator/kernelapi.py
ADDED
|
@@ -0,0 +1,495 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
Implements the cuda module as called from within an executing kernel
|
| 3 |
+
(@cuda.jit-decorated function).
|
| 4 |
+
'''
|
| 5 |
+
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
import sys
|
| 8 |
+
import threading
|
| 9 |
+
import traceback
|
| 10 |
+
from numba.core import types
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from numba.np import numpy_support
|
| 14 |
+
|
| 15 |
+
from .vector_types import vector_types
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Dim3(object):
    '''
    A three-component (x, y, z) value used for thread/block indices and
    dimensions, iterable in x, y, z order.
    '''
    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

    def __str__(self):
        return '(%s, %s, %s)' % (self.x, self.y, self.z)

    def __repr__(self):
        return 'Dim3(%s, %s, %s)' % (self.x, self.y, self.z)

    def __iter__(self):
        for component in (self.x, self.y, self.z):
            yield component
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class GridGroup:
    '''
    Used to implement the grid group.

    Only meaningful when called from inside a simulated kernel, where the
    current thread is a BlockThread providing ``syncthreads()``.
    '''

    def sync(self):
        # Synchronization of the grid group is equivalent to synchronization of
        # the thread block, because we only support cooperative grids with one
        # block.
        threading.current_thread().syncthreads()
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class FakeCUDACg:
    '''
    CUDA Cooperative Groups
    '''
    def this_grid(self):
        # A fresh GridGroup per call; it carries no state of its own.
        return GridGroup()
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class FakeCUDALocal(object):
    '''
    CUDA local arrays, simulated with ordinary numpy allocations.
    '''
    def array(self, shape, dtype):
        # Accept either a Numba type or anything numpy itself understands.
        np_dtype = (numpy_support.as_dtype(dtype)
                    if isinstance(dtype, types.Type) else dtype)
        return np.empty(shape, np_dtype)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class FakeCUDAConst(object):
    '''
    CUDA constant arrays. The simulator has no separate constant memory, so
    the host array is returned unchanged.
    '''
    def array_like(self, ary):
        return ary
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class FakeCUDAShared(object):
    '''
    CUDA Shared arrays.

    Limitations: assumes that only one call to cuda.shared.array is on a line,
    and that that line is only executed once per thread. i.e.::

        a = cuda.shared.array(...); b = cuda.shared.array(...)

    will erroneously alias a and b, and::

        for i in range(10):
            sharedarrs[i] = cuda.shared.array(...)

    will alias all arrays created at that point (though it is not certain that
    this would be supported by Numba anyway).
    '''

    def __init__(self, dynshared_size):
        # Maps (filename, lineno) of the call site -> the shared array, so
        # every thread executing the same source line gets the same array.
        self._allocations = {}
        self._dynshared_size = dynshared_size
        # Backing byte buffer for dynamic shared memory requests.
        self._dynshared = np.zeros(dynshared_size, dtype=np.byte)

    def array(self, shape, dtype):
        if isinstance(dtype, types.Type):
            dtype = numpy_support.as_dtype(dtype)
        # Dynamic shared memory is requested with size 0 - this all shares the
        # same underlying memory
        if shape == 0:
            # Count must be the maximum number of whole elements that fit in the
            # buffer (Numpy complains if the buffer is not a multiple of the
            # element size)
            count = self._dynshared_size // dtype.itemsize
            return np.frombuffer(self._dynshared.data, dtype=dtype, count=count)

        # Otherwise, identify allocations by source file and line number
        # We pass the reference frame explicitly to work around
        # http://bugs.python.org/issue25108
        stack = traceback.extract_stack(sys._getframe())
        caller = stack[-2][0:2]
        res = self._allocations.get(caller)
        if res is None:
            res = np.empty(shape, dtype)
            self._allocations[caller] = res
        return res
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
# One module-level lock per simulated atomic operation. Coarse per-operation
# locking is enough: the simulator only needs each read-modify-write in
# FakeCUDAAtomic to be indivisible with respect to other simulator threads.
addlock = threading.Lock()
sublock = threading.Lock()
andlock = threading.Lock()
orlock = threading.Lock()
xorlock = threading.Lock()
maxlock = threading.Lock()
minlock = threading.Lock()
compare_and_swaplock = threading.Lock()
caslock = threading.Lock()
inclock = threading.Lock()
declock = threading.Lock()
exchlock = threading.Lock()
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class FakeCUDAAtomic(object):
    '''
    Simulated atomic operations. Each method performs a locked
    read-modify-write on ``array[index]`` and returns the value held there
    *before* the update.
    '''
    def add(self, array, index, val):
        with addlock:
            old = array[index]
            array[index] += val
        return old

    def sub(self, array, index, val):
        with sublock:
            old = array[index]
            array[index] -= val
        return old

    def and_(self, array, index, val):
        with andlock:
            old = array[index]
            array[index] &= val
        return old

    def or_(self, array, index, val):
        with orlock:
            old = array[index]
            array[index] |= val
        return old

    def xor(self, array, index, val):
        with xorlock:
            old = array[index]
            array[index] ^= val
        return old

    def inc(self, array, index, val):
        # Wrapping increment: reset to 0 once the stored value reaches val.
        with inclock:
            old = array[index]
            if old >= val:
                array[index] = 0
            else:
                array[index] += 1
        return old

    def dec(self, array, index, val):
        # Wrapping decrement: reset to val at 0 or when the value exceeds val.
        with declock:
            old = array[index]
            if (old == 0) or (old > val):
                array[index] = val
            else:
                array[index] -= 1
        return old

    def exch(self, array, index, val):
        # Unconditionally store val, returning the previous value.
        with exchlock:
            old = array[index]
            array[index] = val
        return old

    def max(self, array, index, val):
        with maxlock:
            old = array[index]
            array[index] = max(old, val)
        return old

    def min(self, array, index, val):
        with minlock:
            old = array[index]
            array[index] = min(old, val)
        return old

    def nanmax(self, array, index, val):
        # NaN-ignoring variant (shares maxlock with max).
        with maxlock:
            old = array[index]
            array[index] = np.nanmax([array[index], val])
        return old

    def nanmin(self, array, index, val):
        # NaN-ignoring variant (shares minlock with min).
        with minlock:
            old = array[index]
            array[index] = np.nanmin([array[index], val])
        return old

    def compare_and_swap(self, array, old, val):
        # Operates on the first element of ``array``; stores val only if the
        # current value equals ``old``. Returns the value actually loaded.
        with compare_and_swaplock:
            index = (0,) * array.ndim
            loaded = array[index]
            if loaded == old:
                array[index] = val
            return loaded

    def cas(self, array, index, old, val):
        # Indexed compare-and-swap; returns the value loaded before the swap.
        with caslock:
            loaded = array[index]
            if loaded == old:
                array[index] = val
            return loaded
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
class FakeCUDAFp16(object):
    '''
    Simulated half-precision (fp16) intrinsics.

    Arithmetic, comparison and min/max helpers defer to the operands' own
    Python/numpy semantics; the transcendental helpers explicitly request a
    float16 result dtype from numpy.
    '''
    def hadd(self, a, b):
        return a + b

    def hsub(self, a, b):
        return a - b

    def hmul(self, a, b):
        return a * b

    def hdiv(self, a, b):
        return a / b

    def hfma(self, a, b, c):
        # Fused multiply-add, simulated without single-rounding behaviour.
        return a * b + c

    def hneg(self, a):
        return -a

    def habs(self, a):
        return abs(a)

    def hsin(self, x):
        return np.sin(x, dtype=np.float16)

    def hcos(self, x):
        return np.cos(x, dtype=np.float16)

    def hlog(self, x):
        return np.log(x, dtype=np.float16)

    def hlog2(self, x):
        return np.log2(x, dtype=np.float16)

    def hlog10(self, x):
        return np.log10(x, dtype=np.float16)

    def hexp(self, x):
        return np.exp(x, dtype=np.float16)

    def hexp2(self, x):
        return np.exp2(x, dtype=np.float16)

    def hexp10(self, x):
        # numpy has no exp10 ufunc with a dtype argument; compute in full
        # precision and narrow the result.
        return np.float16(10 ** x)

    def hsqrt(self, x):
        return np.sqrt(x, dtype=np.float16)

    def hrsqrt(self, x):
        # Reciprocal square root.
        return np.float16(x ** -0.5)

    def hceil(self, x):
        return np.ceil(x, dtype=np.float16)

    def hfloor(self, x):
        # BUG FIX: previously called np.ceil, which rounded towards +inf
        # instead of -inf.
        return np.floor(x, dtype=np.float16)

    def hrcp(self, x):
        return np.reciprocal(x, dtype=np.float16)

    def htrunc(self, x):
        return np.trunc(x, dtype=np.float16)

    def hrint(self, x):
        return np.rint(x, dtype=np.float16)

    def heq(self, a, b):
        return a == b

    def hne(self, a, b):
        return a != b

    def hge(self, a, b):
        return a >= b

    def hgt(self, a, b):
        return a > b

    def hle(self, a, b):
        return a <= b

    def hlt(self, a, b):
        return a < b

    def hmax(self, a, b):
        return max(a, b)

    def hmin(self, a, b):
        return min(a, b)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
class FakeCUDAModule(object):
    '''
    An instance of this class will be injected into the __globals__ for an
    executing function in order to implement calls to cuda.*. This will fail to
    work correctly if the user code does::

        from numba import cuda as something_else

    In other words, the CUDA module must be called cuda.
    '''

    def __init__(self, grid_dim, block_dim, dynshared_size):
        self.gridDim = Dim3(*grid_dim)
        self.blockDim = Dim3(*block_dim)
        self._cg = FakeCUDACg()
        self._local = FakeCUDALocal()
        self._shared = FakeCUDAShared(dynshared_size)
        self._const = FakeCUDAConst()
        self._atomic = FakeCUDAAtomic()
        self._fp16 = FakeCUDAFp16()
        # Insert the vector types into the kernel context
        # Note that we need to do this in addition to exposing them as module
        # variables in `simulator.__init__.py`, because the test cases need
        # to access the actual cuda module as well as the fake cuda module
        # for vector types.
        for name, svty in vector_types.items():
            setattr(self, name, svty)
            for alias in svty.aliases:
                setattr(self, alias, svty)

    @property
    def cg(self):
        return self._cg

    @property
    def local(self):
        return self._local

    @property
    def shared(self):
        return self._shared

    @property
    def const(self):
        return self._const

    @property
    def atomic(self):
        return self._atomic

    @property
    def fp16(self):
        return self._fp16

    @property
    def threadIdx(self):
        # The simulated thread identity lives on the executing BlockThread.
        return threading.current_thread().threadIdx

    @property
    def blockIdx(self):
        return threading.current_thread().blockIdx

    @property
    def warpsize(self):
        # Fixed at 32 to match hardware convention.
        return 32

    @property
    def laneid(self):
        return threading.current_thread().thread_id % 32

    def syncthreads(self):
        threading.current_thread().syncthreads()

    def threadfence(self):
        # No-op
        pass

    def threadfence_block(self):
        # No-op
        pass

    def threadfence_system(self):
        # No-op
        pass

    def syncthreads_count(self, val):
        return threading.current_thread().syncthreads_count(val)

    def syncthreads_and(self, val):
        return threading.current_thread().syncthreads_and(val)

    def syncthreads_or(self, val):
        return threading.current_thread().syncthreads_or(val)

    def popc(self, val):
        # Population count via the binary string representation.
        # NOTE(review): assumes val is non-negative — bin() of a negative int
        # includes a sign, not a two's-complement pattern.
        return bin(val).count("1")

    def fma(self, a, b, c):
        return a * b + c

    def cbrt(self, a):
        return a ** (1 / 3)

    def brev(self, val):
        # Reverse the bits of a 32-bit value.
        return int('{:032b}'.format(val)[::-1], 2)

    def clz(self, val):
        # Count leading zeros in a 32-bit value.
        s = '{:032b}'.format(val)
        return len(s) - len(s.lstrip('0'))

    def ffs(self, val):
        # The algorithm is:
        # 1. Count the number of trailing zeros.
        # 2. Add 1, because the LSB is numbered 1 rather than 0, and so on.
        # 3. If we've counted 32 zeros (resulting in 33), there were no bits
        #    set so we need to return zero.
        s = '{:032b}'.format(val)
        r = (len(s) - len(s.rstrip('0')) + 1) % 33
        return r

    def selp(self, a, b, c):
        # Select: b if predicate a is true, else c.
        return b if a else c

    def grid(self, n):
        # Global thread index in n dimensions (1 <= n <= 3).
        bdim = self.blockDim
        bid = self.blockIdx
        tid = self.threadIdx
        x = bid.x * bdim.x + tid.x
        if n == 1:
            return x
        y = bid.y * bdim.y + tid.y
        if n == 2:
            return (x, y)
        z = bid.z * bdim.z + tid.z
        if n == 3:
            return (x, y, z)

        raise RuntimeError("Global ID has 1-3 dimensions. %d requested" % n)

    def gridsize(self, n):
        # Total number of threads in the grid, per dimension.
        bdim = self.blockDim
        gdim = self.gridDim
        x = bdim.x * gdim.x
        if n == 1:
            return x
        y = bdim.y * gdim.y
        if n == 2:
            return (x, y)
        z = bdim.z * gdim.z
        if n == 3:
            return (x, y, z)

        raise RuntimeError("Global grid has 1-3 dimensions. %d requested" % n)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
@contextmanager
def swapped_cuda_module(fn, fake_cuda_module):
    """Temporarily rebind every global in ``fn`` that refers to the real
    numba ``cuda`` module to ``fake_cuda_module``, restoring the original
    bindings on exit (even if the body raises)."""
    from numba import cuda

    fn_globs = fn.__globals__
    # Remember each global name currently bound to the real cuda module.
    saved = {name: value for name, value in fn_globs.items() if value is cuda}
    # Point all of those names at the fake module instead.
    fn_globs.update({name: fake_cuda_module for name in saved})
    try:
        yield
    finally:
        # Restore the real bindings.
        fn_globs.update(saved)
|
lib/python3.10/site-packages/numba/cuda/simulator/reduction.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import reduce as pyreduce
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def Reduce(func):
    """Simulator stand-in for ``cuda.Reduce``: wrap a binary ``func`` in a
    host-side reduction.

    The wrapper reduces ``seq`` starting from ``init``. If ``res`` is given,
    the result is stored in ``res[0]`` and ``None`` is returned; otherwise the
    result is returned directly.
    """
    def reduce_wrapper(seq, res=None, init=0):
        result = pyreduce(func, seq, init)
        if res is None:
            return result
        res[0] = result
        return None
    return reduce_wrapper
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
reduce = Reduce
|
lib/python3.10/site-packages/numba/cuda/simulator/vector_types.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba import types, config
|
| 2 |
+
from numba.cuda.stubs import _vector_type_stubs
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class SimulatedVectorType:
    """Base class for host-side stand-ins of CUDA vector types.

    Subclasses must provide ``num_elements`` and ``name`` as class
    attributes. Components are exposed as ``x``, ``y``, ``z``, ``w`` in
    order; constructor arguments may include other SimulatedVectorType
    instances, whose components are flattened in place.
    """
    attributes = ['x', 'y', 'z', 'w']

    def __init__(self, *args):
        flat = []
        for arg in args:
            if isinstance(arg, SimulatedVectorType):
                flat.extend(arg.as_list())
            else:
                flat.append(arg)
        self._attrs = self.attributes[:len(flat)]
        if len(flat) != self.num_elements:
            raise TypeError(
                f"{self.name} expects {self.num_elements}"
                f" elements, got {len(flat)}"
            )

        for value, attr in zip(flat, self._attrs):
            setattr(self, attr, value)

    @property
    def name(self):
        raise NotImplementedError()

    @property
    def num_elements(self):
        raise NotImplementedError()

    def as_list(self):
        return [getattr(self, attr) for attr in self._attrs]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def make_simulated_vector_type(num_elements, name):
    # Build a SimulatedVectorType subclass named ``name`` with the given
    # component count. The base Numba element type is float32; which spelling
    # is used depends on whether the legacy type system is active.
    if config.USE_LEGACY_TYPE_SYSTEM:
        base_type = types.float32
    else:
        base_type = types.np_float32

    obj = type(name, (SimulatedVectorType,), {
        "num_elements": num_elements,
        "base_type": base_type,
        "name": name
    })
    # The simulated class serves directly as its own user-facing object.
    obj.user_facing_object = obj
    return obj
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _initialize():
    # Build the mapping of vector-type name -> simulated class from the
    # CUDA vector type stubs. The trailing digit of each stub's name (e.g.
    # "float2") gives the component count; aliases are copied from the stub.
    _simulated_vector_types = {}
    for stub in _vector_type_stubs:
        num_elements = int(stub.__name__[-1])
        _simulated_vector_types[stub.__name__] = (
            make_simulated_vector_type(num_elements, stub.__name__)
        )
        _simulated_vector_types[stub.__name__].aliases = stub.aliases
    return _simulated_vector_types
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
vector_types = _initialize()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.testing import ensure_supported_ccs_initialized
|
| 2 |
+
from numba.testing import load_testsuite
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def load_tests(loader, tests, pattern):
    # unittest load_tests protocol hook: make sure supported compute
    # capabilities are initialised before collecting this directory's tests.
    ensure_supported_ccs_initialized()
    return load_testsuite(loader, os.path.dirname(__file__))
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_array_attr.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numba import cuda
|
| 3 |
+
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestArrayAttr(CUDATestCase):
|
| 7 |
+
|
| 8 |
+
    def test_contigous_2d(self):
        # NOTE: "contigous" is a typo for "contiguous"; kept to preserve the
        # test ID. Checks that 2D device arrays report exactly the contiguity
        # of their host source: C-ordered -> C-contiguous only, Fortran ->
        # F-contiguous only.
        ary = np.arange(10)
        cary = ary.reshape(2, 5)
        fary = np.asfortranarray(cary)

        dcary = cuda.to_device(cary)
        dfary = cuda.to_device(fary)
        self.assertTrue(dcary.is_c_contiguous())
        self.assertTrue(not dfary.is_c_contiguous())
        self.assertTrue(not dcary.is_f_contiguous())
        self.assertTrue(dfary.is_f_contiguous())
|
| 19 |
+
|
| 20 |
+
    def test_contigous_3d(self):
        # Same contiguity check as the 2D case, for a 3D (2, 5, 2) array.
        ary = np.arange(20)
        cary = ary.reshape(2, 5, 2)
        fary = np.asfortranarray(cary)

        dcary = cuda.to_device(cary)
        dfary = cuda.to_device(fary)
        self.assertTrue(dcary.is_c_contiguous())
        self.assertTrue(not dfary.is_c_contiguous())
        self.assertTrue(not dcary.is_f_contiguous())
        self.assertTrue(dfary.is_f_contiguous())
|
| 31 |
+
|
| 32 |
+
    def test_contigous_4d(self):
        # Same contiguity check as the 2D case, for a 4D (2, 5, 2, 3) array.
        ary = np.arange(60)
        cary = ary.reshape(2, 5, 2, 3)
        fary = np.asfortranarray(cary)

        dcary = cuda.to_device(cary)
        dfary = cuda.to_device(fary)
        self.assertTrue(dcary.is_c_contiguous())
        self.assertTrue(not dfary.is_c_contiguous())
        self.assertTrue(not dcary.is_f_contiguous())
        self.assertTrue(dfary.is_f_contiguous())
|
| 43 |
+
|
| 44 |
+
    def test_ravel_1d(self):
        # ravel() on a 1D device array must return a new 1D array matching
        # numpy's ravel for every order flag.
        ary = np.arange(60)
        dary = cuda.to_device(ary)
        for order in 'CFA':
            expect = ary.ravel(order=order)
            dflat = dary.ravel(order=order)
            flat = dflat.copy_to_host()
            self.assertTrue(dary is not dflat)  # ravel returns new array
            self.assertEqual(flat.ndim, 1)
            self.assertPreciseEqual(expect, flat)
|
| 54 |
+
|
| 55 |
+
@skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
|
| 56 |
+
def test_ravel_stride_1d(self):
|
| 57 |
+
ary = np.arange(60)
|
| 58 |
+
dary = cuda.to_device(ary)
|
| 59 |
+
# No-copy stride device array
|
| 60 |
+
darystride = dary[::2]
|
| 61 |
+
dary_data = dary.__cuda_array_interface__['data'][0]
|
| 62 |
+
ddarystride_data = darystride.__cuda_array_interface__['data'][0]
|
| 63 |
+
self.assertEqual(dary_data, ddarystride_data)
|
| 64 |
+
# Fail on ravel on non-contiguous array
|
| 65 |
+
with self.assertRaises(NotImplementedError):
|
| 66 |
+
darystride.ravel()
|
| 67 |
+
|
| 68 |
+
def test_ravel_c(self):
|
| 69 |
+
ary = np.arange(60)
|
| 70 |
+
reshaped = ary.reshape(2, 5, 2, 3)
|
| 71 |
+
|
| 72 |
+
expect = reshaped.ravel(order='C')
|
| 73 |
+
dary = cuda.to_device(reshaped)
|
| 74 |
+
dflat = dary.ravel()
|
| 75 |
+
flat = dflat.copy_to_host()
|
| 76 |
+
self.assertTrue(dary is not dflat)
|
| 77 |
+
self.assertEqual(flat.ndim, 1)
|
| 78 |
+
self.assertPreciseEqual(expect, flat)
|
| 79 |
+
|
| 80 |
+
# explicit order kwarg
|
| 81 |
+
for order in 'CA':
|
| 82 |
+
expect = reshaped.ravel(order=order)
|
| 83 |
+
dary = cuda.to_device(reshaped)
|
| 84 |
+
dflat = dary.ravel(order=order)
|
| 85 |
+
flat = dflat.copy_to_host()
|
| 86 |
+
self.assertTrue(dary is not dflat)
|
| 87 |
+
self.assertEqual(flat.ndim, 1)
|
| 88 |
+
self.assertPreciseEqual(expect, flat)
|
| 89 |
+
|
| 90 |
+
@skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
|
| 91 |
+
def test_ravel_stride_c(self):
|
| 92 |
+
ary = np.arange(60)
|
| 93 |
+
reshaped = ary.reshape(2, 5, 2, 3)
|
| 94 |
+
|
| 95 |
+
dary = cuda.to_device(reshaped)
|
| 96 |
+
darystride = dary[::2, ::2, ::2, ::2]
|
| 97 |
+
dary_data = dary.__cuda_array_interface__['data'][0]
|
| 98 |
+
ddarystride_data = darystride.__cuda_array_interface__['data'][0]
|
| 99 |
+
self.assertEqual(dary_data, ddarystride_data)
|
| 100 |
+
with self.assertRaises(NotImplementedError):
|
| 101 |
+
darystride.ravel()
|
| 102 |
+
|
| 103 |
+
def test_ravel_f(self):
|
| 104 |
+
ary = np.arange(60)
|
| 105 |
+
reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
|
| 106 |
+
for order in 'FA':
|
| 107 |
+
expect = reshaped.ravel(order=order)
|
| 108 |
+
dary = cuda.to_device(reshaped)
|
| 109 |
+
dflat = dary.ravel(order=order)
|
| 110 |
+
flat = dflat.copy_to_host()
|
| 111 |
+
self.assertTrue(dary is not dflat)
|
| 112 |
+
self.assertEqual(flat.ndim, 1)
|
| 113 |
+
self.assertPreciseEqual(expect, flat)
|
| 114 |
+
|
| 115 |
+
@skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
|
| 116 |
+
def test_ravel_stride_f(self):
|
| 117 |
+
ary = np.arange(60)
|
| 118 |
+
reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
|
| 119 |
+
dary = cuda.to_device(reshaped)
|
| 120 |
+
darystride = dary[::2, ::2, ::2, ::2]
|
| 121 |
+
dary_data = dary.__cuda_array_interface__['data'][0]
|
| 122 |
+
ddarystride_data = darystride.__cuda_array_interface__['data'][0]
|
| 123 |
+
self.assertEqual(dary_data, ddarystride_data)
|
| 124 |
+
with self.assertRaises(NotImplementedError):
|
| 125 |
+
darystride.ravel()
|
| 126 |
+
|
| 127 |
+
def test_reshape_c(self):
|
| 128 |
+
ary = np.arange(10)
|
| 129 |
+
expect = ary.reshape(2, 5)
|
| 130 |
+
dary = cuda.to_device(ary)
|
| 131 |
+
dary_reshaped = dary.reshape(2, 5)
|
| 132 |
+
got = dary_reshaped.copy_to_host()
|
| 133 |
+
self.assertPreciseEqual(expect, got)
|
| 134 |
+
|
| 135 |
+
def test_reshape_f(self):
|
| 136 |
+
ary = np.arange(10)
|
| 137 |
+
expect = ary.reshape(2, 5, order='F')
|
| 138 |
+
dary = cuda.to_device(ary)
|
| 139 |
+
dary_reshaped = dary.reshape(2, 5, order='F')
|
| 140 |
+
got = dary_reshaped.copy_to_host()
|
| 141 |
+
self.assertPreciseEqual(expect, got)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
if __name__ == '__main__':
|
| 145 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_context_stack.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numbers
|
| 2 |
+
from ctypes import byref
|
| 3 |
+
import weakref
|
| 4 |
+
|
| 5 |
+
from numba import cuda
|
| 6 |
+
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
|
| 7 |
+
from numba.cuda.cudadrv import driver
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestContextStack(CUDATestCase):
    """Exercises the ``cuda.gpus`` context-stack API."""

    def setUp(self):
        super().setUp()
        # Start each test with no active context.
        cuda.close()

    def test_gpus_current(self):
        # No device is current until a gpus[...] context is entered.
        self.assertIs(cuda.gpus.current, None)
        with cuda.gpus[0]:
            self.assertEqual(int(cuda.gpus.current.id), 0)

    def test_gpus_len(self):
        self.assertGreater(len(cuda.gpus), 0)

    def test_gpus_iter(self):
        devices = list(cuda.gpus)
        self.assertGreater(len(devices), 0)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class TestContextAPI(CUDATestCase):
    """Tests context memory queries and context switching rules."""

    def tearDown(self):
        super().tearDown()
        cuda.close()

    def test_context_memory(self):
        try:
            mem = cuda.current_context().get_memory_info()
        except NotImplementedError:
            self.skipTest('EMM Plugin does not implement get_memory_info()')

        # The result is a named pair (free, total); check both the
        # attribute and positional views agree.
        self.assertIsInstance(mem.free, numbers.Number)
        self.assertEqual(mem.free, mem[0])

        self.assertIsInstance(mem.total, numbers.Number)
        self.assertEqual(mem.total, mem[1])

        self.assertLessEqual(mem.free, mem.total)

    @unittest.skipIf(len(cuda.gpus) < 2, "need more than 1 gpus")
    @skip_on_cudasim('CUDA HW required')
    def test_forbidden_context_switch(self):
        # Switching devices inside a `cuda.require_context` region must
        # raise.
        @cuda.require_context
        def switch_gpu():
            with cuda.gpus[1]:
                pass

        with cuda.gpus[0]:
            with self.assertRaises(RuntimeError) as raises:
                switch_gpu()

            self.assertIn("Cannot switch CUDA-context.", str(raises.exception))

    @unittest.skipIf(len(cuda.gpus) < 2, "need more than 1 gpus")
    def test_accepted_context_switch(self):
        # Without require_context, nested device selection is permitted.
        def switch_gpu():
            with cuda.gpus[1]:
                return cuda.current_context().device.id

        with cuda.gpus[0]:
            devid = switch_gpu()
        self.assertEqual(int(devid), 1)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@skip_on_cudasim('CUDA HW required')
class Test3rdPartyContext(CUDATestCase):
    """Interoperation with CUDA contexts created outside of Numba."""

    def tearDown(self):
        super().tearDown()
        cuda.close()

    def test_attached_primary(self, extra_work=lambda: None):
        # Emulate primary context creation by a 3rd party.
        the_driver = driver.driver
        if driver.USE_NV_BINDING:
            dev = driver.binding.CUdevice(0)
            hctx = the_driver.cuDevicePrimaryCtxRetain(dev)
        else:
            dev = 0
            hctx = driver.drvapi.cu_context()
            the_driver.cuDevicePrimaryCtxRetain(byref(hctx), dev)
        try:
            ctx = driver.Context(weakref.proxy(self), hctx)
            ctx.push()
            # The context Numba reports must be the primary context
            # retained above.
            my_ctx = cuda.current_context()
            if driver.USE_NV_BINDING:
                self.assertEqual(int(my_ctx.handle), int(ctx.handle))
            else:
                self.assertEqual(my_ctx.handle.value, ctx.handle.value)

            extra_work()
        finally:
            ctx.pop()
            the_driver.cuDevicePrimaryCtxRelease(dev)

    def test_attached_non_primary(self):
        # Emulate non-primary context creation by a 3rd party.
        the_driver = driver.driver
        if driver.USE_NV_BINDING:
            flags = 0
            dev = driver.binding.CUdevice(0)
            hctx = the_driver.cuCtxCreate(flags, dev)
        else:
            hctx = driver.drvapi.cu_context()
            the_driver.cuCtxCreate(byref(hctx), 0, 0)
        try:
            cuda.current_context()
        except RuntimeError as e:
            # Numba refuses to operate on a foreign non-primary context.
            self.assertIn("Numba cannot operate on non-primary CUDA context ",
                          str(e))
        else:
            self.fail("No RuntimeError raised")
        finally:
            the_driver.cuCtxDestroy(hctx)

    def test_cudajit_in_attached_primary_context(self):
        # Kernels must compile and launch inside an externally retained
        # primary context.
        def do():
            from numba import cuda

            @cuda.jit
            def foo(a):
                for i in range(a.size):
                    a[i] = i

            a = cuda.device_array(10)
            foo[1, 1](a)
            self.assertEqual(list(a.copy_to_host()), list(range(10)))

        self.test_attached_primary(do)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
if __name__ == '__main__':
|
| 145 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import product
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from numba import cuda
|
| 6 |
+
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
|
| 7 |
+
from unittest.mock import patch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class CudaArrayIndexing(CUDATestCase):
    """Scalar indexing of device arrays, covering negative indices and
    out-of-bounds errors in each dimension."""

    def test_index_1d(self):
        arr = np.arange(10)
        darr = cuda.to_device(arr)
        x, = arr.shape
        # Every valid index, negative and positive, matches the host.
        for i in range(-x, x):
            self.assertEqual(arr[i], darr[i])
        # One past either end raises IndexError.
        with self.assertRaises(IndexError):
            darr[-x - 1]
        with self.assertRaises(IndexError):
            darr[x]

    def test_index_2d(self):
        arr = np.arange(3 * 4).reshape(3, 4)
        darr = cuda.to_device(arr)
        x, y = arr.shape
        for i in range(-x, x):
            for j in range(-y, y):
                self.assertEqual(arr[i, j], darr[i, j])
        # Out-of-bounds on each axis independently.
        with self.assertRaises(IndexError):
            darr[-x - 1, 0]
        with self.assertRaises(IndexError):
            darr[x, 0]
        with self.assertRaises(IndexError):
            darr[0, -y - 1]
        with self.assertRaises(IndexError):
            darr[0, y]

    def test_index_3d(self):
        arr = np.arange(3 * 4 * 5).reshape(3, 4, 5)
        darr = cuda.to_device(arr)
        x, y, z = arr.shape
        for i in range(-x, x):
            for j in range(-y, y):
                for k in range(-z, z):
                    self.assertEqual(arr[i, j, k], darr[i, j, k])
        with self.assertRaises(IndexError):
            darr[-x - 1, 0, 0]
        with self.assertRaises(IndexError):
            darr[x, 0, 0]
        with self.assertRaises(IndexError):
            darr[0, -y - 1, 0]
        with self.assertRaises(IndexError):
            darr[0, y, 0]
        with self.assertRaises(IndexError):
            darr[0, 0, -z - 1]
        with self.assertRaises(IndexError):
            darr[0, 0, z]
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class CudaArrayStridedSlice(CUDATestCase):
    """Stepped (strided) slicing of device arrays against NumPy."""

    def test_strided_index_1d(self):
        arr = np.arange(10)
        darr = cuda.to_device(arr)
        for start in range(arr.size):
            np.testing.assert_equal(arr[start::2],
                                    darr[start::2].copy_to_host())

    def test_strided_index_2d(self):
        arr = np.arange(6 * 7).reshape(6, 7)
        darr = cuda.to_device(arr)

        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                np.testing.assert_equal(arr[i::2, j::2],
                                        darr[i::2, j::2].copy_to_host())

    def test_strided_index_3d(self):
        arr = np.arange(6 * 7 * 8).reshape(6, 7, 8)
        darr = cuda.to_device(arr)

        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                for k in range(arr.shape[2]):
                    np.testing.assert_equal(
                        arr[i::2, j::2, k::2],
                        darr[i::2, j::2, k::2].copy_to_host())
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class CudaArraySlicing(CUDATestCase):
    """Slicing and dimension-selection behaviour of device arrays."""

    def test_prefix_1d(self):
        arr = np.arange(5)
        darr = cuda.to_device(arr)
        for i in range(arr.size):
            expect = arr[i:]
            got = darr[i:].copy_to_host()
            self.assertTrue(np.all(expect == got))

    def test_prefix_2d(self):
        arr = np.arange(3 ** 2).reshape(3, 3)
        darr = cuda.to_device(arr)
        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                expect = arr[i:, j:]
                sliced = darr[i:, j:]
                # Shape and strides of the view must match NumPy's.
                self.assertEqual(expect.shape, sliced.shape)
                self.assertEqual(expect.strides, sliced.strides)
                got = sliced.copy_to_host()
                self.assertTrue(np.all(expect == got))

    def test_select_3d_first_two_dim(self):
        arr = np.arange(3 * 4 * 5).reshape(3, 4, 5)
        darr = cuda.to_device(arr)
        # Select the first dimension.
        for i in range(arr.shape[0]):
            expect = arr[i]
            sliced = darr[i]
            self.assertEqual(expect.shape, sliced.shape)
            self.assertEqual(expect.strides, sliced.strides)
            got = sliced.copy_to_host()
            self.assertTrue(np.all(expect == got))
        # Select the second dimension.
        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                expect = arr[i, j]
                sliced = darr[i, j]
                self.assertEqual(expect.shape, sliced.shape)
                self.assertEqual(expect.strides, sliced.strides)
                got = sliced.copy_to_host()
                self.assertTrue(np.all(expect == got))

    def _check_row_column_selection(self, a):
        # Compare every 1-D selection of `a` on the device against the
        # equivalent host selection.
        da = cuda.to_device(a)

        for i in range(a.shape[0]):
            for j in range(a.shape[1]):
                self.assertTrue(np.array_equal(da[i, j, :].copy_to_host(),
                                               a[i, j, :]))
            for j in range(a.shape[2]):
                self.assertTrue(np.array_equal(da[i, :, j].copy_to_host(),
                                               a[i, :, j]))
        for i in range(a.shape[1]):
            for j in range(a.shape[2]):
                self.assertTrue(np.array_equal(da[:, i, j].copy_to_host(),
                                               a[:, i, j]))

    def test_select_f(self):
        a = np.arange(5 * 6 * 7).reshape(5, 6, 7, order='F')
        self._check_row_column_selection(a)

    def test_select_c(self):
        a = np.arange(5 * 6 * 7).reshape(5, 6, 7, order='C')
        self._check_row_column_selection(a)

    def test_prefix_select(self):
        arr = np.arange(5 * 7).reshape(5, 7, order='F')

        darr = cuda.to_device(arr)
        self.assertTrue(np.all(darr[:1, 1].copy_to_host() == arr[:1, 1]))

    def test_negative_slicing_1d(self):
        arr = np.arange(10)
        darr = cuda.to_device(arr)
        for i, j in product(range(-10, 10), repeat=2):
            np.testing.assert_array_equal(arr[i:j],
                                          darr[i:j].copy_to_host())

    def test_negative_slicing_2d(self):
        arr = np.arange(12).reshape(3, 4)
        darr = cuda.to_device(arr)
        for x, y, w, s in product(range(-4, 4), repeat=4):
            np.testing.assert_array_equal(arr[x:y, w:s],
                                          darr[x:y, w:s].copy_to_host())

    def test_empty_slice_1d(self):
        arr = np.arange(5)
        darr = cuda.to_device(arr)
        for i in range(darr.shape[0]):
            np.testing.assert_array_equal(darr[i:i].copy_to_host(), arr[i:i])
        # Empty slice of an empty slice.
        self.assertFalse(darr[:0][:0].copy_to_host())
        # Out-of-bound slices just produce empty slices.
        np.testing.assert_array_equal(darr[:0][:1].copy_to_host(),
                                      arr[:0][:1])
        np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(),
                                      arr[:0][-1:])

    def test_empty_slice_2d(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        np.testing.assert_array_equal(darr[:0].copy_to_host(), arr[:0])
        np.testing.assert_array_equal(darr[3, :0].copy_to_host(), arr[3, :0])
        # Empty slice of an empty slice.
        self.assertFalse(darr[:0][:0].copy_to_host())
        # Out-of-bound slices just produce empty slices.
        np.testing.assert_array_equal(darr[:0][:1].copy_to_host(), arr[:0][:1])
        np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(),
                                      arr[:0][-1:])
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
class CudaArraySetting(CUDATestCase):
    """
    Most of the slicing logic is tested in the cases above, so these
    tests focus on the setting (``__setitem__``) logic.
    """

    def test_scalar(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        # Mirror the same assignment on host and device, then compare.
        arr[2, 2] = 500
        darr[2, 2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_rank(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        arr[2] = 500
        darr[2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_broadcast(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        arr[:, 2] = 500
        darr[:, 2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_column(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=7, fill_value=400)
        arr[2] = _400
        darr[2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_row(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=5, fill_value=400)
        arr[:, 2] = _400
        darr[:, 2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_subarray(self):
        arr = np.arange(5 * 6 * 7).reshape(5, 6, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=(6, 7), fill_value=400)
        arr[2] = _400
        darr[2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_deep_subarray(self):
        arr = np.arange(5 * 6 * 7 * 8).reshape(5, 6, 7, 8)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=(5, 6, 8), fill_value=400)
        arr[:, :, 2] = _400
        darr[:, :, 2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_all(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=(5, 7), fill_value=400)
        arr[:] = _400
        darr[:] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_strides(self):
        arr = np.ones(20)
        darr = cuda.to_device(arr)
        arr[::2] = 500
        darr[::2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_incompatible_highdim(self):
        darr = cuda.to_device(np.arange(5 * 7))

        with self.assertRaises(ValueError) as e:
            darr[:] = np.ones(shape=(1, 2, 3))

        # The message differs between the device implementation and the
        # simulator (which surfaces NumPy's broadcast error).
        self.assertIn(
            member=str(e.exception),
            container=[
                "Can't assign 3-D array to 1-D self",  # device
                "could not broadcast input array from shape (2,3) "
                "into shape (35,)",  # simulator, NP >= 1.20
            ])

    def test_incompatible_shape(self):
        darr = cuda.to_device(np.arange(5))

        with self.assertRaises(ValueError) as e:
            darr[:] = [1, 3]

        self.assertIn(
            member=str(e.exception),
            container=[
                "Can't copy sequence with size 2 to array axis 0 with "
                "dimension 5",  # device
                "could not broadcast input array from shape (2,) into "
                "shape (5,)",  # simulator, NP >= 1.20
            ])

    @skip_on_cudasim('cudasim does not use streams and operates synchronously')
    def test_sync(self):
        # There should be a synchronization when no stream is supplied.
        darr = cuda.to_device(np.arange(5))

        with patch.object(cuda.cudadrv.driver.Stream, 'synchronize',
                          return_value=None) as mock_sync:
            darr[0] = 10

        mock_sync.assert_called_once()

    @skip_on_cudasim('cudasim does not use streams and operates synchronously')
    def test_no_sync_default_stream(self):
        # There should not be a synchronization when the array has a default
        # stream, whether it is the default stream, the legacy default stream,
        # the per-thread default stream, or another stream.
        streams = (cuda.stream(), cuda.default_stream(),
                   cuda.legacy_default_stream(),
                   cuda.per_thread_default_stream())

        for stream in streams:
            darr = cuda.to_device(np.arange(5), stream=stream)

            with patch.object(cuda.cudadrv.driver.Stream, 'synchronize',
                              return_value=None) as mock_sync:
                darr[0] = 10

            mock_sync.assert_not_called()

    @skip_on_cudasim('cudasim does not use streams and operates synchronously')
    def test_no_sync_supplied_stream(self):
        # There should not be a synchronization when a stream is supplied for
        # the setitem call, whether it is the default stream, the legacy
        # default stream, the per-thread default stream, or another stream.
        streams = (cuda.stream(), cuda.default_stream(),
                   cuda.legacy_default_stream(),
                   cuda.per_thread_default_stream())

        for stream in streams:
            darr = cuda.to_device(np.arange(5))

            with patch.object(cuda.cudadrv.driver.Stream, 'synchronize',
                              return_value=None) as mock_sync:
                darr.setitem(0, 10, stream=stream)

            mock_sync.assert_not_called()

    @unittest.skip('Requires PR #6367')
    def test_issue_6505(self):
        # On Windows, the writes to ary_v would not be visible prior to the
        # assertion, due to the assignment being done with a kernel launch that
        # returns asynchronously - there should now be a sync after the kernel
        # launch to ensure that the writes are always visible.
        ary = cuda.mapped_array(2, dtype=np.int32)
        ary[:] = 0

        ary_v = ary.view('u1')
        ary_v[1] = 1
        ary_v[5] = 1
        self.assertEqual(sum(ary), 512)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
if __name__ == '__main__':
|
| 375 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_auto_context.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numba import cuda
|
| 3 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestCudaAutoContext(CUDATestCase):
    def test_auto_context(self):
        """A problem was revealed by a customer that the use cuda.to_device
        does not create a CUDA context.
        This tests the problem
        """
        src = np.arange(10, dtype=np.float32)
        dst = np.empty_like(src)
        dsrc = cuda.to_device(src)

        # Round-tripping through the device must preserve the values.
        dsrc.copy_to_host(dst)
        self.assertTrue(np.allclose(src, dst))
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if __name__ == '__main__':
|
| 21 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import ctypes
|
| 3 |
+
from numba.cuda.cudadrv.devicearray import (DeviceRecord, from_record_like,
|
| 4 |
+
auto_device)
|
| 5 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 6 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 7 |
+
from numba.np import numpy_support
|
| 8 |
+
from numba import cuda
|
| 9 |
+
|
| 10 |
+
# Width of the fixed-size string field in `recordtype`.
N_CHARS = 5

# Aligned record mixing float, int, complex and fixed-width string fields.
recordtype = np.dtype(
    [
        ('a', np.float64),
        ('b', np.int32),
        ('c', np.complex64),
        ('d', (np.str_, N_CHARS))
    ],
    align=True
)

# Aligned record containing a small sub-array field.
recordwitharray = np.dtype(
    [
        ('g', np.int32),
        ('h', np.float32, 2)
    ],
    align=True
)

# Record with a 3x3 matrix-valued field.
recwithmat = np.dtype([('i', np.int32),
                       ('j', np.float32, (3, 3))])

# Record nesting the matrix-bearing record above.
recwithrecwithmat = np.dtype([('x', np.int32), ('y', recwithmat)])
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@skip_on_cudasim('Device Record API unsupported in the simulator')
class TestCudaDeviceRecord(CUDATestCase):
    """
    Tests the DeviceRecord class with np.void host types.
    """

    def setUp(self):
        super().setUp()
        self._create_data(np.zeros)

    def _create_data(self, array_ctor):
        # One all-zero host record and one with non-zero field values.
        self.dtype = np.dtype([('a', np.int32), ('b', np.float32)], align=True)
        self.hostz = array_ctor(1, self.dtype)[0]
        self.hostnz = array_ctor(1, self.dtype)[0]
        self.hostnz['a'] = 10
        self.hostnz['b'] = 11.0

    def _check_device_record(self, reference, rec):
        # A device record is scalar-shaped and mirrors the host dtype.
        self.assertEqual(rec.shape, tuple())
        self.assertEqual(rec.strides, tuple())
        self.assertEqual(rec.dtype, reference.dtype)
        self.assertEqual(rec.alloc_size, reference.dtype.itemsize)
        self.assertIsNotNone(rec.gpu_data)
        self.assertNotEqual(rec.device_ctypes_pointer, ctypes.c_void_p(0))

        numba_type = numpy_support.from_dtype(reference.dtype)
        self.assertEqual(rec._numba_type_, numba_type)

    def test_device_record_interface(self):
        hostrec = self.hostz.copy()
        devrec = DeviceRecord(self.dtype)
        self._check_device_record(hostrec, devrec)

    def test_device_record_copy(self):
        hostrec = self.hostz.copy()
        devrec = DeviceRecord(self.dtype)
        devrec.copy_to_device(hostrec)

        # Copy back and check values are all zeros.
        hostrec2 = self.hostnz.copy()
        devrec.copy_to_host(hostrec2)
        np.testing.assert_equal(self.hostz, hostrec2)

        # Copy non-zero values to GPU and back and check values.
        hostrec3 = self.hostnz.copy()
        devrec.copy_to_device(hostrec3)

        hostrec4 = self.hostz.copy()
        devrec.copy_to_host(hostrec4)
        np.testing.assert_equal(hostrec4, self.hostnz)

    def test_from_record_like(self):
        # Create a device record from a host record.
        hostrec = self.hostz.copy()
        devrec = from_record_like(hostrec)
        self._check_device_record(hostrec, devrec)

        # Create a record from a device record; the data must be distinct.
        devrec2 = from_record_like(devrec)
        self._check_device_record(devrec, devrec2)
        self.assertNotEqual(devrec.gpu_data, devrec2.gpu_data)

    def test_auto_device(self):
        # auto_device on a host record must transfer it and report a new
        # GPU object.
        hostrec = self.hostnz.copy()
        devrec, new_gpu_obj = auto_device(hostrec)
        self._check_device_record(hostrec, devrec)
        self.assertTrue(new_gpu_obj)

        # Copy data back and check it is equal to the auto_device argument.
        hostrec2 = self.hostz.copy()
        devrec.copy_to_host(hostrec2)
        np.testing.assert_equal(hostrec2, hostrec)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class TestCudaDeviceRecordWithRecord(TestCudaDeviceRecord):
    """
    Tests the DeviceRecord class with np.record host types
    """

    def setUp(self):
        # Deliberately bypass TestCudaDeviceRecord.setUp: it would build the
        # fixtures with np.zeros, whereas this subclass uses np.recarray.
        CUDATestCase.setUp(self)
        self._create_data(np.recarray)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@skip_on_cudasim('Structured array attr access not supported in simulator')
class TestRecordDtypeWithStructArrays(CUDATestCase):
    '''
    Test operation of device arrays on structured arrays.
    '''

    def _createSampleArrays(self):
        # Device-side fixtures: a 1D structured array plus two scalar records.
        self.sample1d = cuda.device_array(3, dtype=recordtype)
        self.samplerec1darr = cuda.device_array(1, dtype=recordwitharray)[0]
        self.samplerecmat = cuda.device_array(1, dtype=recwithmat)[0]

    def setUp(self):
        super().setUp()
        self._createSampleArrays()

        # Seed each element of the 1D sample with values derived from its
        # (1-based) position so every field holds a distinguishable value.
        sample = self.sample1d
        for index in range(sample.size):
            val = index + 1
            sample[index]['a'] = val / 2
            sample[index]['b'] = val
            sample[index]['c'] = val * 1j
            sample[index]['d'] = str(val) * N_CHARS

    def test_structured_array1(self):
        # Read back the values written in setUp, field by field.
        sample = self.sample1d
        for index in range(sample.size):
            val = index + 1
            self.assertEqual(sample[index]['a'], val / 2)
            self.assertEqual(sample[index]['b'], val)
            self.assertEqual(sample[index]['c'], val * 1j)
            self.assertEqual(sample[index]['d'], str(val) * N_CHARS)

    def test_structured_array2(self):
        # Scalar record containing an embedded 1D array field.
        rec = self.samplerec1darr
        rec['g'] = 2
        rec['h'][0] = 3.0
        rec['h'][1] = 4.0
        self.assertEqual(rec['g'], 2)
        self.assertEqual(rec['h'][0], 3.0)
        self.assertEqual(rec['h'][1], 4.0)

    def test_structured_array3(self):
        # Scalar record containing an embedded 3x3 matrix field.
        rec = self.samplerecmat
        # Same values as [[5, 10, 15], [20, 25, 30], [35, 40, 45]].
        mat = np.arange(5.0, 50.0, 5.0, dtype=np.float32).reshape(3, 3)
        rec['j'][:] = mat
        np.testing.assert_equal(rec['j'], mat)

    def test_structured_array4(self):
        # Nested record access on a device array.
        d_arr = cuda.to_device(np.zeros(1, dtype=recwithrecwithmat))
        d_arr[0]['y']['i'] = 1
        self.assertEqual(d_arr[0]['y']['i'], 1)
        d_arr[0]['y']['j'][0, 0] = 2.0
        self.assertEqual(d_arr[0]['y']['j'][0, 0], 2.0)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_driver.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ctypes import byref, c_int, c_void_p, sizeof
|
| 2 |
+
|
| 3 |
+
from numba.cuda.cudadrv.driver import (host_to_device, device_to_host, driver,
|
| 4 |
+
launch_kernel)
|
| 5 |
+
from numba.cuda.cudadrv import devices, drvapi, driver as _driver
|
| 6 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 7 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# PTX for a kernel that stores each thread's x-index into an int array.
# Two variants: ptx1 targets the legacy sm_10 ISA, ptx2 targets sm_20+.
ptx1 = '''
.version 1.4
.target sm_10, map_f64_to_f32

.entry _Z10helloworldPi (
.param .u64 __cudaparm__Z10helloworldPi_A)
{
.reg .u32 %r<3>;
.reg .u64 %rd<6>;
.loc 14 4 0
$LDWbegin__Z10helloworldPi:
.loc 14 6 0
cvt.s32.u16 %r1, %tid.x;
ld.param.u64 %rd1, [__cudaparm__Z10helloworldPi_A];
cvt.u64.u16 %rd2, %tid.x;
mul.lo.u64 %rd3, %rd2, 4;
add.u64 %rd4, %rd1, %rd3;
st.global.s32 [%rd4+0], %r1;
.loc 14 7 0
exit;
$LDWend__Z10helloworldPi:
} // _Z10helloworldPi
'''

ptx2 = '''
.version 3.0
.target sm_20
.address_size 64

.file 1 "/tmp/tmpxft_000012c7_00000000-9_testcuda.cpp3.i"
.file 2 "testcuda.cu"

.entry _Z10helloworldPi(
.param .u64 _Z10helloworldPi_param_0
)
{
.reg .s32 %r<3>;
.reg .s64 %rl<5>;


ld.param.u64 %rl1, [_Z10helloworldPi_param_0];
cvta.to.global.u64 %rl2, %rl1;
.loc 2 6 1
mov.u32 %r1, %tid.x;
mul.wide.u32 %rl3, %r1, 4;
add.s64 %rl4, %rl2, %rl3;
st.global.u32 [%rl4], %r1;
.loc 2 7 2
ret;
}
'''
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@skip_on_cudasim('CUDA Driver API unsupported in the simulator')
class TestCudaDriver(CUDATestCase):
    """Exercise the low-level CUDA driver wrappers directly."""

    def setUp(self):
        super().setUp()
        self.assertTrue(len(devices.gpus) > 0)
        self.context = devices.get_context()
        device = self.context.device
        ccmajor, _ = device.compute_capability
        # Pick the PTX variant matching the device's compute capability.
        self.ptx = ptx2 if ccmajor >= 2 else ptx1

    def tearDown(self):
        super().tearDown()
        del self.context

    def test_cuda_driver_basic(self):
        mod = self.context.create_module_ptx(self.ptx)
        kernel = mod.get_function('_Z10helloworldPi')

        host_buf = (c_int * 100)()

        dev_mem = self.context.memalloc(sizeof(host_buf))
        host_to_device(dev_mem, host_buf, sizeof(host_buf))

        ptr = dev_mem.device_ctypes_pointer
        stream = 0

        # The NVIDIA binding expects its own pointer / stream wrapper types.
        if _driver.USE_NV_BINDING:
            ptr = c_void_p(int(ptr))
            stream = _driver.binding.CUstream(stream)

        launch_kernel(kernel.handle,    # Kernel
                      1, 1, 1,          # gx, gy, gz
                      100, 1, 1,        # bx, by, bz
                      0,                # dynamic shared mem
                      stream,           # stream
                      [ptr])            # arguments

        device_to_host(host_buf, dev_mem, sizeof(host_buf))
        # Each thread wrote its own x-index into the corresponding slot.
        for i, v in enumerate(host_buf):
            self.assertEqual(i, v)

        mod.unload()

    def test_cuda_driver_stream_operations(self):
        mod = self.context.create_module_ptx(self.ptx)
        kernel = mod.get_function('_Z10helloworldPi')

        host_buf = (c_int * 100)()

        stream = self.context.create_stream()

        with stream.auto_synchronize():
            dev_mem = self.context.memalloc(sizeof(host_buf))
            host_to_device(dev_mem, host_buf, sizeof(host_buf),
                           stream=stream)

            ptr = dev_mem.device_ctypes_pointer
            if _driver.USE_NV_BINDING:
                ptr = c_void_p(int(ptr))

            launch_kernel(kernel.handle,    # Kernel
                          1, 1, 1,          # gx, gy, gz
                          100, 1, 1,        # bx, by, bz
                          0,                # dynamic shared mem
                          stream.handle,    # stream
                          [ptr])            # arguments

            device_to_host(host_buf, dev_mem, sizeof(host_buf),
                           stream=stream)

        for i, v in enumerate(host_buf):
            self.assertEqual(i, v)

    def test_cuda_driver_default_stream(self):
        # Test properties of the default stream. bool(stream) is the check
        # done in memcpy to decide whether the async version should be used,
        # so the default (0) stream must be true-ish even though 0 is
        # usually false-ish in Python.
        ds = self.context.get_default_stream()
        self.assertIn("Default CUDA stream", repr(ds))
        self.assertEqual(0, int(ds))
        self.assertTrue(ds)
        self.assertFalse(ds.external)

    def test_cuda_driver_legacy_default_stream(self):
        # The legacy default stream uses the reserved handle value 1.
        ds = self.context.get_legacy_default_stream()
        self.assertIn("Legacy default CUDA stream", repr(ds))
        self.assertEqual(1, int(ds))
        self.assertTrue(ds)
        self.assertFalse(ds.external)

    def test_cuda_driver_per_thread_default_stream(self):
        # The per-thread default stream uses the reserved handle value 2.
        ds = self.context.get_per_thread_default_stream()
        self.assertIn("Per-thread default CUDA stream", repr(ds))
        self.assertEqual(2, int(ds))
        self.assertTrue(ds)
        self.assertFalse(ds.external)

    def test_cuda_driver_stream(self):
        # Non-default streams carry a real (non-zero) handle.
        s = self.context.create_stream()
        self.assertIn("CUDA stream", repr(s))
        self.assertNotIn("Default", repr(s))
        self.assertNotIn("External", repr(s))
        self.assertNotEqual(0, int(s))
        self.assertTrue(s)
        self.assertFalse(s.external)

    def test_cuda_driver_external_stream(self):
        # Create a stream through the raw driver API to emulate a stream
        # made by an external library, then wrap it.
        if _driver.USE_NV_BINDING:
            handle = driver.cuStreamCreate(0)
            ptr = int(handle)
        else:
            handle = drvapi.cu_stream()
            driver.cuStreamCreate(byref(handle), 0)
            ptr = handle.value
        s = self.context.create_external_stream(ptr)

        self.assertIn("External CUDA stream", repr(s))
        # Ensure neither "Default" nor "default"
        self.assertNotIn("efault", repr(s))
        self.assertEqual(ptr, int(s))
        self.assertTrue(s)
        self.assertTrue(s.external)

    def test_cuda_driver_occupancy(self):
        mod = self.context.create_module_ptx(self.ptx)
        kernel = mod.get_function('_Z10helloworldPi')

        value = self.context.get_active_blocks_per_multiprocessor(
            kernel, 128, 128)
        self.assertTrue(value > 0)

        def b2d(bs):
            # Identity mapping from block size to dynamic shared memory.
            return bs

        grid, block = self.context.get_max_potential_block_size(
            kernel, b2d, 128, 128)
        self.assertTrue(grid > 0)
        self.assertTrue(block > 0)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class TestDevice(CUDATestCase):
    def test_device_get_uuid(self):
        # A device UUID looks like:
        #
        #     GPU-e6489c45-5b68-3b03-bab7-0e7c8e809643
        #
        # Device UUIDs may not conform to the version/variant bits of the
        # UUID specification (RFC 4122), so only the overall hex-group
        # layout is validated here.
        hex_group = '[0-9a-f]{%d}'
        groups = '-'.join(hex_group % width for width in (8, 4, 4, 4, 12))
        uuid_format = f'^GPU-{groups}$'

        dev = devices.get_context().device
        self.assertRegex(dev.uuid, uuid_format)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_libraries.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.testing import unittest
|
| 2 |
+
from numba.cuda.testing import skip_on_cudasim, skip_unless_conda_cudatoolkit
|
| 3 |
+
from numba.misc.findlib import find_lib
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@skip_on_cudasim('Library detection unsupported in the simulator')
@skip_unless_conda_cudatoolkit
class TestLibraryDetection(unittest.TestCase):
    def test_detect(self):
        """
        This test is solely present to ensure that shipped cudatoolkits have
        additional core libraries in locations that Numba scans by default.
        PyCulib (and potentially others) rely on Numba's library finding
        capacity to find and subsequently load these libraries.
        """
        core_libs = ['nvvm']
        # `lib` replaces the ambiguous single-letter name `l` (PEP 8 /
        # E741); subTest pinpoints which library is missing on failure.
        for lib in core_libs:
            with self.subTest(lib=lib):
                self.assertNotEqual(find_lib(lib), [])
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_memory.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ctypes
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from numba.cuda.cudadrv import driver, drvapi, devices
|
| 6 |
+
from numba.cuda.testing import unittest, ContextResettingTestCase
|
| 7 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@skip_on_cudasim('CUDA Memory API unsupported in the simulator')
class TestCudaMemory(ContextResettingTestCase):
    """Tests for device memory allocation and the MemoryPointer API."""

    def setUp(self):
        super().setUp()
        self.context = devices.get_context()

    def tearDown(self):
        del self.context
        # Zero-argument super() for consistency with setUp (was the legacy
        # two-argument form).
        super().tearDown()

    def _template(self, obj):
        # Any device-memory object must satisfy the device-memory protocol
        # and expose a pointer of the binding-appropriate type.
        self.assertTrue(driver.is_device_memory(obj))
        driver.require_device_memory(obj)
        if driver.USE_NV_BINDING:
            expected_class = driver.binding.CUdeviceptr
        else:
            expected_class = drvapi.cu_device_ptr
        self.assertTrue(isinstance(obj.device_ctypes_pointer,
                                   expected_class))

    def test_device_memory(self):
        devmem = self.context.memalloc(1024)
        self._template(devmem)

    def test_device_view(self):
        devmem = self.context.memalloc(1024)
        self._template(devmem.view(10))

    def test_host_alloc(self):
        devmem = self.context.memhostalloc(1024, mapped=True)
        self._template(devmem)

    def test_pinned_memory(self):
        ary = np.arange(10)
        devmem = self.context.mempin(ary, ary.ctypes.data,
                                     ary.size * ary.dtype.itemsize,
                                     mapped=True)
        self._template(devmem)

    def test_managed_memory(self):
        devmem = self.context.memallocmanaged(1024)
        self._template(devmem)

    def test_derived_pointer(self):
        # Use MemoryPointer.view to create derived pointers and verify that
        # the owner's reference count tracks each view's lifetime.

        def handle_val(mem):
            # Raw integer value of the handle under either binding.
            if driver.USE_NV_BINDING:
                return int(mem.handle)
            else:
                return mem.handle.value

        def check(m, offset):
            # create a first-level view
            v1 = m.view(offset)
            self.assertEqual(handle_val(v1.owner), handle_val(m))
            self.assertEqual(m.refct, 2)
            self.assertEqual(handle_val(v1) - offset, handle_val(v1.owner))
            # create a view of the view; ownership stays with the root
            # (a duplicated copy of this assertion was removed)
            v2 = v1.view(offset)
            self.assertEqual(handle_val(v2.owner), handle_val(m))
            self.assertEqual(handle_val(v2) - offset * 2,
                             handle_val(v2.owner))
            self.assertEqual(m.refct, 3)
            del v2
            self.assertEqual(m.refct, 2)
            del v1
            self.assertEqual(m.refct, 1)

        m = self.context.memalloc(1024)
        check(m=m, offset=0)
        check(m=m, offset=1)

    def test_user_extension(self):
        # Users can wrap externally-defined pointers in a MemoryPointer.
        # This test checks that the finalizer is invoked at the correct
        # time (typo "invokded" fixed; list-of-one counter replaced by a
        # nonlocal integer).
        fake_ptr = ctypes.c_void_p(0xdeadbeef)
        dtor_invoked = 0

        def dtor():
            nonlocal dtor_invoked
            dtor_invoked += 1

        # The finalizer fires when the owning pointer is deleted.
        ptr = driver.MemoryPointer(context=self.context, pointer=fake_ptr,
                                   size=40, finalizer=dtor)
        self.assertEqual(dtor_invoked, 0)
        del ptr
        self.assertEqual(dtor_invoked, 1)

        # Deleting a derived pointer must not fire the finalizer; only
        # deleting the owning pointer does.
        ptr = driver.MemoryPointer(context=self.context, pointer=fake_ptr,
                                   size=40, finalizer=dtor)
        owned = ptr.own()
        del owned
        self.assertEqual(dtor_invoked, 1)
        del ptr
        self.assertEqual(dtor_invoked, 2)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class TestCudaMemoryFunctions(ContextResettingTestCase):
    """Tests for driver-level memcpy / memset primitives."""

    def setUp(self):
        super().setUp()
        self.context = devices.get_context()

    def tearDown(self):
        del self.context
        # Zero-argument super() for consistency with setUp (was the legacy
        # two-argument form).
        super().tearDown()

    def test_memcpy(self):
        # Host -> device -> host round trip must preserve the data.
        hstary = np.arange(100, dtype=np.uint32)
        hstary2 = np.arange(100, dtype=np.uint32)
        sz = hstary.size * hstary.dtype.itemsize
        devary = self.context.memalloc(sz)

        driver.host_to_device(devary, hstary, sz)
        driver.device_to_host(hstary2, devary, sz)

        self.assertTrue(np.all(hstary == hstary2))

    def test_memset(self):
        dtype = np.dtype('uint32')
        n = 10
        # Fixed: the size was previously computed from a duplicated
        # literal 10 instead of n.
        sz = dtype.itemsize * n
        devary = self.context.memalloc(sz)
        driver.device_memset(devary, 0xab, sz)

        hstary = np.empty(n, dtype=dtype)
        driver.device_to_host(hstary, devary, sz)

        # Every byte set to 0xab -> each uint32 reads back as 0xabababab.
        hstary2 = np.array([0xabababab] * n, dtype=dtype)
        self.assertTrue(np.all(hstary == hstary2))

    def test_d2d(self):
        # Device-to-device copy must preserve the data end to end.
        hst = np.arange(100, dtype=np.uint32)
        hst2 = np.empty_like(hst)
        sz = hst.size * hst.dtype.itemsize
        dev1 = self.context.memalloc(sz)
        dev2 = self.context.memalloc(sz)
        driver.host_to_device(dev1, hst, sz)
        driver.device_to_device(dev2, dev1, sz)
        driver.device_to_host(hst2, dev2, sz)
        self.assertTrue(np.all(hst == hst2))
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
@skip_on_cudasim('CUDA Memory API unsupported in the simulator')
class TestMVExtent(ContextResettingTestCase):
    """Tests for host memory extent and size computation.

    assertEqual replaces assertTrue(a == b) throughout for informative
    failure messages; the unused end-extent is unpacked as `_`.
    """

    def test_c_contiguous_array(self):
        ary = np.arange(100)
        arysz = ary.dtype.itemsize * ary.size
        s, _ = driver.host_memory_extents(ary)
        self.assertEqual(ary.ctypes.data, s)
        self.assertEqual(arysz, driver.host_memory_size(ary))

    def test_f_contiguous_array(self):
        ary = np.asfortranarray(np.arange(100).reshape(2, 50))
        # ary.size is the product of the shape (was np.prod(ary.shape)).
        arysz = ary.dtype.itemsize * ary.size
        s, _ = driver.host_memory_extents(ary)
        self.assertEqual(ary.ctypes.data, s)
        self.assertEqual(arysz, driver.host_memory_size(ary))

    def test_single_element_array(self):
        # A zero-dimensional array still has a well-defined extent.
        ary = np.asarray(np.uint32(1234))
        arysz = ary.dtype.itemsize
        s, _ = driver.host_memory_extents(ary)
        self.assertEqual(ary.ctypes.data, s)
        self.assertEqual(arysz, driver.host_memory_size(ary))

    def test_ctypes_struct(self):
        class mystruct(ctypes.Structure):
            _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]

        data = mystruct(x=123, y=432)
        sz = driver.host_memory_size(data)
        self.assertEqual(ctypes.sizeof(data), sz)

    def test_ctypes_double(self):
        data = ctypes.c_double(1.234)
        sz = driver.host_memory_size(data)
        self.assertEqual(ctypes.sizeof(data), sz)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_ndarray.py
ADDED
|
@@ -0,0 +1,547 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numba.cuda.cudadrv import devicearray
|
| 4 |
+
from numba import cuda
|
| 5 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 6 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestCudaNDArray(CUDATestCase):
|
| 10 |
+
def test_device_array_interface(self):
|
| 11 |
+
dary = cuda.device_array(shape=100)
|
| 12 |
+
devicearray.verify_cuda_ndarray_interface(dary)
|
| 13 |
+
|
| 14 |
+
ary = np.empty(100)
|
| 15 |
+
dary = cuda.to_device(ary)
|
| 16 |
+
devicearray.verify_cuda_ndarray_interface(dary)
|
| 17 |
+
|
| 18 |
+
ary = np.asarray(1.234)
|
| 19 |
+
dary = cuda.to_device(ary)
|
| 20 |
+
self.assertEqual(dary.ndim, 0)
|
| 21 |
+
devicearray.verify_cuda_ndarray_interface(dary)
|
| 22 |
+
|
| 23 |
+
def test_device_array_from_readonly(self):
|
| 24 |
+
ary = np.arange(100, dtype=np.float32)
|
| 25 |
+
# Make the array readonly
|
| 26 |
+
ary.flags.writeable = False
|
| 27 |
+
self.assertFalse(ary.flags.writeable)
|
| 28 |
+
# Ensure that we can copy the readonly array
|
| 29 |
+
dary = cuda.to_device(ary)
|
| 30 |
+
retr = dary.copy_to_host()
|
| 31 |
+
np.testing.assert_array_equal(retr, ary)
|
| 32 |
+
|
| 33 |
+
def test_devicearray_dtype(self):
|
| 34 |
+
dary = cuda.device_array(shape=(100,), dtype="f4")
|
| 35 |
+
self.assertEqual(dary.dtype, np.dtype("f4"))
|
| 36 |
+
|
| 37 |
+
def test_devicearray_no_copy(self):
|
| 38 |
+
array = np.arange(100, dtype=np.float32)
|
| 39 |
+
cuda.to_device(array, copy=False)
|
| 40 |
+
|
| 41 |
+
def test_devicearray_shape(self):
|
| 42 |
+
ary = np.arange(2 * 3 * 4).reshape(2, 3, 4)
|
| 43 |
+
dary = cuda.to_device(ary)
|
| 44 |
+
self.assertEqual(ary.shape, dary.shape)
|
| 45 |
+
self.assertEqual(ary.shape[1:], dary.shape[1:])
|
| 46 |
+
|
| 47 |
+
def test_devicearray(self):
|
| 48 |
+
array = np.arange(100, dtype=np.int32)
|
| 49 |
+
original = array.copy()
|
| 50 |
+
gpumem = cuda.to_device(array)
|
| 51 |
+
array[:] = 0
|
| 52 |
+
gpumem.copy_to_host(array)
|
| 53 |
+
|
| 54 |
+
np.testing.assert_array_equal(array, original)
|
| 55 |
+
|
| 56 |
+
def test_stream_bind(self):
|
| 57 |
+
stream = cuda.stream()
|
| 58 |
+
with stream.auto_synchronize():
|
| 59 |
+
arr = cuda.device_array(
|
| 60 |
+
(3, 3),
|
| 61 |
+
dtype=np.float64,
|
| 62 |
+
stream=stream)
|
| 63 |
+
self.assertEqual(arr.bind(stream).stream, stream)
|
| 64 |
+
self.assertEqual(arr.stream, stream)
|
| 65 |
+
|
| 66 |
+
def test_len_1d(self):
|
| 67 |
+
ary = np.empty((3,))
|
| 68 |
+
dary = cuda.device_array(3)
|
| 69 |
+
self.assertEqual(len(ary), len(dary))
|
| 70 |
+
|
| 71 |
+
def test_len_2d(self):
|
| 72 |
+
ary = np.empty((3, 5))
|
| 73 |
+
dary = cuda.device_array((3, 5))
|
| 74 |
+
self.assertEqual(len(ary), len(dary))
|
| 75 |
+
|
| 76 |
+
def test_len_3d(self):
|
| 77 |
+
ary = np.empty((3, 5, 7))
|
| 78 |
+
dary = cuda.device_array((3, 5, 7))
|
| 79 |
+
self.assertEqual(len(ary), len(dary))
|
| 80 |
+
|
| 81 |
+
def test_devicearray_partition(self):
|
| 82 |
+
N = 100
|
| 83 |
+
array = np.arange(N, dtype=np.int32)
|
| 84 |
+
original = array.copy()
|
| 85 |
+
gpumem = cuda.to_device(array)
|
| 86 |
+
left, right = gpumem.split(N // 2)
|
| 87 |
+
|
| 88 |
+
array[:] = 0
|
| 89 |
+
|
| 90 |
+
self.assertTrue(np.all(array == 0))
|
| 91 |
+
|
| 92 |
+
right.copy_to_host(array[N // 2:])
|
| 93 |
+
left.copy_to_host(array[:N // 2])
|
| 94 |
+
|
| 95 |
+
self.assertTrue(np.all(array == original))
|
| 96 |
+
|
| 97 |
+
def test_devicearray_replace(self):
|
| 98 |
+
N = 100
|
| 99 |
+
array = np.arange(N, dtype=np.int32)
|
| 100 |
+
original = array.copy()
|
| 101 |
+
gpumem = cuda.to_device(array)
|
| 102 |
+
cuda.to_device(array * 2, to=gpumem)
|
| 103 |
+
gpumem.copy_to_host(array)
|
| 104 |
+
np.testing.assert_array_equal(array, original * 2)
|
| 105 |
+
|
| 106 |
+
@skip_on_cudasim('This works in the simulator')
|
| 107 |
+
def test_devicearray_transpose_wrongdim(self):
|
| 108 |
+
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4, 1))
|
| 109 |
+
|
| 110 |
+
with self.assertRaises(NotImplementedError) as e:
|
| 111 |
+
np.transpose(gpumem)
|
| 112 |
+
|
| 113 |
+
self.assertEqual(
|
| 114 |
+
"transposing a non-2D DeviceNDArray isn't supported",
|
| 115 |
+
str(e.exception))
|
| 116 |
+
|
| 117 |
+
def test_devicearray_transpose_identity(self):
|
| 118 |
+
# any-shape identities should work
|
| 119 |
+
original = np.array(np.arange(24)).reshape(3, 4, 2)
|
| 120 |
+
array = np.transpose(cuda.to_device(original),
|
| 121 |
+
axes=(0, 1, 2)).copy_to_host()
|
| 122 |
+
self.assertTrue(np.all(array == original))
|
| 123 |
+
|
| 124 |
+
def test_devicearray_transpose_duplicatedaxis(self):
|
| 125 |
+
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4))
|
| 126 |
+
|
| 127 |
+
with self.assertRaises(ValueError) as e:
|
| 128 |
+
np.transpose(gpumem, axes=(0, 0))
|
| 129 |
+
|
| 130 |
+
self.assertIn(
|
| 131 |
+
str(e.exception),
|
| 132 |
+
container=[
|
| 133 |
+
'invalid axes list (0, 0)', # GPU
|
| 134 |
+
'repeated axis in transpose', # sim
|
| 135 |
+
])
|
| 136 |
+
|
| 137 |
+
def test_devicearray_transpose_wrongaxis(self):
|
| 138 |
+
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4))
|
| 139 |
+
|
| 140 |
+
with self.assertRaises(ValueError) as e:
|
| 141 |
+
np.transpose(gpumem, axes=(0, 2))
|
| 142 |
+
|
| 143 |
+
self.assertIn(
|
| 144 |
+
str(e.exception),
|
| 145 |
+
container=[
|
| 146 |
+
'invalid axes list (0, 2)', # GPU
|
| 147 |
+
'invalid axis for this array',
|
| 148 |
+
'axis 2 is out of bounds for array of dimension 2', # sim
|
| 149 |
+
])
|
| 150 |
+
|
| 151 |
+
def test_devicearray_view_ok(self):
|
| 152 |
+
original = np.array(np.arange(12), dtype="i2").reshape(3, 4)
|
| 153 |
+
array = cuda.to_device(original)
|
| 154 |
+
for dtype in ("i4", "u4", "i8", "f8"):
|
| 155 |
+
with self.subTest(dtype=dtype):
|
| 156 |
+
np.testing.assert_array_equal(
|
| 157 |
+
array.view(dtype).copy_to_host(),
|
| 158 |
+
original.view(dtype)
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
def test_devicearray_view_ok_not_c_contig(self):
|
| 162 |
+
original = np.array(np.arange(32), dtype="i2").reshape(4, 8)
|
| 163 |
+
array = cuda.to_device(original)[:, ::2]
|
| 164 |
+
original = original[:, ::2]
|
| 165 |
+
np.testing.assert_array_equal(
|
| 166 |
+
array.view("u2").copy_to_host(),
|
| 167 |
+
original.view("u2")
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
def test_devicearray_view_bad_not_c_contig(self):
|
| 171 |
+
original = np.array(np.arange(32), dtype="i2").reshape(4, 8)
|
| 172 |
+
array = cuda.to_device(original)[:, ::2]
|
| 173 |
+
with self.assertRaises(ValueError) as e:
|
| 174 |
+
array.view("i4")
|
| 175 |
+
|
| 176 |
+
msg = str(e.exception)
|
| 177 |
+
self.assertIn('To change to a dtype of a different size,', msg)
|
| 178 |
+
|
| 179 |
+
contiguous_pre_np123 = 'the array must be C-contiguous' in msg
|
| 180 |
+
contiguous_post_np123 = 'the last axis must be contiguous' in msg
|
| 181 |
+
self.assertTrue(contiguous_pre_np123 or contiguous_post_np123,
|
| 182 |
+
'Expected message to mention contiguity')
|
| 183 |
+
|
| 184 |
+
def test_devicearray_view_bad_itemsize(self):
|
| 185 |
+
original = np.array(np.arange(12), dtype="i2").reshape(4, 3)
|
| 186 |
+
array = cuda.to_device(original)
|
| 187 |
+
with self.assertRaises(ValueError) as e:
|
| 188 |
+
array.view("i4")
|
| 189 |
+
self.assertEqual(
|
| 190 |
+
"When changing to a larger dtype,"
|
| 191 |
+
" its size must be a divisor of the total size in bytes"
|
| 192 |
+
" of the last axis of the array.",
|
| 193 |
+
str(e.exception))
|
| 194 |
+
|
| 195 |
+
def test_devicearray_transpose_ok(self):
|
| 196 |
+
original = np.array(np.arange(12)).reshape(3, 4)
|
| 197 |
+
array = np.transpose(cuda.to_device(original)).copy_to_host()
|
| 198 |
+
self.assertTrue(np.all(array == original.T))
|
| 199 |
+
|
| 200 |
+
def test_devicearray_transpose_T(self):
|
| 201 |
+
original = np.array(np.arange(12)).reshape(3, 4)
|
| 202 |
+
array = cuda.to_device(original).T.copy_to_host()
|
| 203 |
+
self.assertTrue(np.all(array == original.T))
|
| 204 |
+
|
| 205 |
+
def test_devicearray_contiguous_slice(self):
|
| 206 |
+
# memcpys are dumb ranges of bytes, so trying to
|
| 207 |
+
# copy to a non-contiguous range shouldn't work!
|
| 208 |
+
a = np.arange(25).reshape(5, 5, order='F')
|
| 209 |
+
s = np.full(fill_value=5, shape=(5,))
|
| 210 |
+
|
| 211 |
+
d = cuda.to_device(a)
|
| 212 |
+
a[2] = s
|
| 213 |
+
|
| 214 |
+
# d is in F-order (not C-order), so d[2] is not contiguous
|
| 215 |
+
# (40-byte strides). This means we can't memcpy to it!
|
| 216 |
+
with self.assertRaises(ValueError) as e:
|
| 217 |
+
d[2].copy_to_device(s)
|
| 218 |
+
self.assertEqual(
|
| 219 |
+
devicearray.errmsg_contiguous_buffer,
|
| 220 |
+
str(e.exception))
|
| 221 |
+
|
| 222 |
+
# if d[2].copy_to_device(s), then this would pass:
|
| 223 |
+
# self.assertTrue((a == d.copy_to_host()).all())
|
| 224 |
+
|
| 225 |
+
def _test_devicearray_contiguous_host_copy(self, a_c, a_f):
|
| 226 |
+
"""
|
| 227 |
+
Checks host->device memcpys
|
| 228 |
+
"""
|
| 229 |
+
self.assertTrue(a_c.flags.c_contiguous)
|
| 230 |
+
self.assertTrue(a_f.flags.f_contiguous)
|
| 231 |
+
|
| 232 |
+
for original, copy in [
|
| 233 |
+
(a_f, a_f),
|
| 234 |
+
(a_f, a_c),
|
| 235 |
+
(a_c, a_f),
|
| 236 |
+
(a_c, a_c),
|
| 237 |
+
]:
|
| 238 |
+
msg = '%s => %s' % (
|
| 239 |
+
'C' if original.flags.c_contiguous else 'F',
|
| 240 |
+
'C' if copy.flags.c_contiguous else 'F',
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
d = cuda.to_device(original)
|
| 244 |
+
d.copy_to_device(copy)
|
| 245 |
+
self.assertTrue(np.all(d.copy_to_host() == a_c), msg=msg)
|
| 246 |
+
self.assertTrue(np.all(d.copy_to_host() == a_f), msg=msg)
|
| 247 |
+
|
| 248 |
+
def test_devicearray_contiguous_copy_host_3d(self):
|
| 249 |
+
a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5)
|
| 250 |
+
a_f = np.array(a_c, order='F')
|
| 251 |
+
self._test_devicearray_contiguous_host_copy(a_c, a_f)
|
| 252 |
+
|
| 253 |
+
def test_devicearray_contiguous_copy_host_1d(self):
|
| 254 |
+
a_c = np.arange(5)
|
| 255 |
+
a_f = np.array(a_c, order='F')
|
| 256 |
+
self._test_devicearray_contiguous_host_copy(a_c, a_f)
|
| 257 |
+
|
| 258 |
+
def test_devicearray_contiguous_copy_device(self):
|
| 259 |
+
a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5)
|
| 260 |
+
a_f = np.array(a_c, order='F')
|
| 261 |
+
self.assertTrue(a_c.flags.c_contiguous)
|
| 262 |
+
self.assertTrue(a_f.flags.f_contiguous)
|
| 263 |
+
|
| 264 |
+
d = cuda.to_device(a_c)
|
| 265 |
+
|
| 266 |
+
with self.assertRaises(ValueError) as e:
|
| 267 |
+
d.copy_to_device(cuda.to_device(a_f))
|
| 268 |
+
self.assertEqual(
|
| 269 |
+
"incompatible strides: {} vs. {}".format(a_c.strides, a_f.strides),
|
| 270 |
+
str(e.exception))
|
| 271 |
+
|
| 272 |
+
d.copy_to_device(cuda.to_device(a_c))
|
| 273 |
+
self.assertTrue(np.all(d.copy_to_host() == a_c))
|
| 274 |
+
|
| 275 |
+
d = cuda.to_device(a_f)
|
| 276 |
+
|
| 277 |
+
with self.assertRaises(ValueError) as e:
|
| 278 |
+
d.copy_to_device(cuda.to_device(a_c))
|
| 279 |
+
self.assertEqual(
|
| 280 |
+
"incompatible strides: {} vs. {}".format(a_f.strides, a_c.strides),
|
| 281 |
+
str(e.exception))
|
| 282 |
+
|
| 283 |
+
d.copy_to_device(cuda.to_device(a_f))
|
| 284 |
+
self.assertTrue(np.all(d.copy_to_host() == a_f))
|
| 285 |
+
|
| 286 |
+
def test_devicearray_broadcast_host_copy(self):
|
| 287 |
+
broadsize = 4
|
| 288 |
+
coreshape = (2, 3)
|
| 289 |
+
coresize = np.prod(coreshape)
|
| 290 |
+
core_c = np.arange(coresize).reshape(coreshape, order='C')
|
| 291 |
+
core_f = np.arange(coresize).reshape(coreshape, order='F')
|
| 292 |
+
for dim in range(len(coreshape)):
|
| 293 |
+
newindex = (slice(None),) * dim + (np.newaxis,)
|
| 294 |
+
broadshape = coreshape[:dim] + (broadsize,) + coreshape[dim:]
|
| 295 |
+
broad_c = np.broadcast_to(core_c[newindex], broadshape)
|
| 296 |
+
broad_f = np.broadcast_to(core_f[newindex], broadshape)
|
| 297 |
+
dbroad_c = cuda.to_device(broad_c)
|
| 298 |
+
dbroad_f = cuda.to_device(broad_f)
|
| 299 |
+
np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_c)
|
| 300 |
+
np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_f)
|
| 301 |
+
# Also test copying across different core orderings
|
| 302 |
+
dbroad_c.copy_to_device(broad_f)
|
| 303 |
+
dbroad_f.copy_to_device(broad_c)
|
| 304 |
+
np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_f)
|
| 305 |
+
np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_c)
|
| 306 |
+
|
| 307 |
+
def test_devicearray_contiguous_host_strided(self):
|
| 308 |
+
a_c = np.arange(10)
|
| 309 |
+
d = cuda.to_device(a_c)
|
| 310 |
+
arr = np.arange(20)[::2]
|
| 311 |
+
d.copy_to_device(arr)
|
| 312 |
+
np.testing.assert_array_equal(d.copy_to_host(), arr)
|
| 313 |
+
|
| 314 |
+
def test_devicearray_contiguous_device_strided(self):
|
| 315 |
+
d = cuda.to_device(np.arange(20))
|
| 316 |
+
arr = np.arange(20)
|
| 317 |
+
|
| 318 |
+
with self.assertRaises(ValueError) as e:
|
| 319 |
+
d.copy_to_device(cuda.to_device(arr)[::2])
|
| 320 |
+
self.assertEqual(
|
| 321 |
+
devicearray.errmsg_contiguous_buffer,
|
| 322 |
+
str(e.exception))
|
| 323 |
+
|
| 324 |
+
@skip_on_cudasim('DeviceNDArray class not present in simulator')
|
| 325 |
+
def test_devicearray_relaxed_strides(self):
|
| 326 |
+
# From the reproducer in Issue #6824.
|
| 327 |
+
|
| 328 |
+
# Construct a device array that is contiguous even though
|
| 329 |
+
# the strides for the first axis (800) are not equal to
|
| 330 |
+
# the strides * size (10 * 8 = 80) for the previous axis,
|
| 331 |
+
# because the first axis size is 1.
|
| 332 |
+
arr = devicearray.DeviceNDArray((1, 10), (800, 8), np.float64)
|
| 333 |
+
|
| 334 |
+
# Ensure we still believe the array to be contiguous because
|
| 335 |
+
# strides checking is relaxed.
|
| 336 |
+
self.assertTrue(arr.flags['C_CONTIGUOUS'])
|
| 337 |
+
self.assertTrue(arr.flags['F_CONTIGUOUS'])
|
| 338 |
+
|
| 339 |
+
def test_c_f_contiguity_matches_numpy(self):
|
| 340 |
+
# From the reproducer in Issue #4943.
|
| 341 |
+
|
| 342 |
+
shapes = ((1, 4), (4, 1))
|
| 343 |
+
orders = ('C', 'F')
|
| 344 |
+
|
| 345 |
+
for shape, order in itertools.product(shapes, orders):
|
| 346 |
+
arr = np.ndarray(shape, order=order)
|
| 347 |
+
d_arr = cuda.to_device(arr)
|
| 348 |
+
self.assertEqual(arr.flags['C_CONTIGUOUS'],
|
| 349 |
+
d_arr.flags['C_CONTIGUOUS'])
|
| 350 |
+
self.assertEqual(arr.flags['F_CONTIGUOUS'],
|
| 351 |
+
d_arr.flags['F_CONTIGUOUS'])
|
| 352 |
+
|
| 353 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 354 |
+
def test_devicearray_typing_order_simple_c(self):
|
| 355 |
+
# C-order 1D array
|
| 356 |
+
a = np.zeros(10, order='C')
|
| 357 |
+
d = cuda.to_device(a)
|
| 358 |
+
self.assertEqual(d._numba_type_.layout, 'C')
|
| 359 |
+
|
| 360 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 361 |
+
def test_devicearray_typing_order_simple_f(self):
|
| 362 |
+
# F-order array that is also C layout.
|
| 363 |
+
a = np.zeros(10, order='F')
|
| 364 |
+
d = cuda.to_device(a)
|
| 365 |
+
self.assertEqual(d._numba_type_.layout, 'C')
|
| 366 |
+
|
| 367 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 368 |
+
def test_devicearray_typing_order_2d_c(self):
|
| 369 |
+
# C-order 2D array
|
| 370 |
+
a = np.zeros((2, 10), order='C')
|
| 371 |
+
d = cuda.to_device(a)
|
| 372 |
+
self.assertEqual(d._numba_type_.layout, 'C')
|
| 373 |
+
|
| 374 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 375 |
+
def test_devicearray_typing_order_2d_f(self):
|
| 376 |
+
# F-order array that can only be F layout
|
| 377 |
+
a = np.zeros((2, 10), order='F')
|
| 378 |
+
d = cuda.to_device(a)
|
| 379 |
+
self.assertEqual(d._numba_type_.layout, 'F')
|
| 380 |
+
|
| 381 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 382 |
+
def test_devicearray_typing_order_noncontig_slice_c(self):
|
| 383 |
+
# Non-contiguous slice of C-order array
|
| 384 |
+
a = np.zeros((5, 5), order='C')
|
| 385 |
+
d = cuda.to_device(a)[:,2]
|
| 386 |
+
self.assertEqual(d._numba_type_.layout, 'A')
|
| 387 |
+
|
| 388 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 389 |
+
def test_devicearray_typing_order_noncontig_slice_f(self):
|
| 390 |
+
# Non-contiguous slice of F-order array
|
| 391 |
+
a = np.zeros((5, 5), order='F')
|
| 392 |
+
d = cuda.to_device(a)[2,:]
|
| 393 |
+
self.assertEqual(d._numba_type_.layout, 'A')
|
| 394 |
+
|
| 395 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 396 |
+
def test_devicearray_typing_order_contig_slice_c(self):
|
| 397 |
+
# Contiguous slice of C-order array
|
| 398 |
+
a = np.zeros((5, 5), order='C')
|
| 399 |
+
d = cuda.to_device(a)[2,:]
|
| 400 |
+
self.assertEqual(d._numba_type_.layout, 'C')
|
| 401 |
+
|
| 402 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 403 |
+
def test_devicearray_typing_order_contig_slice_f(self):
|
| 404 |
+
# Contiguous slice of F-order array - is both C- and F-contiguous, so
|
| 405 |
+
# types as 'C' layout
|
| 406 |
+
a = np.zeros((5, 5), order='F')
|
| 407 |
+
d = cuda.to_device(a)[:,2]
|
| 408 |
+
self.assertEqual(d._numba_type_.layout, 'C')
|
| 409 |
+
|
| 410 |
+
@skip_on_cudasim('Typing not done in the simulator')
|
| 411 |
+
def test_devicearray_typing_order_broadcasted(self):
|
| 412 |
+
# Broadcasted array, similar to that used for passing scalars to ufuncs
|
| 413 |
+
a = np.broadcast_to(np.array([1]), (10,))
|
| 414 |
+
d = cuda.to_device(a)
|
| 415 |
+
self.assertEqual(d._numba_type_.layout, 'A')
|
| 416 |
+
|
| 417 |
+
def test_bug6697(self):
|
| 418 |
+
ary = np.arange(10, dtype=np.int16)
|
| 419 |
+
dary = cuda.to_device(ary)
|
| 420 |
+
got = np.asarray(dary)
|
| 421 |
+
self.assertEqual(got.dtype, dary.dtype)
|
| 422 |
+
|
| 423 |
+
@skip_on_cudasim('DeviceNDArray class not present in simulator')
|
| 424 |
+
def test_issue_8477(self):
|
| 425 |
+
# Ensure that we can copy a zero-length device array to a zero-length
|
| 426 |
+
# host array when the strides of the device and host arrays differ -
|
| 427 |
+
# this should be possible because the strides are irrelevant when the
|
| 428 |
+
# length is zero. For more info see
|
| 429 |
+
# https://github.com/numba/numba/issues/8477.
|
| 430 |
+
|
| 431 |
+
# Create a device array with shape (0,) and strides (8,)
|
| 432 |
+
dev_array = devicearray.DeviceNDArray(shape=(0,), strides=(8,),
|
| 433 |
+
dtype=np.int8)
|
| 434 |
+
|
| 435 |
+
# Create a host array with shape (0,) and strides (0,)
|
| 436 |
+
host_array = np.ndarray(shape=(0,), strides=(0,), dtype=np.int8)
|
| 437 |
+
|
| 438 |
+
# Sanity check for this test - ensure our destination has the strides
|
| 439 |
+
# we expect, because strides can be ignored in some cases by the
|
| 440 |
+
# ndarray constructor - checking here ensures that we haven't failed to
|
| 441 |
+
# account for unexpected behaviour across different versions of NumPy
|
| 442 |
+
self.assertEqual(host_array.strides, (0,))
|
| 443 |
+
|
| 444 |
+
# Ensure that the copy succeeds in both directions
|
| 445 |
+
dev_array.copy_to_host(host_array)
|
| 446 |
+
dev_array.copy_to_device(host_array)
|
| 447 |
+
|
| 448 |
+
# Ensure that a device-to-device copy also succeeds when the strides
|
| 449 |
+
# differ - one way of doing this is to copy the host array across and
|
| 450 |
+
# use that for copies in both directions.
|
| 451 |
+
dev_array_from_host = cuda.to_device(host_array)
|
| 452 |
+
self.assertEqual(dev_array_from_host.shape, (0,))
|
| 453 |
+
self.assertEqual(dev_array_from_host.strides, (0,))
|
| 454 |
+
|
| 455 |
+
dev_array.copy_to_device(dev_array_from_host)
|
| 456 |
+
dev_array_from_host.copy_to_device(dev_array)
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
class TestRecarray(CUDATestCase):
|
| 460 |
+
def test_recarray(self):
|
| 461 |
+
# From issue #4111
|
| 462 |
+
a = np.recarray((16,), dtype=[
|
| 463 |
+
("value1", np.int64),
|
| 464 |
+
("value2", np.float64),
|
| 465 |
+
])
|
| 466 |
+
a.value1 = np.arange(a.size, dtype=np.int64)
|
| 467 |
+
a.value2 = np.arange(a.size, dtype=np.float64) / 100
|
| 468 |
+
|
| 469 |
+
expect1 = a.value1
|
| 470 |
+
expect2 = a.value2
|
| 471 |
+
|
| 472 |
+
def test(x, out1, out2):
|
| 473 |
+
i = cuda.grid(1)
|
| 474 |
+
if i < x.size:
|
| 475 |
+
out1[i] = x.value1[i]
|
| 476 |
+
out2[i] = x.value2[i]
|
| 477 |
+
|
| 478 |
+
got1 = np.zeros_like(expect1)
|
| 479 |
+
got2 = np.zeros_like(expect2)
|
| 480 |
+
cuda.jit(test)[1, a.size](a, got1, got2)
|
| 481 |
+
|
| 482 |
+
np.testing.assert_array_equal(expect1, got1)
|
| 483 |
+
np.testing.assert_array_equal(expect2, got2)
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class TestCoreContiguous(CUDATestCase):
|
| 487 |
+
def _test_against_array_core(self, view):
|
| 488 |
+
self.assertEqual(
|
| 489 |
+
devicearray.is_contiguous(view),
|
| 490 |
+
devicearray.array_core(view).flags['C_CONTIGUOUS']
|
| 491 |
+
)
|
| 492 |
+
|
| 493 |
+
def test_device_array_like_1d(self):
|
| 494 |
+
d_a = cuda.device_array(10, order='C')
|
| 495 |
+
self._test_against_array_core(d_a)
|
| 496 |
+
|
| 497 |
+
def test_device_array_like_2d(self):
|
| 498 |
+
d_a = cuda.device_array((10, 12), order='C')
|
| 499 |
+
self._test_against_array_core(d_a)
|
| 500 |
+
|
| 501 |
+
def test_device_array_like_2d_transpose(self):
|
| 502 |
+
d_a = cuda.device_array((10, 12), order='C')
|
| 503 |
+
self._test_against_array_core(d_a.T)
|
| 504 |
+
|
| 505 |
+
def test_device_array_like_3d(self):
|
| 506 |
+
d_a = cuda.device_array((10, 12, 14), order='C')
|
| 507 |
+
self._test_against_array_core(d_a)
|
| 508 |
+
|
| 509 |
+
def test_device_array_like_1d_f(self):
|
| 510 |
+
d_a = cuda.device_array(10, order='F')
|
| 511 |
+
self._test_against_array_core(d_a)
|
| 512 |
+
|
| 513 |
+
def test_device_array_like_2d_f(self):
|
| 514 |
+
d_a = cuda.device_array((10, 12), order='F')
|
| 515 |
+
self._test_against_array_core(d_a)
|
| 516 |
+
|
| 517 |
+
def test_device_array_like_2d_f_transpose(self):
|
| 518 |
+
d_a = cuda.device_array((10, 12), order='F')
|
| 519 |
+
self._test_against_array_core(d_a.T)
|
| 520 |
+
|
| 521 |
+
def test_device_array_like_3d_f(self):
|
| 522 |
+
d_a = cuda.device_array((10, 12, 14), order='F')
|
| 523 |
+
self._test_against_array_core(d_a)
|
| 524 |
+
|
| 525 |
+
def test_1d_view(self):
|
| 526 |
+
shape = 10
|
| 527 |
+
view = np.zeros(shape)[::2]
|
| 528 |
+
self._test_against_array_core(view)
|
| 529 |
+
|
| 530 |
+
def test_1d_view_f(self):
|
| 531 |
+
shape = 10
|
| 532 |
+
view = np.zeros(shape, order='F')[::2]
|
| 533 |
+
self._test_against_array_core(view)
|
| 534 |
+
|
| 535 |
+
def test_2d_view(self):
|
| 536 |
+
shape = (10, 12)
|
| 537 |
+
view = np.zeros(shape)[::2, ::2]
|
| 538 |
+
self._test_against_array_core(view)
|
| 539 |
+
|
| 540 |
+
def test_2d_view_f(self):
|
| 541 |
+
shape = (10, 12)
|
| 542 |
+
view = np.zeros(shape, order='F')[::2, ::2]
|
| 543 |
+
self._test_against_array_core(view)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
if __name__ == '__main__':
|
| 547 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_deallocations.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import contextmanager
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from numba import cuda
|
| 6 |
+
from numba.cuda.testing import (unittest, skip_on_cudasim,
|
| 7 |
+
skip_if_external_memmgr, CUDATestCase)
|
| 8 |
+
from numba.tests.support import captured_stderr
|
| 9 |
+
from numba.core import config
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@skip_on_cudasim('not supported on CUDASIM')
|
| 13 |
+
@skip_if_external_memmgr('Deallocation specific to Numba memory management')
|
| 14 |
+
class TestDeallocation(CUDATestCase):
|
| 15 |
+
def test_max_pending_count(self):
|
| 16 |
+
# get deallocation manager and flush it
|
| 17 |
+
deallocs = cuda.current_context().memory_manager.deallocations
|
| 18 |
+
deallocs.clear()
|
| 19 |
+
self.assertEqual(len(deallocs), 0)
|
| 20 |
+
# deallocate to maximum count
|
| 21 |
+
for i in range(config.CUDA_DEALLOCS_COUNT):
|
| 22 |
+
cuda.to_device(np.arange(1))
|
| 23 |
+
self.assertEqual(len(deallocs), i + 1)
|
| 24 |
+
# one more to trigger .clear()
|
| 25 |
+
cuda.to_device(np.arange(1))
|
| 26 |
+
self.assertEqual(len(deallocs), 0)
|
| 27 |
+
|
| 28 |
+
def test_max_pending_bytes(self):
|
| 29 |
+
# get deallocation manager and flush it
|
| 30 |
+
ctx = cuda.current_context()
|
| 31 |
+
deallocs = ctx.memory_manager.deallocations
|
| 32 |
+
deallocs.clear()
|
| 33 |
+
self.assertEqual(len(deallocs), 0)
|
| 34 |
+
|
| 35 |
+
mi = ctx.get_memory_info()
|
| 36 |
+
|
| 37 |
+
max_pending = 10**6 # 1MB
|
| 38 |
+
old_ratio = config.CUDA_DEALLOCS_RATIO
|
| 39 |
+
try:
|
| 40 |
+
# change to a smaller ratio
|
| 41 |
+
config.CUDA_DEALLOCS_RATIO = max_pending / mi.total
|
| 42 |
+
# due to round off error (floor is used in calculating
|
| 43 |
+
# _max_pending_bytes) it can be off by 1.
|
| 44 |
+
self.assertAlmostEqual(deallocs._max_pending_bytes, max_pending,
|
| 45 |
+
delta=1)
|
| 46 |
+
|
| 47 |
+
# allocate half the max size
|
| 48 |
+
# this will not trigger deallocation
|
| 49 |
+
cuda.to_device(np.ones(max_pending // 2, dtype=np.int8))
|
| 50 |
+
self.assertEqual(len(deallocs), 1)
|
| 51 |
+
|
| 52 |
+
# allocate another remaining
|
| 53 |
+
# this will not trigger deallocation
|
| 54 |
+
cuda.to_device(np.ones(deallocs._max_pending_bytes -
|
| 55 |
+
deallocs._size, dtype=np.int8))
|
| 56 |
+
self.assertEqual(len(deallocs), 2)
|
| 57 |
+
|
| 58 |
+
# another byte to trigger .clear()
|
| 59 |
+
cuda.to_device(np.ones(1, dtype=np.int8))
|
| 60 |
+
self.assertEqual(len(deallocs), 0)
|
| 61 |
+
finally:
|
| 62 |
+
# restore old ratio
|
| 63 |
+
config.CUDA_DEALLOCS_RATIO = old_ratio
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@skip_on_cudasim("defer_cleanup has no effect in CUDASIM")
|
| 67 |
+
@skip_if_external_memmgr('Deallocation specific to Numba memory management')
|
| 68 |
+
class TestDeferCleanup(CUDATestCase):
|
| 69 |
+
def test_basic(self):
|
| 70 |
+
harr = np.arange(5)
|
| 71 |
+
darr1 = cuda.to_device(harr)
|
| 72 |
+
deallocs = cuda.current_context().memory_manager.deallocations
|
| 73 |
+
deallocs.clear()
|
| 74 |
+
self.assertEqual(len(deallocs), 0)
|
| 75 |
+
with cuda.defer_cleanup():
|
| 76 |
+
darr2 = cuda.to_device(harr)
|
| 77 |
+
del darr1
|
| 78 |
+
self.assertEqual(len(deallocs), 1)
|
| 79 |
+
del darr2
|
| 80 |
+
self.assertEqual(len(deallocs), 2)
|
| 81 |
+
deallocs.clear()
|
| 82 |
+
self.assertEqual(len(deallocs), 2)
|
| 83 |
+
|
| 84 |
+
deallocs.clear()
|
| 85 |
+
self.assertEqual(len(deallocs), 0)
|
| 86 |
+
|
| 87 |
+
def test_nested(self):
|
| 88 |
+
harr = np.arange(5)
|
| 89 |
+
darr1 = cuda.to_device(harr)
|
| 90 |
+
deallocs = cuda.current_context().memory_manager.deallocations
|
| 91 |
+
deallocs.clear()
|
| 92 |
+
self.assertEqual(len(deallocs), 0)
|
| 93 |
+
with cuda.defer_cleanup():
|
| 94 |
+
with cuda.defer_cleanup():
|
| 95 |
+
darr2 = cuda.to_device(harr)
|
| 96 |
+
del darr1
|
| 97 |
+
self.assertEqual(len(deallocs), 1)
|
| 98 |
+
del darr2
|
| 99 |
+
self.assertEqual(len(deallocs), 2)
|
| 100 |
+
deallocs.clear()
|
| 101 |
+
self.assertEqual(len(deallocs), 2)
|
| 102 |
+
deallocs.clear()
|
| 103 |
+
self.assertEqual(len(deallocs), 2)
|
| 104 |
+
|
| 105 |
+
deallocs.clear()
|
| 106 |
+
self.assertEqual(len(deallocs), 0)
|
| 107 |
+
|
| 108 |
+
def test_exception(self):
|
| 109 |
+
harr = np.arange(5)
|
| 110 |
+
darr1 = cuda.to_device(harr)
|
| 111 |
+
deallocs = cuda.current_context().memory_manager.deallocations
|
| 112 |
+
deallocs.clear()
|
| 113 |
+
self.assertEqual(len(deallocs), 0)
|
| 114 |
+
|
| 115 |
+
class CustomError(Exception):
|
| 116 |
+
pass
|
| 117 |
+
|
| 118 |
+
with self.assertRaises(CustomError):
|
| 119 |
+
with cuda.defer_cleanup():
|
| 120 |
+
darr2 = cuda.to_device(harr)
|
| 121 |
+
del darr2
|
| 122 |
+
self.assertEqual(len(deallocs), 1)
|
| 123 |
+
deallocs.clear()
|
| 124 |
+
self.assertEqual(len(deallocs), 1)
|
| 125 |
+
raise CustomError
|
| 126 |
+
deallocs.clear()
|
| 127 |
+
self.assertEqual(len(deallocs), 0)
|
| 128 |
+
del darr1
|
| 129 |
+
self.assertEqual(len(deallocs), 1)
|
| 130 |
+
deallocs.clear()
|
| 131 |
+
self.assertEqual(len(deallocs), 0)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class TestDeferCleanupAvail(CUDATestCase):
|
| 135 |
+
def test_context_manager(self):
|
| 136 |
+
# just make sure the API is available
|
| 137 |
+
with cuda.defer_cleanup():
|
| 138 |
+
pass
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@skip_on_cudasim('not supported on CUDASIM')
|
| 142 |
+
class TestDel(CUDATestCase):
|
| 143 |
+
"""
|
| 144 |
+
Ensure resources are deleted properly without ignored exception.
|
| 145 |
+
"""
|
| 146 |
+
@contextmanager
|
| 147 |
+
def check_ignored_exception(self, ctx):
|
| 148 |
+
with captured_stderr() as cap:
|
| 149 |
+
yield
|
| 150 |
+
ctx.deallocations.clear()
|
| 151 |
+
self.assertFalse(cap.getvalue())
|
| 152 |
+
|
| 153 |
+
def test_stream(self):
|
| 154 |
+
ctx = cuda.current_context()
|
| 155 |
+
stream = ctx.create_stream()
|
| 156 |
+
with self.check_ignored_exception(ctx):
|
| 157 |
+
del stream
|
| 158 |
+
|
| 159 |
+
def test_event(self):
|
| 160 |
+
ctx = cuda.current_context()
|
| 161 |
+
event = ctx.create_event()
|
| 162 |
+
with self.check_ignored_exception(ctx):
|
| 163 |
+
del event
|
| 164 |
+
|
| 165 |
+
def test_pinned_memory(self):
|
| 166 |
+
ctx = cuda.current_context()
|
| 167 |
+
mem = ctx.memhostalloc(32)
|
| 168 |
+
with self.check_ignored_exception(ctx):
|
| 169 |
+
del mem
|
| 170 |
+
|
| 171 |
+
def test_mapped_memory(self):
|
| 172 |
+
ctx = cuda.current_context()
|
| 173 |
+
mem = ctx.memhostalloc(32, mapped=True)
|
| 174 |
+
with self.check_ignored_exception(ctx):
|
| 175 |
+
del mem
|
| 176 |
+
|
| 177 |
+
def test_device_memory(self):
|
| 178 |
+
ctx = cuda.current_context()
|
| 179 |
+
mem = ctx.memalloc(32)
|
| 180 |
+
with self.check_ignored_exception(ctx):
|
| 181 |
+
del mem
|
| 182 |
+
|
| 183 |
+
def test_managed_memory(self):
|
| 184 |
+
ctx = cuda.current_context()
|
| 185 |
+
mem = ctx.memallocmanaged(32)
|
| 186 |
+
with self.check_ignored_exception(ctx):
|
| 187 |
+
del mem
|
| 188 |
+
|
| 189 |
+
def test_pinned_contextmanager(self):
|
| 190 |
+
# Check that temporarily pinned memory is unregistered immediately,
|
| 191 |
+
# such that it can be re-pinned at any time
|
| 192 |
+
class PinnedException(Exception):
|
| 193 |
+
pass
|
| 194 |
+
|
| 195 |
+
arr = np.zeros(1)
|
| 196 |
+
ctx = cuda.current_context()
|
| 197 |
+
ctx.deallocations.clear()
|
| 198 |
+
with self.check_ignored_exception(ctx):
|
| 199 |
+
with cuda.pinned(arr):
|
| 200 |
+
pass
|
| 201 |
+
with cuda.pinned(arr):
|
| 202 |
+
pass
|
| 203 |
+
# Should also work inside a `defer_cleanup` block
|
| 204 |
+
with cuda.defer_cleanup():
|
| 205 |
+
with cuda.pinned(arr):
|
| 206 |
+
pass
|
| 207 |
+
with cuda.pinned(arr):
|
| 208 |
+
pass
|
| 209 |
+
# Should also work when breaking out of the block due to an
|
| 210 |
+
# exception
|
| 211 |
+
try:
|
| 212 |
+
with cuda.pinned(arr):
|
| 213 |
+
raise PinnedException
|
| 214 |
+
except PinnedException:
|
| 215 |
+
with cuda.pinned(arr):
|
| 216 |
+
pass
|
| 217 |
+
|
| 218 |
+
def test_mapped_contextmanager(self):
|
| 219 |
+
# Check that temporarily mapped memory is unregistered immediately,
|
| 220 |
+
# such that it can be re-mapped at any time
|
| 221 |
+
class MappedException(Exception):
|
| 222 |
+
pass
|
| 223 |
+
|
| 224 |
+
arr = np.zeros(1)
|
| 225 |
+
ctx = cuda.current_context()
|
| 226 |
+
ctx.deallocations.clear()
|
| 227 |
+
with self.check_ignored_exception(ctx):
|
| 228 |
+
with cuda.mapped(arr):
|
| 229 |
+
pass
|
| 230 |
+
with cuda.mapped(arr):
|
| 231 |
+
pass
|
| 232 |
+
# Should also work inside a `defer_cleanup` block
|
| 233 |
+
with cuda.defer_cleanup():
|
| 234 |
+
with cuda.mapped(arr):
|
| 235 |
+
pass
|
| 236 |
+
with cuda.mapped(arr):
|
| 237 |
+
pass
|
| 238 |
+
# Should also work when breaking out of the block due to an
|
| 239 |
+
# exception
|
| 240 |
+
try:
|
| 241 |
+
with cuda.mapped(arr):
|
| 242 |
+
raise MappedException
|
| 243 |
+
except MappedException:
|
| 244 |
+
with cuda.mapped(arr):
|
| 245 |
+
pass
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
if __name__ == '__main__':
|
| 249 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_detect.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import subprocess
|
| 4 |
+
import threading
|
| 5 |
+
from numba import cuda
|
| 6 |
+
from numba.cuda.testing import (unittest, CUDATestCase, skip_on_cudasim,
|
| 7 |
+
skip_under_cuda_memcheck)
|
| 8 |
+
from numba.tests.support import captured_stdout
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestCudaDetect(CUDATestCase):
|
| 12 |
+
def test_cuda_detect(self):
|
| 13 |
+
# exercise the code path
|
| 14 |
+
with captured_stdout() as out:
|
| 15 |
+
cuda.detect()
|
| 16 |
+
output = out.getvalue()
|
| 17 |
+
self.assertIn('Found', output)
|
| 18 |
+
self.assertIn('CUDA devices', output)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@skip_under_cuda_memcheck('Hangs cuda-memcheck')
class TestCUDAFindLibs(CUDATestCase):
    """Tests of CUDA library lookup, run in a separate child process so that
    a broken driver path cannot poison this process's CUDA context."""

    def run_cmd(self, cmdline, env):
        """Run *cmdline* with environment *env* and return the decoded
        (stdout, stderr) of the child process.

        The child is killed if it does not finish within 5 minutes.
        """
        popen = subprocess.Popen(cmdline,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)

        # finish in 5 minutes or kill it
        timeout = threading.Timer(5 * 60., popen.kill)
        try:
            timeout.start()
            out, err = popen.communicate()
            # the process should exit with an error
            return out.decode(), err.decode()
        finally:
            # BUGFIX: the original had `return None, None` here. A `return`
            # inside `finally` overrides the `return` in the `try` block and
            # swallows any in-flight exception, so run_cmd always returned
            # (None, None) and the captured output was lost. Only cancel the
            # timer here.
            timeout.cancel()

    def run_test_in_separate_process(self, envvar, envvar_value):
        """Launch a tiny CUDA kernel in a child interpreter with *envvar*
        set to *envvar_value*, returning the child's (stdout, stderr)."""
        env_copy = os.environ.copy()
        env_copy[envvar] = str(envvar_value)
        code = """if 1:
            from numba import cuda
            @cuda.jit('(int64,)')
            def kernel(x):
                pass
            kernel(1,)
            """
        cmdline = [sys.executable, "-c", code]
        return self.run_cmd(cmdline, env_copy)

    @skip_on_cudasim('Simulator does not hit device library search code path')
    @unittest.skipIf(not sys.platform.startswith('linux'), "linux only")
    def test_cuda_find_lib_errors(self):
        """
        This tests that the find_libs works as expected in the case of an
        environment variable being used to set the path.
        """
        # one of these is likely to exist on linux, it's also unlikely that
        # someone has extracted the contents of libdevice into here!
        locs = ['lib', 'lib64']

        looking_for = None
        for l in locs:
            looking_for = os.path.join(os.path.sep, l)
            if os.path.exists(looking_for):
                break

        # This is the testing part, the test will only run if there's a valid
        # path in which to look
        if looking_for is not None:
            out, err = self.run_test_in_separate_process("NUMBA_CUDA_DRIVER",
                                                         looking_for)
            # With run_cmd fixed, the child's real output is returned, so
            # these assertions now check something meaningful.
            self.assertTrue(out is not None)
            self.assertTrue(err is not None)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_emm_plugins.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ctypes
|
| 2 |
+
import numpy as np
|
| 3 |
+
import weakref
|
| 4 |
+
|
| 5 |
+
from numba import cuda
|
| 6 |
+
from numba.core import config
|
| 7 |
+
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
|
| 8 |
+
from numba.tests.support import linux_only
|
| 9 |
+
|
| 10 |
+
# Only define the plugin classes when the real driver is in use: they derive
# from cuda.HostOnlyCUDAMemoryManager, which the simulator does not provide.
if not config.ENABLE_CUDASIM:
    class DeviceOnlyEMMPlugin(cuda.HostOnlyCUDAMemoryManager):
        """
        Dummy EMM Plugin implementation for testing. It memorises which plugin
        API methods have been called so that the tests can check that Numba
        called into the plugin as expected.
        """

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

            # For tracking our dummy allocations
            self.allocations = {}
            self.count = 0

            # For tracking which methods have been called
            self.initialized = False
            self.memalloc_called = False
            self.reset_called = False
            self.get_memory_info_called = False
            self.get_ipc_handle_called = False

        def memalloc(self, size):
            # We maintain a list of allocations and keep track of them, so that
            # we can test that the finalizers of objects returned by memalloc
            # get called.

            # Numba should have initialized the memory manager when preparing
            # the context for use, prior to any memalloc call.
            if not self.initialized:
                raise RuntimeError("memalloc called before initialize")
            self.memalloc_called = True

            # Create an allocation and record it
            self.count += 1
            alloc_count = self.count
            self.allocations[alloc_count] = size

            # The finalizer deletes the record from our internal dict of
            # allocations.
            # NOTE: capture the dict (not `self`) so the finalizer closure
            # does not keep the plugin instance alive.
            finalizer_allocs = self.allocations

            def finalizer():
                del finalizer_allocs[alloc_count]

            # We use an AutoFreePointer so that the finalizer will be run when
            # the reference count drops to zero.
            # The "device pointer" is just the allocation count boxed as a
            # void*, so tests can identify allocations by its integer value.
            ctx = weakref.proxy(self.context)
            ptr = ctypes.c_void_p(alloc_count)
            return cuda.cudadrv.driver.AutoFreePointer(ctx, ptr, size,
                                                       finalizer=finalizer)

        def initialize(self):
            # No special initialization needed.
            self.initialized = True

        def reset(self):
            # We remove all allocations on reset, just as a real EMM Plugin
            # would do. Note that our finalizers in memalloc don't check
            # whether the allocations are still alive, so running them after
            # reset will detect any allocations that are floating around at
            # exit time; however, the atexit finalizer for weakref will only
            # print a traceback, not terminate the interpreter abnormally.
            # NOTE(review): the comment above says allocations are removed,
            # but this body only records that reset was called — confirm
            # whether `self.allocations` was intended to be cleared here.
            self.reset_called = True

        def get_memory_info(self):
            # Return some dummy memory information
            self.get_memory_info_called = True
            return cuda.MemoryInfo(free=32, total=64)

        def get_ipc_handle(self, memory):
            # The dummy IPC handle is only a string, so it is important that
            # the tests don't try to do too much with it (e.g. open / close
            # it).
            self.get_ipc_handle_called = True
            return "Dummy IPC handle for alloc %s" % memory.device_pointer.value

        @property
        def interface_version(self):
            # The expected version for an EMM Plugin.
            return 1

    class BadVersionEMMPlugin(DeviceOnlyEMMPlugin):
        """A plugin that claims to implement a different interface version"""

        @property
        def interface_version(self):
            # Deliberately not 1, so Numba must reject this plugin.
            return 2
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@skip_on_cudasim('EMM Plugins not supported on CUDA simulator')
class TestDeviceOnlyEMMPlugin(CUDATestCase):
    """
    Check that Numba drives a device-allocation-only EMM Plugin through the
    expected API calls (initialize, memalloc, reset, get_memory_info,
    get_ipc_handle).
    """

    def setUp(self):
        super().setUp()
        # Always start afresh with a new context and memory manager
        cuda.close()
        cuda.set_memory_manager(DeviceOnlyEMMPlugin)

    def tearDown(self):
        super().tearDown()
        # Unset the memory manager for subsequent tests
        cuda.close()
        cuda.cudadrv.driver._memory_manager = None

    def test_memalloc(self):
        mgr = cuda.current_context().memory_manager

        # First allocation: memalloc must have been invoked with the
        # right size.
        host_a = np.arange(10)
        dev_a = cuda.device_array_like(host_a)
        self.assertTrue(mgr.memalloc_called)
        self.assertEqual(mgr.count, 1)
        self.assertEqual(mgr.allocations[1], host_a.nbytes)

        # Second allocation of a different size must also be recorded.
        host_b = np.arange(5)
        dev_b = cuda.device_array_like(host_b)
        self.assertEqual(mgr.count, 2)
        self.assertEqual(mgr.allocations[2], host_b.nbytes)

        # Dropping the first device array must trigger only its own
        # finalizer.
        del dev_a
        self.assertNotIn(1, mgr.allocations)
        self.assertIn(2, mgr.allocations)

        # Dropping the second one must trigger its finalizer too.
        del dev_b
        self.assertNotIn(2, mgr.allocations)

    def test_initialized_in_context(self):
        # A live CUDA context implies the memory manager was initialized.
        self.assertTrue(cuda.current_context().memory_manager.initialized)

    def test_reset(self):
        context = cuda.current_context()
        context.reset()
        self.assertTrue(context.memory_manager.reset_called)

    def test_get_memory_info(self):
        context = cuda.current_context()
        info = context.get_memory_info()
        self.assertTrue(context.memory_manager.get_memory_info_called)
        self.assertEqual(info.free, 32)
        self.assertEqual(info.total, 64)

    @linux_only
    def test_get_ipc_handle(self):
        # We don't attempt to close the IPC handle in this test because Numba
        # will be expecting a real IpcHandle object to have been returned from
        # get_ipc_handle, and it would cause problems to do so.
        dev_arr = cuda.device_array_like(np.arange(2))
        handle = dev_arr.get_ipc_handle()
        context = cuda.current_context()
        self.assertTrue(context.memory_manager.get_ipc_handle_called)
        self.assertIn("Dummy IPC handle for alloc 1", handle._ipc_handle)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@skip_on_cudasim('EMM Plugins not supported on CUDA simulator')
class TestBadEMMPluginVersion(CUDATestCase):
    """
    Ensure that Numba rejects EMM Plugins with incompatible version
    numbers.
    """

    def test_bad_plugin_version(self):
        # Installing a plugin whose interface_version != 1 must fail.
        with self.assertRaises(RuntimeError) as raises:
            cuda.set_memory_manager(BadVersionEMMPlugin)
        message = str(raises.exception)
        self.assertIn('version 1 required', message)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_events.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numba import cuda
|
| 3 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestCudaEvent(CUDATestCase):
    """Exercise event record / wait / synchronize / elapsed_time."""

    def _time_transfer(self, stream=0):
        # Shared body for the two tests below: time a host->device copy
        # between two events on the given stream (0 is the default stream,
        # matching the no-argument form of record()/wait()).
        n = 32
        dary = cuda.device_array(n, dtype=np.double)
        evtstart = cuda.event()
        evtend = cuda.event()

        evtstart.record(stream=stream)
        cuda.to_device(np.arange(n, dtype=np.double), to=dary, stream=stream)
        evtend.record(stream=stream)
        evtend.wait(stream=stream)
        evtend.synchronize()
        # Exercise the code path
        evtstart.elapsed_time(evtend)

    def test_event_elapsed(self):
        # Events on the default stream.
        self._time_transfer()

    def test_event_elapsed_stream(self):
        # Events on an explicitly-created stream.
        self._time_transfer(stream=cuda.stream())
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_host_alloc.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numba.cuda.cudadrv import driver
|
| 3 |
+
from numba import cuda
|
| 4 |
+
from numba.cuda.testing import unittest, ContextResettingTestCase
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestHostAlloc(ContextResettingTestCase):
    """Tests for host allocations: driver-level memhostalloc, pinned and
    mapped arrays, and ndarray operators on host-allocated memory."""

    def test_host_alloc_driver(self):
        # Allocate n bytes of mapped host memory directly via the context.
        n = 32
        mem = cuda.current_context().memhostalloc(n, mapped=True)

        # View the allocation as a uint8 ndarray backed by the host buffer.
        dtype = np.dtype(np.uint8)
        ary = np.ndarray(shape=n // dtype.itemsize, dtype=dtype,
                         buffer=mem)

        # A device-side memset must be visible through the mapped host view.
        magic = 0xab
        driver.device_memset(mem, magic, n)

        self.assertTrue(np.all(ary == magic))

        ary.fill(n)

        # Copy device->host and check it round-trips the filled values.
        recv = np.empty_like(ary)

        driver.device_to_host(recv, mem, ary.size)

        self.assertTrue(np.all(ary == recv))
        self.assertTrue(np.all(recv == n))

    def test_host_alloc_pinned(self):
        # Pinned memory is host-only: zeroing the device copy must not
        # affect the host array until an explicit copy back.
        ary = cuda.pinned_array(10, dtype=np.uint32)
        ary.fill(123)
        self.assertTrue(all(ary == 123))
        devary = cuda.to_device(ary)
        driver.device_memset(devary, 0, driver.device_memory_size(devary))
        self.assertTrue(all(ary == 123))
        devary.copy_to_host(ary)
        self.assertTrue(all(ary == 0))

    def test_host_alloc_mapped(self):
        # Mapped memory is shared: a device memset is immediately visible
        # on the host side.
        ary = cuda.mapped_array(10, dtype=np.uint32)
        ary.fill(123)
        self.assertTrue(all(ary == 123))
        driver.device_memset(ary, 0, driver.device_memory_size(ary))
        self.assertTrue(all(ary == 0))
        self.assertTrue(sum(ary != 0) == 0)

    def test_host_operators(self):
        # Host-allocated arrays must behave like ordinary ndarrays under
        # arithmetic and comparison operators.
        for ary in [cuda.mapped_array(10, dtype=np.uint32),
                    cuda.pinned_array(10, dtype=np.uint32)]:
            ary[:] = range(10)
            self.assertTrue(sum(ary + 1) == 55)
            self.assertTrue(sum((ary + 1) * 2 - 1) == 100)
            self.assertTrue(sum(ary < 5) == 5)
            self.assertTrue(sum(ary <= 5) == 6)
            self.assertTrue(sum(ary > 6) == 3)
            self.assertTrue(sum(ary >= 6) == 4)
            self.assertTrue(sum(ary ** 2) == 285)
            self.assertTrue(sum(ary // 2) == 20)
            self.assertTrue(sum(ary / 2.0) == 22.5)
            self.assertTrue(sum(ary % 2) == 5)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_init.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import multiprocessing as mp
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from numba import cuda
|
| 5 |
+
from numba.cuda.cudadrv.driver import CudaAPIError, driver
|
| 6 |
+
from numba.cuda.cudadrv.error import CudaSupportError
|
| 7 |
+
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# A mock of cuInit that always raises a CudaAPIError
|
| 11 |
+
def cuInit_raising(arg):
    """Stand-in for driver.cuInit that always fails with error code 999."""
    raise CudaAPIError(999, 'CUDA_ERROR_UNKNOWN')
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Test code to run in a child that patches driver.cuInit to a variant that
|
| 16 |
+
# always raises. We can't use mock.patch.object here because driver.cuInit is
|
| 17 |
+
# not assigned until we attempt to initialize - mock.patch.object cannot locate
|
| 18 |
+
# the non-existent original method, and so fails. Instead we patch
|
| 19 |
+
# driver.cuInit with our raising version prior to any attempt to initialize.
|
| 20 |
+
def cuInit_raising_test(result_queue):
    """Child-process body: patch cuInit to fail, then check that touching
    the device raises CudaSupportError; report (success, message) on the
    queue."""
    driver.cuInit = cuInit_raising

    success = False
    msg = None

    try:
        # A CUDA operation that forces initialization of the device
        cuda.device_array(1)
    except CudaSupportError as e:
        success, msg = True, e.msg

    result_queue.put((success, msg))
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Similar to cuInit_raising_test above, but for testing that the string
|
| 37 |
+
# returned by cuda_error() is as expected.
|
| 38 |
+
def initialization_error_test(result_queue):
    """Child-process body: like cuInit_raising_test, but reports the string
    from cuda.cuda_error() instead of the exception message."""
    driver.cuInit = cuInit_raising

    success = False

    try:
        # A CUDA operation that forces initialization of the device
        cuda.device_array(1)
    except CudaSupportError:
        success = True

    result_queue.put((success, cuda.cuda_error()))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# For testing the path where Driver.__init__() catches a CudaSupportError
|
| 55 |
+
def cuda_disabled_test(result_queue):
    """Child-process body for the NUMBA_DISABLE_CUDA path: touching the
    device must raise CudaSupportError; report (success, message)."""
    success = False
    msg = None

    try:
        # A CUDA operation that forces initialization of the device
        cuda.device_array(1)
    except CudaSupportError as e:
        success, msg = True, e.msg

    result_queue.put((success, msg))
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
# Similar to cuda_disabled_test, but checks cuda.cuda_error() instead of the
|
| 70 |
+
# exception raised on initialization
|
| 71 |
+
def cuda_disabled_error_test(result_queue):
    """Child-process body: like cuda_disabled_test, but reports the string
    from cuda.cuda_error() rather than the exception message."""
    success = False

    try:
        # A CUDA operation that forces initialization of the device
        cuda.device_array(1)
    except CudaSupportError:
        success = True

    result_queue.put((success, cuda.cuda_error()))
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@skip_on_cudasim('CUDA Simulator does not initialize driver')
class TestInit(CUDATestCase):
    """Tests of driver initialization failure handling, run in spawned
    child processes so a broken driver state cannot leak into this one."""

    def _test_init_failure(self, target, expected):
        """Run *target* in a spawned subprocess and assert that it reported
        a CudaSupportError whose message contains *expected*."""
        # Run the initialization failure test in a separate subprocess
        ctx = mp.get_context('spawn')
        result_queue = ctx.Queue()
        proc = ctx.Process(target=target, args=(result_queue,))
        proc.start()
        proc.join(30)  # should complete within 30s

        # ROBUSTNESS: the original called result_queue.get() with no timeout;
        # if the child hung past the join deadline the test would block
        # forever. Fail fast instead.
        if proc.is_alive():
            proc.terminate()
            proc.join()
            self.fail('Child process did not complete within 30 seconds')
        success, msg = result_queue.get(timeout=5)

        # Ensure the child process raised an exception during initialization
        # before checking the message
        if not success:
            self.fail('CudaSupportError not raised')

        self.assertIn(expected, msg)

    def test_init_failure_raising(self):
        expected = 'Error at driver init: CUDA_ERROR_UNKNOWN (999)'
        self._test_init_failure(cuInit_raising_test, expected)

    def test_init_failure_error(self):
        expected = 'CUDA_ERROR_UNKNOWN (999)'
        self._test_init_failure(initialization_error_test, expected)

    def _test_cuda_disabled(self, target):
        """Run *target* with NUMBA_DISABLE_CUDA=1, restoring the previous
        value of the environment variable afterwards."""
        # Uses _test_init_failure to launch the test in a separate subprocess
        # with CUDA disabled.
        cuda_disabled = os.environ.get('NUMBA_DISABLE_CUDA')
        os.environ['NUMBA_DISABLE_CUDA'] = "1"
        try:
            expected = 'CUDA is disabled due to setting NUMBA_DISABLE_CUDA=1'
            self._test_init_failure(cuda_disabled_test, expected)
        finally:
            if cuda_disabled is not None:
                os.environ['NUMBA_DISABLE_CUDA'] = cuda_disabled
            else:
                os.environ.pop('NUMBA_DISABLE_CUDA')

    def test_cuda_disabled_raising(self):
        self._test_cuda_disabled(cuda_disabled_test)

    def test_cuda_disabled_error(self):
        self._test_cuda_disabled(cuda_disabled_error_test)

    def test_init_success(self):
        # Here we assume that initialization is successful (because many bad
        # things will happen with the test suite if it is not) and check that
        # there is no error recorded.
        self.assertIsNone(cuda.cuda_error())
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_inline_ptx.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llvmlite import ir
|
| 2 |
+
|
| 3 |
+
from numba.cuda.cudadrv import nvvm
|
| 4 |
+
from numba.cuda.testing import unittest, ContextResettingTestCase
|
| 5 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@skip_on_cudasim('Inline PTX cannot be used in the simulator')
class TestCudaInlineAsm(ContextResettingTestCase):
    """Check that inline PTX embedded via llvmlite IR survives NVVM
    compilation into the emitted PTX."""

    def test_inline_rsqrt(self):
        # Build an NVVM-compatible module with a single kernel taking a
        # float* argument.
        mod = ir.Module(__name__)
        mod.triple = 'nvptx64-nvidia-cuda'
        nvvm.add_ir_version(mod)
        fnty = ir.FunctionType(ir.VoidType(), [ir.PointerType(ir.FloatType())])
        fn = ir.Function(mod, fnty, 'cu_rsqrt')
        bldr = ir.IRBuilder(fn.append_basic_block('entry'))

        # Inline-asm call: rsqrt.approx.f32 with one float in, one float out
        # ('=f,f' constraints); side_effect=True keeps it from being
        # optimized away.
        rsqrt_approx_fnty = ir.FunctionType(ir.FloatType(), [ir.FloatType()])
        inlineasm = ir.InlineAsm(rsqrt_approx_fnty,
                                 'rsqrt.approx.f32 $0, $1;',
                                 '=f,f', side_effect=True)
        val = bldr.load(fn.args[0])
        res = bldr.call(inlineasm, [val])

        # Store the result back through the argument pointer and finish.
        bldr.store(res, fn.args[0])
        bldr.ret_void()

        # generate ptx
        mod.data_layout = nvvm.NVVM().data_layout
        nvvm.set_cuda_kernel(fn)
        nvvmir = str(mod)
        ptx = nvvm.compile_ir(nvvmir)
        # The inline instruction must appear verbatim in the compiled PTX.
        self.assertTrue('rsqrt.approx.f32' in str(ptx))
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
if __name__ == '__main__':
|
| 37 |
+
unittest.main()
|