diff --git a/.gitattributes b/.gitattributes index c2ebc938e789f24ab3a27f829b3b04b56e59e7f5..997937d8997007229dcd494ab9c724a5bb866392 100644 --- a/.gitattributes +++ b/.gitattributes @@ -94,3 +94,4 @@ lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so fil lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a525a0eb029d1c450105758f419e101bb7eeb946 --- /dev/null +++ b/lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1808806ad22b1e8cca102941bf36917bcb2ebb81801a051088e7c0dd2577a31e +size 720649 diff --git a/lib/python3.10/site-packages/numba/cpython/cmathimpl.py b/lib/python3.10/site-packages/numba/cpython/cmathimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..4305e9d45dde68b5f32b56e462bd5b064bfaed18 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cpython/cmathimpl.py @@ -0,0 +1,542 @@ +""" +Implement the cmath module functions. 
+""" + + +import cmath +import math + +from numba.core.imputils import Registry, impl_ret_untracked +from numba.core import types, cgutils +from numba.core.typing import signature +from numba.cpython import builtins, mathimpl +from numba.core.extending import overload + +registry = Registry('cmathimpl') +lower = registry.lower + + +def is_nan(builder, z): + return builder.fcmp_unordered('uno', z.real, z.imag) + +def is_inf(builder, z): + return builder.or_(mathimpl.is_inf(builder, z.real), + mathimpl.is_inf(builder, z.imag)) + +def is_finite(builder, z): + return builder.and_(mathimpl.is_finite(builder, z.real), + mathimpl.is_finite(builder, z.imag)) + + +@lower(cmath.isnan, types.Complex) +def isnan_float_impl(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + res = is_nan(builder, z) + return impl_ret_untracked(context, builder, sig.return_type, res) + +@lower(cmath.isinf, types.Complex) +def isinf_float_impl(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + res = is_inf(builder, z) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower(cmath.isfinite, types.Complex) +def isfinite_float_impl(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + res = is_finite(builder, z) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@overload(cmath.rect) +def impl_cmath_rect(r, phi): + if all([isinstance(typ, types.Float) for typ in [r, phi]]): + def impl(r, phi): + if not math.isfinite(phi): + if not r: + # cmath.rect(0, phi={inf, nan}) = 0 + return abs(r) + if math.isinf(r): + # cmath.rect(inf, phi={inf, nan}) = inf + j phi + return complex(r, phi) + real = math.cos(phi) + imag = math.sin(phi) + if real == 0. 
and math.isinf(r): + # 0 * inf would return NaN, we want to keep 0 but xor the sign + real /= r + else: + real *= r + if imag == 0. and math.isinf(r): + # ditto + imag /= r + else: + imag *= r + return complex(real, imag) + return impl + + +def intrinsic_complex_unary(inner_func): + def wrapper(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + x = z.real + y = z.imag + # Same as above: math.isfinite() is unavailable on 2.x so we precompute + # its value and pass it to the pure Python implementation. + x_is_finite = mathimpl.is_finite(builder, x) + y_is_finite = mathimpl.is_finite(builder, y) + inner_sig = signature(sig.return_type, + *(typ.underlying_float,) * 2 + (types.boolean,) * 2) + res = context.compile_internal(builder, inner_func, inner_sig, + (x, y, x_is_finite, y_is_finite)) + return impl_ret_untracked(context, builder, sig, res) + return wrapper + + +NAN = float('nan') +INF = float('inf') + +@lower(cmath.exp, types.Complex) +@intrinsic_complex_unary +def exp_impl(x, y, x_is_finite, y_is_finite): + """cmath.exp(x + y j)""" + if x_is_finite: + if y_is_finite: + c = math.cos(y) + s = math.sin(y) + r = math.exp(x) + return complex(r * c, r * s) + else: + return complex(NAN, NAN) + elif math.isnan(x): + if y: + return complex(x, x) # nan + j nan + else: + return complex(x, y) # nan + 0j + elif x > 0.0: + # x == +inf + if y_is_finite: + real = math.cos(y) + imag = math.sin(y) + # Avoid NaNs if math.cos(y) or math.sin(y) == 0 + # (e.g. 
cmath.exp(inf + 0j) == inf + 0j) + if real != 0: + real *= x + if imag != 0: + imag *= x + return complex(real, imag) + else: + return complex(x, NAN) + else: + # x == -inf + if y_is_finite: + r = math.exp(x) + c = math.cos(y) + s = math.sin(y) + return complex(r * c, r * s) + else: + r = 0 + return complex(r, r) + +@lower(cmath.log, types.Complex) +@intrinsic_complex_unary +def log_impl(x, y, x_is_finite, y_is_finite): + """cmath.log(x + y j)""" + a = math.log(math.hypot(x, y)) + b = math.atan2(y, x) + return complex(a, b) + + +@lower(cmath.log, types.Complex, types.Complex) +def log_base_impl(context, builder, sig, args): + """cmath.log(z, base)""" + [z, base] = args + + def log_base(z, base): + return cmath.log(z) / cmath.log(base) + + res = context.compile_internal(builder, log_base, sig, args) + return impl_ret_untracked(context, builder, sig, res) + + +@overload(cmath.log10) +def impl_cmath_log10(z): + if not isinstance(z, types.Complex): + return + + LN_10 = 2.302585092994045684 + + def log10_impl(z): + """cmath.log10(z)""" + z = cmath.log(z) + # This formula gives better results on +/-inf than cmath.log(z, 10) + # See http://bugs.python.org/issue22544 + return complex(z.real / LN_10, z.imag / LN_10) + + return log10_impl + + +@overload(cmath.phase) +def phase_impl(x): + """cmath.phase(x + y j)""" + + if not isinstance(x, types.Complex): + return + + def impl(x): + return math.atan2(x.imag, x.real) + return impl + + +@overload(cmath.polar) +def polar_impl(x): + if not isinstance(x, types.Complex): + return + + def impl(x): + r, i = x.real, x.imag + return math.hypot(r, i), math.atan2(i, r) + return impl + + +@lower(cmath.sqrt, types.Complex) +def sqrt_impl(context, builder, sig, args): + # We risk spurious overflow for components >= FLT_MAX / (1 + sqrt(2)). + + SQRT2 = 1.414213562373095048801688724209698079E0 + ONE_PLUS_SQRT2 = (1. 
+ SQRT2) + theargflt = sig.args[0].underlying_float + # Get a type specific maximum value so scaling for overflow is based on that + MAX = mathimpl.DBL_MAX if theargflt.bitwidth == 64 else mathimpl.FLT_MAX + # THRES will be double precision, should not impact typing as it's just + # used for comparison, there *may* be a few values near THRES which + # deviate from e.g. NumPy due to rounding that occurs in the computation + # of this value in the case of a 32bit argument. + THRES = MAX / ONE_PLUS_SQRT2 + + def sqrt_impl(z): + """cmath.sqrt(z)""" + # This is NumPy's algorithm, see npy_csqrt() in npy_math_complex.c.src + a = z.real + b = z.imag + if a == 0.0 and b == 0.0: + return complex(abs(b), b) + if math.isinf(b): + return complex(abs(b), b) + if math.isnan(a): + return complex(a, a) + if math.isinf(a): + if a < 0.0: + return complex(abs(b - b), math.copysign(a, b)) + else: + return complex(a, math.copysign(b - b, b)) + + # The remaining special case (b is NaN) is handled just fine by + # the normal code path below. 
+ + # Scale to avoid overflow + if abs(a) >= THRES or abs(b) >= THRES: + a *= 0.25 + b *= 0.25 + scale = True + else: + scale = False + # Algorithm 312, CACM vol 10, Oct 1967 + if a >= 0: + t = math.sqrt((a + math.hypot(a, b)) * 0.5) + real = t + imag = b / (2 * t) + else: + t = math.sqrt((-a + math.hypot(a, b)) * 0.5) + real = abs(b) / (2 * t) + imag = math.copysign(t, b) + # Rescale + if scale: + return complex(real * 2, imag) + else: + return complex(real, imag) + + res = context.compile_internal(builder, sqrt_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + + +@lower(cmath.cos, types.Complex) +def cos_impl(context, builder, sig, args): + def cos_impl(z): + """cmath.cos(z) = cmath.cosh(z j)""" + return cmath.cosh(complex(-z.imag, z.real)) + + res = context.compile_internal(builder, cos_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +@overload(cmath.cosh) +def impl_cmath_cosh(z): + if not isinstance(z, types.Complex): + return + + def cosh_impl(z): + """cmath.cosh(z)""" + x = z.real + y = z.imag + if math.isinf(x): + if math.isnan(y): + # x = +inf, y = NaN => cmath.cosh(x + y j) = inf + Nan * j + real = abs(x) + imag = y + elif y == 0.0: + # x = +inf, y = 0 => cmath.cosh(x + y j) = inf + 0j + real = abs(x) + imag = y + else: + real = math.copysign(x, math.cos(y)) + imag = math.copysign(x, math.sin(y)) + if x < 0.0: + # x = -inf => negate imaginary part of result + imag = -imag + return complex(real, imag) + return complex(math.cos(y) * math.cosh(x), + math.sin(y) * math.sinh(x)) + return cosh_impl + + +@lower(cmath.sin, types.Complex) +def sin_impl(context, builder, sig, args): + def sin_impl(z): + """cmath.sin(z) = -j * cmath.sinh(z j)""" + r = cmath.sinh(complex(-z.imag, z.real)) + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, sin_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +@overload(cmath.sinh) +def impl_cmath_sinh(z): + if not isinstance(z, 
types.Complex): + return + + def sinh_impl(z): + """cmath.sinh(z)""" + x = z.real + y = z.imag + if math.isinf(x): + if math.isnan(y): + # x = +/-inf, y = NaN => cmath.sinh(x + y j) = x + NaN * j + real = x + imag = y + else: + real = math.cos(y) + imag = math.sin(y) + if real != 0.: + real *= x + if imag != 0.: + imag *= abs(x) + return complex(real, imag) + return complex(math.cos(y) * math.sinh(x), + math.sin(y) * math.cosh(x)) + return sinh_impl + + +@lower(cmath.tan, types.Complex) +def tan_impl(context, builder, sig, args): + def tan_impl(z): + """cmath.tan(z) = -j * cmath.tanh(z j)""" + r = cmath.tanh(complex(-z.imag, z.real)) + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, tan_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + + +@overload(cmath.tanh) +def impl_cmath_tanh(z): + if not isinstance(z, types.Complex): + return + + def tanh_impl(z): + """cmath.tanh(z)""" + x = z.real + y = z.imag + if math.isinf(x): + real = math.copysign(1., x) + if math.isinf(y): + imag = 0. + else: + imag = math.copysign(0., math.sin(2. * y)) + return complex(real, imag) + # This is CPython's algorithm (see c_tanh() in cmathmodule.c). + # XXX how to force float constants into single precision? + tx = math.tanh(x) + ty = math.tan(y) + cx = 1. / math.cosh(x) + txty = tx * ty + denom = 1. + txty * txty + return complex( + tx * (1. 
+ ty * ty) / denom, + ((ty / denom) * cx) * cx) + + return tanh_impl + + +@lower(cmath.acos, types.Complex) +def acos_impl(context, builder, sig, args): + LN_4 = math.log(4) + THRES = mathimpl.FLT_MAX / 4 + + def acos_impl(z): + """cmath.acos(z)""" + # CPython's algorithm (see c_acos() in cmathmodule.c) + if abs(z.real) > THRES or abs(z.imag) > THRES: + # Avoid unnecessary overflow for large arguments + # (also handles infinities gracefully) + real = math.atan2(abs(z.imag), z.real) + imag = math.copysign( + math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4, + -z.imag) + return complex(real, imag) + else: + s1 = cmath.sqrt(complex(1. - z.real, -z.imag)) + s2 = cmath.sqrt(complex(1. + z.real, z.imag)) + real = 2. * math.atan2(s1.real, s2.real) + imag = math.asinh(s2.real * s1.imag - s2.imag * s1.real) + return complex(real, imag) + + res = context.compile_internal(builder, acos_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +@overload(cmath.acosh) +def impl_cmath_acosh(z): + if not isinstance(z, types.Complex): + return + + LN_4 = math.log(4) + THRES = mathimpl.FLT_MAX / 4 + + def acosh_impl(z): + """cmath.acosh(z)""" + # CPython's algorithm (see c_acosh() in cmathmodule.c) + if abs(z.real) > THRES or abs(z.imag) > THRES: + # Avoid unnecessary overflow for large arguments + # (also handles infinities gracefully) + real = math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4 + imag = math.atan2(z.imag, z.real) + return complex(real, imag) + else: + s1 = cmath.sqrt(complex(z.real - 1., z.imag)) + s2 = cmath.sqrt(complex(z.real + 1., z.imag)) + real = math.asinh(s1.real * s2.real + s1.imag * s2.imag) + imag = 2. * math.atan2(s1.imag, s2.real) + return complex(real, imag) + # Condensed formula (NumPy) + #return cmath.log(z + cmath.sqrt(z + 1.) 
* cmath.sqrt(z - 1.)) + + return acosh_impl + + +@lower(cmath.asinh, types.Complex) +def asinh_impl(context, builder, sig, args): + LN_4 = math.log(4) + THRES = mathimpl.FLT_MAX / 4 + + def asinh_impl(z): + """cmath.asinh(z)""" + # CPython's algorithm (see c_asinh() in cmathmodule.c) + if abs(z.real) > THRES or abs(z.imag) > THRES: + real = math.copysign( + math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4, + z.real) + imag = math.atan2(z.imag, abs(z.real)) + return complex(real, imag) + else: + s1 = cmath.sqrt(complex(1. + z.imag, -z.real)) + s2 = cmath.sqrt(complex(1. - z.imag, z.real)) + real = math.asinh(s1.real * s2.imag - s2.real * s1.imag) + imag = math.atan2(z.imag, s1.real * s2.real - s1.imag * s2.imag) + return complex(real, imag) + + res = context.compile_internal(builder, asinh_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +@lower(cmath.asin, types.Complex) +def asin_impl(context, builder, sig, args): + def asin_impl(z): + """cmath.asin(z) = -j * cmath.asinh(z j)""" + r = cmath.asinh(complex(-z.imag, z.real)) + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, asin_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +@lower(cmath.atan, types.Complex) +def atan_impl(context, builder, sig, args): + def atan_impl(z): + """cmath.atan(z) = -j * cmath.atanh(z j)""" + r = cmath.atanh(complex(-z.imag, z.real)) + if math.isinf(z.real) and math.isnan(z.imag): + # XXX this is odd but necessary + return complex(r.imag, r.real) + else: + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, atan_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +@lower(cmath.atanh, types.Complex) +def atanh_impl(context, builder, sig, args): + LN_4 = math.log(4) + THRES_LARGE = math.sqrt(mathimpl.FLT_MAX / 4) + THRES_SMALL = math.sqrt(mathimpl.FLT_MIN) + PI_12 = math.pi / 2 + + def atanh_impl(z): + """cmath.atanh(z)""" + # CPython's algorithm (see 
c_atanh() in cmathmodule.c) + if z.real < 0.: + # Reduce to case where z.real >= 0., using atanh(z) = -atanh(-z). + negate = True + z = -z + else: + negate = False + + ay = abs(z.imag) + if math.isnan(z.real) or z.real > THRES_LARGE or ay > THRES_LARGE: + if math.isinf(z.imag): + real = math.copysign(0., z.real) + elif math.isinf(z.real): + real = 0. + else: + # may be safe from overflow, depending on hypot's implementation... + h = math.hypot(z.real * 0.5, z.imag * 0.5) + real = z.real/4./h/h + imag = -math.copysign(PI_12, -z.imag) + elif z.real == 1. and ay < THRES_SMALL: + # C99 standard says: atanh(1+/-0.) should be inf +/- 0j + if ay == 0.: + real = INF + imag = z.imag + else: + real = -math.log(math.sqrt(ay) / + math.sqrt(math.hypot(ay, 2.))) + imag = math.copysign(math.atan2(2., -ay) / 2, z.imag) + else: + sqay = ay * ay + zr1 = 1 - z.real + real = math.log1p(4. * z.real / (zr1 * zr1 + sqay)) * 0.25 + imag = -math.atan2(-2. * z.imag, + zr1 * (1 + z.real) - sqay) * 0.5 + + if math.isnan(z.imag): + imag = NAN + if negate: + return complex(-real, -imag) + else: + return complex(real, imag) + + res = context.compile_internal(builder, atanh_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) diff --git a/lib/python3.10/site-packages/numba/cpython/hashing.py b/lib/python3.10/site-packages/numba/cpython/hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..410f9be325d778887d340da9c016cd9eb9174980 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cpython/hashing.py @@ -0,0 +1,10 @@ +import sys +from numba.core.utils import _RedirectSubpackage +from numba.core import config + +if config.USE_LEGACY_TYPE_SYSTEM: + sys.modules[__name__] = _RedirectSubpackage(locals(), + "numba.cpython.old_hashing") +else: + sys.modules[__name__] = _RedirectSubpackage(locals(), + "numba.cpython.new_hashing") diff --git a/lib/python3.10/site-packages/numba/cpython/listobj.py b/lib/python3.10/site-packages/numba/cpython/listobj.py new 
file mode 100644 index 0000000000000000000000000000000000000000..08c23dea4835862d38ed2c9f11812d7764006f96 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cpython/listobj.py @@ -0,0 +1,1260 @@ +""" +Support for native homogeneous lists. +""" + + +import math +import operator +from functools import cached_property + +from llvmlite import ir +from numba.core import types, typing, errors, cgutils, config +from numba.core.imputils import (lower_builtin, lower_cast, + iternext_impl, impl_ret_borrowed, + impl_ret_new_ref, impl_ret_untracked, + RefType) +from numba.core.extending import overload_method, overload +from numba.misc import quicksort +from numba.cpython import slicing +from numba import literal_unroll + + +def get_list_payload(context, builder, list_type, value): + """ + Given a list value and type, get its payload structure (as a + reference, so that mutations are seen by all). + """ + payload_type = types.ListPayload(list_type) + payload = context.nrt.meminfo_data(builder, value.meminfo) + ptrty = context.get_data_type(payload_type).as_pointer() + payload = builder.bitcast(payload, ptrty) + return context.make_data_helper(builder, payload_type, ref=payload) + + +def get_itemsize(context, list_type): + """ + Return the item size for the given list type. 
+ """ + llty = context.get_data_type(list_type.dtype) + return context.get_abi_sizeof(llty) + + +class _ListPayloadMixin(object): + + @property + def size(self): + return self._payload.size + + @size.setter + def size(self, value): + self._payload.size = value + + @property + def dirty(self): + return self._payload.dirty + + @property + def data(self): + return self._payload._get_ptr_by_name('data') + + def _gep(self, idx): + return cgutils.gep(self._builder, self.data, idx) + + def getitem(self, idx): + ptr = self._gep(idx) + data_item = self._builder.load(ptr) + return self._datamodel.from_data(self._builder, data_item) + + def fix_index(self, idx): + """ + Fix negative indices by adding the size to them. Positive + indices are left untouched. + """ + is_negative = self._builder.icmp_signed('<', idx, + ir.Constant(idx.type, 0)) + wrapped_index = self._builder.add(idx, self.size) + return self._builder.select(is_negative, wrapped_index, idx) + + def is_out_of_bounds(self, idx): + """ + Return whether the index is out of bounds. + """ + underflow = self._builder.icmp_signed('<', idx, + ir.Constant(idx.type, 0)) + overflow = self._builder.icmp_signed('>=', idx, self.size) + return self._builder.or_(underflow, overflow) + + def clamp_index(self, idx): + """ + Clamp the index in [0, size]. + """ + builder = self._builder + idxptr = cgutils.alloca_once_value(builder, idx) + + zero = ir.Constant(idx.type, 0) + size = self.size + + underflow = self._builder.icmp_signed('<', idx, zero) + with builder.if_then(underflow, likely=False): + builder.store(zero, idxptr) + overflow = self._builder.icmp_signed('>=', idx, size) + with builder.if_then(overflow, likely=False): + builder.store(size, idxptr) + + return builder.load(idxptr) + + def guard_index(self, idx, msg): + """ + Raise an error if the index is out of bounds. 
+ """ + with self._builder.if_then(self.is_out_of_bounds(idx), likely=False): + self._context.call_conv.return_user_exc(self._builder, + IndexError, (msg,)) + + def fix_slice(self, slice): + """ + Fix slice start and stop to be valid (inclusive and exclusive, resp) + indexing bounds. + """ + return slicing.fix_slice(self._builder, slice, self.size) + + def incref_value(self, val): + "Incref an element value" + self._context.nrt.incref(self._builder, self.dtype, val) + + def decref_value(self, val): + "Decref an element value" + self._context.nrt.decref(self._builder, self.dtype, val) + + +class ListPayloadAccessor(_ListPayloadMixin): + """ + A helper object to access the list attributes given the pointer to the + payload type. + """ + def __init__(self, context, builder, list_type, payload_ptr): + self._context = context + self._builder = builder + self._ty = list_type + self._datamodel = context.data_model_manager[list_type.dtype] + payload_type = types.ListPayload(list_type) + ptrty = context.get_data_type(payload_type).as_pointer() + payload_ptr = builder.bitcast(payload_ptr, ptrty) + payload = context.make_data_helper(builder, payload_type, + ref=payload_ptr) + self._payload = payload + + +class ListInstance(_ListPayloadMixin): + + def __init__(self, context, builder, list_type, list_val): + self._context = context + self._builder = builder + self._ty = list_type + self._list = context.make_helper(builder, list_type, list_val) + self._itemsize = get_itemsize(context, list_type) + self._datamodel = context.data_model_manager[list_type.dtype] + + @property + def dtype(self): + return self._ty.dtype + + @property + def _payload(self): + # This cannot be cached as it can be reallocated + return get_list_payload(self._context, self._builder, self._ty, self._list) + + @property + def parent(self): + return self._list.parent + + @parent.setter + def parent(self, value): + self._list.parent = value + + @property + def value(self): + return self._list._getvalue() + + 
@property + def meminfo(self): + return self._list.meminfo + + def set_dirty(self, val): + if self._ty.reflected: + self._payload.dirty = cgutils.true_bit if val else cgutils.false_bit + + def clear_value(self, idx): + """Remove the value at the location + """ + self.decref_value(self.getitem(idx)) + # it's necessary for the dtor which just decref every slot on it. + self.zfill(idx, self._builder.add(idx, idx.type(1))) + + def setitem(self, idx, val, incref, decref_old_value=True): + # Decref old data + if decref_old_value: + self.decref_value(self.getitem(idx)) + + ptr = self._gep(idx) + data_item = self._datamodel.as_data(self._builder, val) + self._builder.store(data_item, ptr) + self.set_dirty(True) + if incref: + # Incref the underlying data + self.incref_value(val) + + def inititem(self, idx, val, incref=True): + ptr = self._gep(idx) + data_item = self._datamodel.as_data(self._builder, val) + self._builder.store(data_item, ptr) + if incref: + self.incref_value(val) + + def zfill(self, start, stop): + """Zero-fill the memory at index *start* to *stop* + + *stop* MUST not be smaller than *start*. + """ + builder = self._builder + base = self._gep(start) + end = self._gep(stop) + intaddr_t = self._context.get_value_type(types.intp) + size = builder.sub(builder.ptrtoint(end, intaddr_t), + builder.ptrtoint(base, intaddr_t)) + cgutils.memset(builder, base, size, ir.IntType(8)(0)) + + @classmethod + def allocate_ex(cls, context, builder, list_type, nitems): + """ + Allocate a ListInstance with its storage. + Return a (ok, instance) tuple where *ok* is a LLVM boolean and + *instance* is a ListInstance object (the object's contents are + only valid when *ok* is true). 
+ """ + intp_t = context.get_value_type(types.intp) + + if isinstance(nitems, int): + nitems = ir.Constant(intp_t, nitems) + + payload_type = context.get_data_type(types.ListPayload(list_type)) + payload_size = context.get_abi_sizeof(payload_type) + + itemsize = get_itemsize(context, list_type) + # Account for the fact that the payload struct contains one entry + payload_size -= itemsize + + ok = cgutils.alloca_once_value(builder, cgutils.true_bit) + self = cls(context, builder, list_type, None) + + # Total allocation size = + nitems * itemsize + allocsize, ovf = cgutils.muladd_with_overflow(builder, nitems, + ir.Constant(intp_t, itemsize), + ir.Constant(intp_t, payload_size)) + with builder.if_then(ovf, likely=False): + builder.store(cgutils.false_bit, ok) + + with builder.if_then(builder.load(ok), likely=True): + meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked( + builder, size=allocsize, dtor=self.get_dtor()) + with builder.if_else(cgutils.is_null(builder, meminfo), + likely=False) as (if_error, if_ok): + with if_error: + builder.store(cgutils.false_bit, ok) + with if_ok: + self._list.meminfo = meminfo + self._list.parent = context.get_constant_null(types.pyobject) + self._payload.allocated = nitems + self._payload.size = ir.Constant(intp_t, 0) # for safety + self._payload.dirty = cgutils.false_bit + # Zero the allocated region + self.zfill(self.size.type(0), nitems) + + return builder.load(ok), self + + def define_dtor(self): + "Define the destructor if not already defined" + context = self._context + builder = self._builder + mod = builder.module + # Declare dtor + fnty = ir.FunctionType(ir.VoidType(), [cgutils.voidptr_t]) + fn = cgutils.get_or_insert_function(mod, fnty, + '.dtor.list.{}'.format(self.dtype)) + if not fn.is_declaration: + # End early if the dtor is already defined + return fn + fn.linkage = 'linkonce_odr' + # Populate the dtor + builder = ir.IRBuilder(fn.append_basic_block()) + base_ptr = fn.args[0] # void* + + # get payload + payload = 
ListPayloadAccessor(context, builder, self._ty, base_ptr) + + # Loop over all data to decref + intp = payload.size.type + with cgutils.for_range_slice( + builder, start=intp(0), stop=payload.size, step=intp(1), + intp=intp) as (idx, _): + val = payload.getitem(idx) + context.nrt.decref(builder, self.dtype, val) + builder.ret_void() + return fn + + def get_dtor(self): + """"Get the element dtor function pointer as void pointer. + + It's safe to be called multiple times. + """ + # Define and set the Dtor + dtor = self.define_dtor() + dtor_fnptr = self._builder.bitcast(dtor, cgutils.voidptr_t) + return dtor_fnptr + + @classmethod + def allocate(cls, context, builder, list_type, nitems): + """ + Allocate a ListInstance with its storage. Same as allocate_ex(), + but return an initialized *instance*. If allocation failed, + control is transferred to the caller using the target's current + call convention. + """ + ok, self = cls.allocate_ex(context, builder, list_type, nitems) + with builder.if_then(builder.not_(ok), likely=False): + context.call_conv.return_user_exc(builder, MemoryError, + ("cannot allocate list",)) + return self + + @classmethod + def from_meminfo(cls, context, builder, list_type, meminfo): + """ + Allocate a new list instance pointing to an existing payload + (a meminfo pointer). + Note the parent field has to be filled by the caller. + """ + self = cls(context, builder, list_type, None) + self._list.meminfo = meminfo + self._list.parent = context.get_constant_null(types.pyobject) + context.nrt.incref(builder, list_type, self.value) + # Payload is part of the meminfo, no need to touch it + return self + + def resize(self, new_size): + """ + Ensure the list is properly sized for the new size. 
+ """ + def _payload_realloc(new_allocated): + payload_type = context.get_data_type(types.ListPayload(self._ty)) + payload_size = context.get_abi_sizeof(payload_type) + # Account for the fact that the payload struct contains one entry + payload_size -= itemsize + + allocsize, ovf = cgutils.muladd_with_overflow( + builder, new_allocated, + ir.Constant(intp_t, itemsize), + ir.Constant(intp_t, payload_size)) + with builder.if_then(ovf, likely=False): + context.call_conv.return_user_exc(builder, MemoryError, + ("cannot resize list",)) + + ptr = context.nrt.meminfo_varsize_realloc_unchecked(builder, + self._list.meminfo, + size=allocsize) + cgutils.guard_memory_error(context, builder, ptr, + "cannot resize list") + self._payload.allocated = new_allocated + + context = self._context + builder = self._builder + intp_t = new_size.type + + itemsize = get_itemsize(context, self._ty) + allocated = self._payload.allocated + + two = ir.Constant(intp_t, 2) + eight = ir.Constant(intp_t, 8) + + # allocated < new_size + is_too_small = builder.icmp_signed('<', allocated, new_size) + # (allocated >> 2) > new_size + is_too_large = builder.icmp_signed('>', builder.ashr(allocated, two), new_size) + + with builder.if_then(is_too_large, likely=False): + # Exact downsize to requested size + # NOTE: is_too_large must be aggressive enough to avoid repeated + # upsizes and downsizes when growing a list. + _payload_realloc(new_size) + + with builder.if_then(is_too_small, likely=False): + # Upsize with moderate over-allocation (size + size >> 2 + 8) + new_allocated = builder.add(eight, + builder.add(new_size, + builder.ashr(new_size, two))) + _payload_realloc(new_allocated) + self.zfill(self.size, new_allocated) + + self._payload.size = new_size + self.set_dirty(True) + + def move(self, dest_idx, src_idx, count): + """ + Move `count` elements from `src_idx` to `dest_idx`. 
+ """ + dest_ptr = self._gep(dest_idx) + src_ptr = self._gep(src_idx) + cgutils.raw_memmove(self._builder, dest_ptr, src_ptr, + count, itemsize=self._itemsize) + + self.set_dirty(True) + +class ListIterInstance(_ListPayloadMixin): + + def __init__(self, context, builder, iter_type, iter_val): + self._context = context + self._builder = builder + self._ty = iter_type + self._iter = context.make_helper(builder, iter_type, iter_val) + self._datamodel = context.data_model_manager[iter_type.yield_type] + + @classmethod + def from_list(cls, context, builder, iter_type, list_val): + list_inst = ListInstance(context, builder, iter_type.container, list_val) + self = cls(context, builder, iter_type, None) + index = context.get_constant(types.intp, 0) + self._iter.index = cgutils.alloca_once_value(builder, index) + self._iter.meminfo = list_inst.meminfo + return self + + @property + def _payload(self): + # This cannot be cached as it can be reallocated + return get_list_payload(self._context, self._builder, + self._ty.container, self._iter) + + @property + def value(self): + return self._iter._getvalue() + + @property + def index(self): + return self._builder.load(self._iter.index) + + @index.setter + def index(self, value): + self._builder.store(value, self._iter.index) + + +#------------------------------------------------------------------------------- +# Constructors + +def build_list(context, builder, list_type, items): + """ + Build a list of the given type, containing the given items. 
+ """ + nitems = len(items) + inst = ListInstance.allocate(context, builder, list_type, nitems) + # Populate list + inst.size = context.get_constant(types.intp, nitems) + for i, val in enumerate(items): + inst.setitem(context.get_constant(types.intp, i), val, incref=True) + + return impl_ret_new_ref(context, builder, list_type, inst.value) + + +@lower_builtin(list, types.IterableType) +def list_constructor(context, builder, sig, args): + + def list_impl(iterable): + res = [] + res.extend(iterable) + return res + + return context.compile_internal(builder, list_impl, sig, args) + +@lower_builtin(list) +def list_constructor(context, builder, sig, args): + list_type = sig.return_type + list_len = 0 + inst = ListInstance.allocate(context, builder, list_type, list_len) + return impl_ret_new_ref(context, builder, list_type, inst.value) + +#------------------------------------------------------------------------------- +# Various operations + +@lower_builtin(len, types.List) +def list_len(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + return inst.size + +@lower_builtin('getiter', types.List) +def getiter_list(context, builder, sig, args): + inst = ListIterInstance.from_list(context, builder, sig.return_type, args[0]) + return impl_ret_borrowed(context, builder, sig.return_type, inst.value) + +@lower_builtin('iternext', types.ListIter) +@iternext_impl(RefType.BORROWED) +def iternext_listiter(context, builder, sig, args, result): + inst = ListIterInstance(context, builder, sig.args[0], args[0]) + + index = inst.index + nitems = inst.size + is_valid = builder.icmp_signed('<', index, nitems) + result.set_valid(is_valid) + + with builder.if_then(is_valid): + result.yield_(inst.getitem(index)) + inst.index = builder.add(index, context.get_constant(types.intp, 1)) + + +@lower_builtin(operator.getitem, types.List, types.Integer) +def getitem_list(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], 
args[0]) + index = args[1] + + index = inst.fix_index(index) + inst.guard_index(index, msg="getitem out of range") + result = inst.getitem(index) + + return impl_ret_borrowed(context, builder, sig.return_type, result) + +@lower_builtin(operator.setitem, types.List, types.Integer, types.Any) +def setitem_list(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + index = args[1] + value = args[2] + + index = inst.fix_index(index) + inst.guard_index(index, msg="setitem out of range") + inst.setitem(index, value, incref=True) + return context.get_dummy_value() + + +@lower_builtin(operator.getitem, types.List, types.SliceType) +def getslice_list(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + slice = context.make_helper(builder, sig.args[1], args[1]) + slicing.guard_invalid_slice(context, builder, sig.args[1], slice) + inst.fix_slice(slice) + + # Allocate result and populate it + result_size = slicing.get_slice_length(builder, slice) + result = ListInstance.allocate(context, builder, sig.return_type, + result_size) + result.size = result_size + with cgutils.for_range_slice_generic(builder, slice.start, slice.stop, + slice.step) as (pos_range, neg_range): + with pos_range as (idx, count): + value = inst.getitem(idx) + result.inititem(count, value, incref=True) + with neg_range as (idx, count): + value = inst.getitem(idx) + result.inititem(count, value, incref=True) + + return impl_ret_new_ref(context, builder, sig.return_type, result.value) + +@lower_builtin(operator.setitem, types.List, types.SliceType, types.Any) +def setitem_list(context, builder, sig, args): + dest = ListInstance(context, builder, sig.args[0], args[0]) + src = ListInstance(context, builder, sig.args[2], args[2]) + + slice = context.make_helper(builder, sig.args[1], args[1]) + slicing.guard_invalid_slice(context, builder, sig.args[1], slice) + dest.fix_slice(slice) + + src_size = src.size + avail_size = 
slicing.get_slice_length(builder, slice) + size_delta = builder.sub(src.size, avail_size) + + zero = ir.Constant(size_delta.type, 0) + one = ir.Constant(size_delta.type, 1) + + with builder.if_else(builder.icmp_signed('==', slice.step, one)) as (then, otherwise): + with then: + # Slice step == 1 => we can resize + + # Compute the real stop, e.g. for dest[2:0] = [...] + real_stop = builder.add(slice.start, avail_size) + # Size of the list tail, after the end of slice + tail_size = builder.sub(dest.size, real_stop) + + with builder.if_then(builder.icmp_signed('>', size_delta, zero)): + # Grow list then move list tail + dest.resize(builder.add(dest.size, size_delta)) + dest.move(builder.add(real_stop, size_delta), real_stop, + tail_size) + + with builder.if_then(builder.icmp_signed('<', size_delta, zero)): + # Move list tail then shrink list + dest.move(builder.add(real_stop, size_delta), real_stop, + tail_size) + dest.resize(builder.add(dest.size, size_delta)) + + dest_offset = slice.start + + with cgutils.for_range(builder, src_size) as loop: + value = src.getitem(loop.index) + dest.setitem(builder.add(loop.index, dest_offset), value, incref=True) + + with otherwise: + with builder.if_then(builder.icmp_signed('!=', size_delta, zero)): + msg = "cannot resize extended list slice with step != 1" + context.call_conv.return_user_exc(builder, ValueError, (msg,)) + + with cgutils.for_range_slice_generic( + builder, slice.start, slice.stop, slice.step) as (pos_range, neg_range): + with pos_range as (index, count): + value = src.getitem(count) + dest.setitem(index, value, incref=True) + with neg_range as (index, count): + value = src.getitem(count) + dest.setitem(index, value, incref=True) + + return context.get_dummy_value() + + + +@lower_builtin(operator.delitem, types.List, types.Integer) +def delitem_list_index(context, builder, sig, args): + + def list_delitem_impl(lst, i): + lst.pop(i) + + return context.compile_internal(builder, list_delitem_impl, sig, args) + + 
@lower_builtin(operator.delitem, types.List, types.SliceType)
def delitem_list(context, builder, sig, args):
    """Lower ``del lst[start:stop:step]``; only step == 1 is supported."""
    list_inst = ListInstance(context, builder, sig.args[0], args[0])
    slice_struct = context.make_helper(builder, sig.args[1], args[1])

    slicing.guard_invalid_slice(context, builder, sig.args[1], slice_struct)
    list_inst.fix_slice(slice_struct)

    removed_count = slicing.get_slice_length(builder, slice_struct)

    step_one = ir.Constant(removed_count.type, 1)

    # Deleting an extended slice would leave holes in the payload,
    # so reject any step other than 1.
    with builder.if_then(builder.icmp_signed('!=', slice_struct.step, step_one),
                         likely=False):
        msg = "unsupported del list[start:stop:step] with step != 1"
        context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))

    # Effective end of the removed range, e.g. for dest[2:0].
    removed_start = slice_struct.start
    removed_end = builder.add(removed_start, removed_count)

    # Drop the reference held on each element inside the removed range.
    with cgutils.for_range_slice(
        builder, removed_start, removed_end, removed_start.type(1)
    ) as (pos, _):
        list_inst.decref_value(list_inst.getitem(pos))

    # Shift the tail (everything past the removed range) down, then shrink.
    tail_count = builder.sub(list_inst.size, removed_end)
    list_inst.move(removed_start, removed_end, tail_count)
    list_inst.resize(builder.sub(list_inst.size, removed_count))

    return context.get_dummy_value()


# XXX should there be a specific module for Sequence or collection base classes?
@lower_builtin(operator.contains, types.Sequence, types.Any)
def in_seq(context, builder, sig, args):
    """Lower ``value in seq`` via a jit-compiled linear scan."""
    def seq_contains_impl(lst, value):
        for elem in lst:
            if elem == value:
                return True
        return False

    return context.compile_internal(builder, seq_contains_impl, sig, args)

@lower_builtin(bool, types.Sequence)
def sequence_bool(context, builder, sig, args):
    """Lower ``bool(seq)``: true iff the sequence is non-empty."""
    def sequence_bool_impl(seq):
        return len(seq) != 0

    return context.compile_internal(builder, sequence_bool_impl, sig, args)


@overload(operator.truth)
def sequence_truth(seq):
    """``operator.truth(seq)`` mirrors ``bool(seq)`` for sequences."""
    if isinstance(seq, types.Sequence):
        def impl(seq):
            return len(seq) != 0
        return impl


@lower_builtin(operator.add, types.List, types.List)
def list_add(context, builder, sig, args):
    """Lower ``lhs + rhs``: allocate a fresh list and copy both operands."""
    lhs = ListInstance(context, builder, sig.args[0], args[0])
    rhs = ListInstance(context, builder, sig.args[1], args[1])

    lhs_size = lhs.size
    rhs_size = rhs.size
    total = builder.add(lhs_size, rhs_size)
    out = ListInstance.allocate(context, builder, sig.return_type, total)
    out.size = total

    # Copy the left operand first, casting each element to the result dtype.
    with cgutils.for_range(builder, lhs_size) as loop:
        elem = lhs.getitem(loop.index)
        elem = context.cast(builder, elem, lhs.dtype, out.dtype)
        out.setitem(loop.index, elem, incref=True)
    # Then append the right operand after the left one's elements.
    with cgutils.for_range(builder, rhs_size) as loop:
        elem = rhs.getitem(loop.index)
        elem = context.cast(builder, elem, rhs.dtype, out.dtype)
        out.setitem(builder.add(loop.index, lhs_size), elem, incref=True)

    return impl_ret_new_ref(context, builder, sig.return_type, out.value)

@lower_builtin(operator.iadd, types.List, types.List)
def list_add_inplace(context, builder, sig, args):
    """Lower ``lhs += rhs`` by extending ``lhs`` in place."""
    assert sig.args[0].dtype == sig.return_type.dtype
    extended = _list_extend_list(context, builder, sig, args)

    return impl_ret_borrowed(context, builder, sig.return_type, extended.value)


@lower_builtin(operator.mul, types.List, types.Integer)
@lower_builtin(operator.mul, types.Integer, types.List)
def list_mul(context, builder, sig, args):
    """Lower ``lst * n`` / ``n * lst``; a negative count behaves like 0."""
    if isinstance(sig.args[0], types.List):
        list_idx, int_idx = 0, 1
    else:
        list_idx, int_idx = 1, 0
    src = ListInstance(context, builder, sig.args[list_idx], args[list_idx])
    src_size = src.size

    mult = args[int_idx]
    zero = ir.Constant(mult.type, 0)
    # Clamp a negative multiplier to zero, as CPython does.
    mult = builder.select(cgutils.is_neg_int(builder, mult), zero, mult)
    nitems = builder.mul(mult, src_size)

    result = ListInstance.allocate(context, builder, sig.return_type, nitems)
    result.size = nitems

    # Write one full copy of the source list per repetition.
    with cgutils.for_range_slice(builder, zero, nitems, src_size,
                                 inc=True) as (dest_offset, _):
        with cgutils.for_range(builder, src_size) as loop:
            elem = src.getitem(loop.index)
            result.setitem(builder.add(loop.index, dest_offset), elem,
                           incref=True)

    return impl_ret_new_ref(context, builder, sig.return_type, result.value)

@lower_builtin(operator.imul, types.List, types.Integer)
def list_mul_inplace(context, builder, sig, args):
    """Lower ``lst *= n`` by resizing then replicating the original items."""
    inst = ListInstance(context, builder, sig.args[0], args[0])
    src_size = inst.size

    mult = args[1]
    zero = ir.Constant(mult.type, 0)
    mult = builder.select(cgutils.is_neg_int(builder, mult), zero, mult)
    nitems = builder.mul(mult, src_size)

    inst.resize(nitems)

    # The first copy is already in place; fill in the remaining repetitions.
    with cgutils.for_range_slice(builder, src_size, nitems, src_size,
                                 inc=True) as (dest_offset, _):
        with cgutils.for_range(builder, src_size) as loop:
            elem = inst.getitem(loop.index)
            inst.setitem(builder.add(loop.index, dest_offset), elem,
                         incref=True)

    return impl_ret_borrowed(context, builder, sig.return_type, inst.value)


#-------------------------------------------------------------------------------
# Comparisons

@lower_builtin(operator.is_, types.List, types.List)
def list_is(context, builder, sig, args):
    """Lower ``a is b`` as identity of the backing meminfo pointers."""
    a = ListInstance(context, builder, sig.args[0], args[0])
    b = ListInstance(context, builder, sig.args[1], args[1])
    ma = builder.ptrtoint(a.meminfo, cgutils.intp_t)
    mb = builder.ptrtoint(b.meminfo, cgutils.intp_t)
    return builder.icmp_signed('==', ma, mb)

+@lower_builtin(operator.eq, types.List, types.List) +def list_eq(context, builder, sig, args): + aty, bty = sig.args + a = ListInstance(context, builder, aty, args[0]) + b = ListInstance(context, builder, bty, args[1]) + + a_size = a.size + same_size = builder.icmp_signed('==', a_size, b.size) + + res = cgutils.alloca_once_value(builder, same_size) + + with builder.if_then(same_size): + with cgutils.for_range(builder, a_size) as loop: + v = a.getitem(loop.index) + w = b.getitem(loop.index) + itemres = context.generic_compare(builder, operator.eq, + (aty.dtype, bty.dtype), (v, w)) + with builder.if_then(builder.not_(itemres)): + # Exit early + builder.store(cgutils.false_bit, res) + loop.do_break() + + return builder.load(res) + + +def all_list(*args): + return all([isinstance(typ, types.List) for typ in args]) + +@overload(operator.ne) +def impl_list_ne(a, b): + if not all_list(a, b): + return + + def list_ne_impl(a, b): + return not (a == b) + + return list_ne_impl + +@overload(operator.le) +def impl_list_le(a, b): + if not all_list(a, b): + return + + def list_le_impl(a, b): + m = len(a) + n = len(b) + for i in range(min(m, n)): + if a[i] < b[i]: + return True + elif a[i] > b[i]: + return False + return m <= n + + return list_le_impl + +@overload(operator.lt) +def impl_list_lt(a, b): + if not all_list(a, b): + return + + def list_lt_impl(a, b): + m = len(a) + n = len(b) + for i in range(min(m, n)): + if a[i] < b[i]: + return True + elif a[i] > b[i]: + return False + return m < n + + return list_lt_impl + +@overload(operator.ge) +def impl_list_ge(a, b): + if not all_list(a, b): + return + + def list_ge_impl(a, b): + return b <= a + + return list_ge_impl + +@overload(operator.gt) +def impl_list_gt(a, b): + if not all_list(a, b): + return + + def list_gt_impl(a, b): + return b < a + + return list_gt_impl + +#------------------------------------------------------------------------------- +# Methods + +@lower_builtin("list.append", types.List, types.Any) +def 
list_append(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + item = args[1] + + n = inst.size + new_size = builder.add(n, ir.Constant(n.type, 1)) + inst.resize(new_size) + inst.setitem(n, item, incref=True) + + return context.get_dummy_value() + +@lower_builtin("list.clear", types.List) +def list_clear(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + inst.resize(context.get_constant(types.intp, 0)) + + return context.get_dummy_value() + + +@overload_method(types.List, "copy") +def list_copy(lst): + def list_copy_impl(lst): + return list(lst) + + return list_copy_impl + + +@overload_method(types.List, "count") +def list_count(lst, value): + + def list_count_impl(lst, value): + res = 0 + for elem in lst: + if elem == value: + res += 1 + return res + + return list_count_impl + + +def _list_extend_list(context, builder, sig, args): + src = ListInstance(context, builder, sig.args[1], args[1]) + dest = ListInstance(context, builder, sig.args[0], args[0]) + + src_size = src.size + dest_size = dest.size + nitems = builder.add(src_size, dest_size) + dest.resize(nitems) + dest.size = nitems + + with cgutils.for_range(builder, src_size) as loop: + value = src.getitem(loop.index) + value = context.cast(builder, value, src.dtype, dest.dtype) + dest.setitem(builder.add(loop.index, dest_size), value, incref=True) + + return dest + +@lower_builtin("list.extend", types.List, types.IterableType) +def list_extend(context, builder, sig, args): + if isinstance(sig.args[1], types.List): + # Specialize for list operands, for speed. 
+ _list_extend_list(context, builder, sig, args) + return context.get_dummy_value() + + def list_extend(lst, iterable): + # Speed hack to avoid NRT refcount operations inside the loop + meth = lst.append + for v in iterable: + meth(v) + + return context.compile_internal(builder, list_extend, sig, args) + + +if config.USE_LEGACY_TYPE_SYSTEM: + intp_max = types.intp.maxval +else: + intp_max = types.py_int.maxval + + +@overload_method(types.List, "index") +def list_index(lst, value, start=0, stop=intp_max): + + if not isinstance(start, (int, types.Integer, types.Omitted)): + raise errors.TypingError(f'arg "start" must be an Integer. Got {start}') + if not isinstance(stop, (int, types.Integer, types.Omitted)): + raise errors.TypingError(f'arg "stop" must be an Integer. Got {stop}') + + def list_index_impl(lst, value, start=0, stop=intp_max): + n = len(lst) + if start < 0: + start += n + if start < 0: + start = 0 + if stop < 0: + stop += n + if stop > n: + stop = n + for i in range(start, stop): + if lst[i] == value: + return i + # XXX references are leaked when raising + raise ValueError("value not in list") + return list_index_impl + + +@lower_builtin("list.insert", types.List, types.Integer, + types.Any) +def list_insert(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + index = inst.fix_index(args[1]) + index = inst.clamp_index(index) + value = args[2] + + n = inst.size + one = ir.Constant(n.type, 1) + new_size = builder.add(n, one) + inst.resize(new_size) + inst.move(builder.add(index, one), index, builder.sub(n, index)) + inst.setitem(index, value, incref=True, decref_old_value=False) + + return context.get_dummy_value() + +@lower_builtin("list.pop", types.List) +def list_pop(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + + n = inst.size + cgutils.guard_zero(context, builder, n, + (IndexError, "pop from empty list")) + n = builder.sub(n, ir.Constant(n.type, 1)) + res = 
inst.getitem(n) + inst.incref_value(res) # incref the pop'ed element + inst.clear_value(n) # clear the storage space + inst.resize(n) + return impl_ret_new_ref(context, builder, sig.return_type, res) + +@lower_builtin("list.pop", types.List, types.Integer) +def list_pop(context, builder, sig, args): + inst = ListInstance(context, builder, sig.args[0], args[0]) + idx = inst.fix_index(args[1]) + + n = inst.size + cgutils.guard_zero(context, builder, n, + (IndexError, "pop from empty list")) + inst.guard_index(idx, "pop index out of range") + + res = inst.getitem(idx) + + one = ir.Constant(n.type, 1) + n = builder.sub(n, ir.Constant(n.type, 1)) + inst.move(idx, builder.add(idx, one), builder.sub(n, idx)) + inst.resize(n) + return impl_ret_new_ref(context, builder, sig.return_type, res) + +@overload_method(types.List, "remove") +def list_remove(lst, value): + + def list_remove_impl(lst, value): + for i in range(len(lst)): + if lst[i] == value: + lst.pop(i) + return + # XXX references are leaked when raising + raise ValueError("list.remove(x): x not in list") + + return list_remove_impl + +@overload_method(types.List, "reverse") +def list_reverse(lst): + + def list_reverse_impl(lst): + for a in range(0, len(lst) // 2): + b = -a - 1 + lst[a], lst[b] = lst[b], lst[a] + + return list_reverse_impl + +# ----------------------------------------------------------------------------- +# Sorting + +def gt(a, b): + return a > b + +sort_forwards = quicksort.make_jit_quicksort().run_quicksort +sort_backwards = quicksort.make_jit_quicksort(lt=gt).run_quicksort + +arg_sort_forwards = quicksort.make_jit_quicksort(is_argsort=True, + is_list=True).run_quicksort +arg_sort_backwards = quicksort.make_jit_quicksort(is_argsort=True, lt=gt, + is_list=True).run_quicksort + + +def _sort_check_reverse(reverse): + if isinstance(reverse, types.Omitted): + rty = reverse.value + elif isinstance(reverse, types.Optional): + rty = reverse.type + else: + rty = reverse + if not isinstance(rty, 
(types.Boolean, types.Integer, int, bool)): + msg = "an integer is required for 'reverse' (got type %s)" % reverse + raise errors.TypingError(msg) + return rty + + +def _sort_check_key(key): + if isinstance(key, types.Optional): + msg = ("Key must concretely be None or a Numba JIT compiled function, " + "an Optional (union of None and a value) was found") + raise errors.TypingError(msg) + if not (cgutils.is_nonelike(key) or isinstance(key, types.Dispatcher)): + msg = "Key must be None or a Numba JIT compiled function" + raise errors.TypingError(msg) + + +@overload_method(types.List, "sort") +def ol_list_sort(lst, key=None, reverse=False): + + _sort_check_key(key) + _sort_check_reverse(reverse) + + if cgutils.is_nonelike(key): + KEY = False + sort_f = sort_forwards + sort_b = sort_backwards + elif isinstance(key, types.Dispatcher): + KEY = True + sort_f = arg_sort_forwards + sort_b = arg_sort_backwards + + def impl(lst, key=None, reverse=False): + if KEY is True: + _lst = [key(x) for x in lst] + else: + _lst = lst + if reverse is False or reverse == 0: + tmp = sort_f(_lst) + else: + tmp = sort_b(_lst) + if KEY is True: + lst[:] = [lst[i] for i in tmp] + return impl + + +@overload(sorted) +def ol_sorted(iterable, key=None, reverse=False): + + if not isinstance(iterable, types.IterableType): + return False + + _sort_check_key(key) + _sort_check_reverse(reverse) + + def impl(iterable, key=None, reverse=False): + lst = list(iterable) + lst.sort(key=key, reverse=reverse) + return lst + return impl + +# ----------------------------------------------------------------------------- +# Implicit casting + +@lower_cast(types.List, types.List) +def list_to_list(context, builder, fromty, toty, val): + # Casting from non-reflected to reflected + assert fromty.dtype == toty.dtype + return val + +# ----------------------------------------------------------------------------- +# Implementations for types.LiteralList +# 
----------------------------------------------------------------------------- + +_banned_error = errors.TypingError("Cannot mutate a literal list") + + +# Things that mutate literal lists are banned +@overload_method(types.LiteralList, 'append') +def literal_list_banned_append(lst, obj): + raise _banned_error + + +@overload_method(types.LiteralList, 'extend') +def literal_list_banned_extend(lst, iterable): + raise _banned_error + + +@overload_method(types.LiteralList, 'insert') +def literal_list_banned_insert(lst, index, obj): + raise _banned_error + + +@overload_method(types.LiteralList, 'remove') +def literal_list_banned_remove(lst, value): + raise _banned_error + + +@overload_method(types.LiteralList, 'pop') +def literal_list_banned_pop(lst, index=-1): + raise _banned_error + + +@overload_method(types.LiteralList, 'clear') +def literal_list_banned_clear(lst): + raise _banned_error + + +@overload_method(types.LiteralList, 'sort') +def literal_list_banned_sort(lst, key=None, reverse=False): + raise _banned_error + + +@overload_method(types.LiteralList, 'reverse') +def literal_list_banned_reverse(lst): + raise _banned_error + +if config.USE_LEGACY_TYPE_SYSTEM: + _index_end = types.intp.maxval +else: + _index_end = types.py_int.maxval + +@overload_method(types.LiteralList, 'index') +def literal_list_index(lst, x, start=0, end=_index_end): + # TODO: To make this work, need consts as slice for start/end so as to + # be able to statically analyse the bounds, then its a just loop body + # versioning based iteration along with enumerate to find the item + if isinstance(lst, types.LiteralList): + msg = "list.index is unsupported for literal lists" + raise errors.TypingError(msg) + +@overload_method(types.LiteralList, 'count') +def literal_list_count(lst, x): + if isinstance(lst, types.LiteralList): + def impl(lst, x): + count = 0 + for val in literal_unroll(lst): + if val == x: + count += 1 + return count + return impl + +@overload_method(types.LiteralList, 'copy') +def 
literal_list_count(lst): + if isinstance(lst, types.LiteralList): + def impl(lst): + return lst # tuples are immutable, as is this, so just return it + return impl + +@overload(operator.delitem) +def literal_list_delitem(lst, index): + if isinstance(lst, types.LiteralList): + raise _banned_error + +@overload(operator.setitem) +def literal_list_setitem(lst, index, value): + if isinstance(lst, types.LiteralList): + raise errors.TypingError("Cannot mutate a literal list") + +@overload(operator.getitem) +def literal_list_getitem(lst, *args): + if not isinstance(lst, types.LiteralList): + return + msg = ("Cannot __getitem__ on a literal list, return type cannot be " + "statically determined.") + raise errors.TypingError(msg) + +@overload(len) +def literal_list_len(lst): + if not isinstance(lst, types.LiteralList): + return + l = lst.count + return lambda lst: l + +@overload(operator.contains) +def literal_list_contains(lst, item): + if isinstance(lst, types.LiteralList): + def impl(lst, item): + for val in literal_unroll(lst): + if val == item: + return True + return False + return impl + +@lower_cast(types.LiteralList, types.LiteralList) +def literallist_to_literallist(context, builder, fromty, toty, val): + if len(fromty) != len(toty): + # Disallowed by typing layer + raise NotImplementedError + + olditems = cgutils.unpack_tuple(builder, val, len(fromty)) + items = [context.cast(builder, v, f, t) + for v, f, t in zip(olditems, fromty, toty)] + return context.make_tuple(builder, toty, items) diff --git a/lib/python3.10/site-packages/numba/cpython/old_hashing.py b/lib/python3.10/site-packages/numba/cpython/old_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..836d1616a2a49e291321fce0823492cc48e33273 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cpython/old_hashing.py @@ -0,0 +1,743 @@ +""" +Hash implementations for Numba types +""" + +import math +import numpy as np +import sys +import ctypes +import warnings +from collections 
import namedtuple + +import llvmlite.binding as ll +from llvmlite import ir + +from numba import literal_unroll +from numba.core.extending import ( + overload, overload_method, intrinsic, register_jitable) +from numba.core import errors +from numba.core import types +from numba.core.unsafe.bytes import grab_byte, grab_uint64_t +from numba.cpython.randomimpl import (const_int, get_next_int, get_next_int32, + get_state_ptr) + +# This is Py_hash_t, which is a Py_ssize_t, which has sizeof(size_t): +# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyport.h#L91-L96 # noqa: E501 +_hash_width = sys.hash_info.width +_Py_hash_t = getattr(types, 'int%s' % _hash_width) +_Py_uhash_t = getattr(types, 'uint%s' % _hash_width) + +# Constants from CPython source, obtained by various means: +# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyhash.h # noqa: E501 +_PyHASH_INF = sys.hash_info.inf +_PyHASH_NAN = sys.hash_info.nan +_PyHASH_MODULUS = _Py_uhash_t(sys.hash_info.modulus) +_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61 # mersenne primes +_PyHASH_MULTIPLIER = 0xf4243 # 1000003UL +_PyHASH_IMAG = _PyHASH_MULTIPLIER +_PyLong_SHIFT = sys.int_info.bits_per_digit +_Py_HASH_CUTOFF = sys.hash_info.cutoff +_Py_hashfunc_name = sys.hash_info.algorithm + + +# This stub/overload pair are used to force branch pruning to remove the dead +# branch based on the potential `None` type of the hash_func which works better +# if the predicate for the prune in an ir.Arg. The obj is an arg to allow for +# a custom error message. 
def _defer_hash(obj, hash_func):
    # Stub only; the @overload below supplies the implementation. The stub's
    # signature must match ol_defer_hash's (obj, hash_func) — the original
    # one-parameter form broke typing of the two-argument call in
    # hash_overload below.
    pass


@overload(_defer_hash)
def ol_defer_hash(obj, hash_func):
    err_msg = f"unhashable type: '{obj}'"

    def impl(obj, hash_func):
        if hash_func is None:
            raise TypeError(err_msg)
        else:
            return hash_func()
    return impl


# hash(obj) is implemented by calling obj.__hash__(), deferred through
# _defer_hash so branch pruning can remove the unhashable branch.
@overload(hash)
def hash_overload(obj):
    attempt_generic_msg = ("No __hash__ is defined for object of type "
                           f"'{obj}' and a generic hash() cannot be "
                           "performed as there is no suitable object "
                           "represention in Numba compiled code!")

    def impl(obj):
        if hasattr(obj, '__hash__'):
            return _defer_hash(obj, getattr(obj, '__hash__'))
        else:
            raise TypeError(attempt_generic_msg)
    return impl


@register_jitable
def process_return(val):
    # CPython reserves -1 as an error marker, so a computed hash of -1 is
    # remapped to -2 (see CPython's pyhash.c).
    asint = _Py_hash_t(val)
    if (asint == int(-1)):
        asint = int(-2)
    return asint


# This is a translation of CPython's _Py_HashDouble:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L34-L129 # noqa: E501
# NOTE: In Python 3.10 hash of nan is the hash of the pointer to the PyObject
# containing said nan. Numba cannot replicate this as there is no object, so it
# elects to replicate the behaviour, i.e. hash of nan is something "unique"
# which satisfies https://bugs.python.org/issue43475.

@register_jitable(locals={'x': _Py_uhash_t,
                          'y': _Py_uhash_t,
                          'm': types.double,
                          'e': types.intc,
                          'sign': types.intc,
                          '_PyHASH_MODULUS': _Py_uhash_t,
                          '_PyHASH_BITS': types.intc})
def _Py_HashDouble(v):
    if not np.isfinite(v):
        if (np.isinf(v)):
            if (v > 0):
                return _PyHASH_INF
            else:
                return -_PyHASH_INF
        else:
            # NaN: Python 3.10+ hashes the object pointer; there is no object
            # here, so return a pseudo-random number in the same spirit.
            x = _prng_random_hash()
            return process_return(x)

    m, e = math.frexp(v)

    sign = 1
    if (m < 0):
        sign = -1
        m = -m

    # process 28 bits at a time; this should work well both for binary
    # and hexadecimal floating point.
    x = 0
    while (m):
        x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28)
        m *= 268435456.0  # /* 2**28 */
        e -= 28
        y = int(m)  # /* pull out integer part */
        m -= y
        x += y
        if x >= _PyHASH_MODULUS:
            x -= _PyHASH_MODULUS

    # /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
    if e >= 0:
        e = e % _PyHASH_BITS
    else:
        e = _PyHASH_BITS - 1 - ((-1 - e) % _PyHASH_BITS)

    x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e)

    x = x * sign
    return process_return(x)


@intrinsic
def _fpext(tyctx, val):
    # Widen a float32 to float64 at the LLVM level.
    def impl(cgctx, builder, signature, args):
        val = args[0]
        return builder.fpext(val, ir.DoubleType())
    sig = types.float64(types.float32)
    return sig, impl


@intrinsic
def _prng_random_hash(tyctx):
    # Draw a pseudo-random Py_hash_t from the internal PRNG state.

    def impl(cgctx, builder, signature, args):
        state_ptr = get_state_ptr(cgctx, builder, "internal")
        bits = const_int(_hash_width)

        # Why not just use get_next_int() with the correct bitwidth?
        # get_next_int() always returns an i64, because the bitwidth it is
        # passed may not be a compile-time constant, so it needs to allocate
        # the largest unit of storage that may be required. Therefore, if the
        # hash width is 32, then we need to use get_next_int32() to ensure we
        # don't return a wider-than-expected hash, even if everything above
        # the low 32 bits would have been zero.
        if _hash_width == 32:
            value = get_next_int32(cgctx, builder, state_ptr)
        else:
            value = get_next_int(cgctx, builder, state_ptr, bits, False)

        return value

    sig = _Py_hash_t()
    return sig, impl


# This is a translation of CPython's long_hash, but restricted to the numerical
# domain reachable by int64/uint64 (i.e. no BigInt like support):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/longobject.c#L2934-L2989 # noqa: E501
# obdigit is a uint32_t which is typedef'd to digit
# int32_t is typedef'd to sdigit


@register_jitable(locals={'x': _Py_uhash_t,
                          'p1': _Py_uhash_t,
                          'p2': _Py_uhash_t,
                          'p3': _Py_uhash_t,
                          'p4': _Py_uhash_t,
                          '_PyHASH_MODULUS': _Py_uhash_t,
                          '_PyHASH_BITS': types.int32,
                          '_PyLong_SHIFT': types.int32,})
def _long_impl(val):
    # This function assumes val came from a long int repr with val being a
    # uint64_t this means having to split the input into PyLong_SHIFT size
    # chunks in an unsigned hash wide type, max numba can handle is a 64bit int

    # mask to select low _PyLong_SHIFT bits
    _tmp_shift = 32 - _PyLong_SHIFT
    mask_shift = (~types.uint32(0x0)) >> _tmp_shift

    # a 64bit wide max means Numba only needs 3 x 30 bit values max,
    # or 5 x 15 bit values max on 32bit platforms
    i = (64 // _PyLong_SHIFT) + 1

    # alg as per hash_long
    x = 0
    p3 = (_PyHASH_BITS - _PyLong_SHIFT)
    for idx in range(i - 1, -1, -1):
        p1 = x << _PyLong_SHIFT
        p2 = p1 & _PyHASH_MODULUS
        p4 = x >> p3
        x = p2 | p4
        # the shift and mask splits out the `ob_digit` parts of a Long repr
        x += types.uint32((val >> idx * _PyLong_SHIFT) & mask_shift)
        if x >= _PyHASH_MODULUS:
            x -= _PyHASH_MODULUS
    return _Py_hash_t(x)


# This has no CPython equivalent, CPython uses long_hash.
@overload_method(types.Integer, '__hash__')
@overload_method(types.Boolean, '__hash__')
def int_hash(val):

    _HASH_I64_MIN = -2 if sys.maxsize <= 2 ** 32 else -4
    _SIGNED_MIN = types.int64(-0x8000000000000000)

    # Find a suitable type to hold a "big" value, i.e. iinfo(ty).min/max
    # this is to ensure e.g. int32.min is handled ok as its abs() is its value
    _BIG = types.int64 if getattr(val, 'signed', False) else types.uint64

    # this is a bit involved due to the CPython repr of ints
    def impl(val):
        # If the magnitude is under PyHASH_MODULUS, just return the
        # value val as the hash, couple of special cases if val == val:
        # 1. it's 0, in which case return 0
        # 2. it's signed int minimum value, return the value CPython computes
        # but Numba cannot as there's no type wide enough to hold the shifts.
        #
        # If the magnitude is greater than PyHASH_MODULUS then... if the value
        # is negative then negate it switch the sign on the hash once computed
        # and use the standard wide unsigned hash implementation
        val = _BIG(val)
        mag = abs(val)
        if mag < _PyHASH_MODULUS:
            if val == 0:
                ret = 0
            elif val == _SIGNED_MIN:  # e.g. int64 min, -0x8000000000000000
                ret = _Py_hash_t(_HASH_I64_MIN)
            else:
                ret = _Py_hash_t(val)
        else:
            needs_negate = False
            if val < 0:
                val = -val
                needs_negate = True
            ret = _long_impl(val)
            if needs_negate:
                ret = -ret
        return process_return(ret)
    return impl

# This is a translation of CPython's float_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/floatobject.c#L528-L532 # noqa: E501


@overload_method(types.Float, '__hash__')
def float_hash(val):
    if val.bitwidth == 64:
        def impl(val):
            hashed = _Py_HashDouble(val)
            return hashed
    else:
        def impl(val):
            # widen the 32bit float to 64bit
            fpextended = np.float64(_fpext(val))
            hashed = _Py_HashDouble(fpextended)
            return hashed
    return impl

# This is a translation of CPython's complex_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/complexobject.c#L408-L428 # noqa: E501


@overload_method(types.Complex, '__hash__')
def complex_hash(val):
    def impl(val):
        hashreal = hash(val.real)
        hashimag = hash(val.imag)
        # Note: if the imaginary part is 0, hashimag is 0 now,
        # so the following returns hashreal unchanged. This is
        # important because numbers of different types that
        # compare equal must have the same hash value, so that
        # hash(x + 0*j) must equal hash(x).
        combined = hashreal + _PyHASH_IMAG * hashimag
        return process_return(combined)
    return impl


# Python 3.8 strengthened its hash alg for tuples.
# This is a translation of CPython's tuplehash for Python >=3.8
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L338-L391 # noqa: E501

# These consts are needed for this alg variant, they are from:
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L353-L363 # noqa: E501
if _Py_uhash_t.bitwidth // 8 > 4:
    _PyHASH_XXPRIME_1 = _Py_uhash_t(11400714785074694791)
    _PyHASH_XXPRIME_2 = _Py_uhash_t(14029467366897019727)
    _PyHASH_XXPRIME_5 = _Py_uhash_t(2870177450012600261)

    @register_jitable(locals={'x': types.uint64})
    def _PyHASH_XXROTATE(x):
        # Rotate left 31 bits
        return ((x << types.uint64(31)) | (x >> types.uint64(33)))
else:
    _PyHASH_XXPRIME_1 = _Py_uhash_t(2654435761)
    _PyHASH_XXPRIME_2 = _Py_uhash_t(2246822519)
    _PyHASH_XXPRIME_5 = _Py_uhash_t(374761393)

    @register_jitable(locals={'x': types.uint64})
    def _PyHASH_XXROTATE(x):
        # Rotate left 13 bits
        return ((x << types.uint64(13)) | (x >> types.uint64(19)))


@register_jitable(locals={'acc': _Py_uhash_t, 'lane': _Py_uhash_t,
                          '_PyHASH_XXPRIME_5': _Py_uhash_t,
                          '_PyHASH_XXPRIME_1': _Py_uhash_t,
                          'tl': _Py_uhash_t})
def _tuple_hash(tup):
    # xxHash-style combination of the element hashes, as in CPython >= 3.8.
    tl = len(tup)
    acc = _PyHASH_XXPRIME_5
    for x in literal_unroll(tup):
        lane = hash(x)
        if lane == _Py_uhash_t(-1):
            return -1
        acc += lane * _PyHASH_XXPRIME_2
        acc = _PyHASH_XXROTATE(acc)
        acc *= _PyHASH_XXPRIME_1

    acc += tl ^ (_PyHASH_XXPRIME_5 ^ _Py_uhash_t(3527539))

    if acc == _Py_uhash_t(-1):
        return process_return(1546275796)

    return process_return(acc)

@overload_method(types.BaseTuple, '__hash__')
def tuple_hash(val):
    """Overload tuple.__hash__ by delegating to _tuple_hash."""
    def impl(val):
        return _tuple_hash(val)
    return impl


# ------------------------------------------------------------------------------
# String/bytes hashing needs hashseed info, this is from:
# https://stackoverflow.com/a/41088757
# with thanks to Martijn Pieters
#
# Developer note:
# CPython makes use of an internal "hashsecret" which is essentially a struct
# containing some state that is set on CPython initialization and contains magic
# numbers used particularly in unicode/string hashing. This code binds to the
# Python runtime libraries in use by the current process and reads the
# "hashsecret" state so that it can be used by Numba. As this is done at runtime
# the behaviour and influence of the PYTHONHASHSEED environment variable is
# accommodated.

from ctypes import (  # noqa
    c_size_t,
    c_ubyte,
    c_uint64,
    pythonapi,
    Structure,
    Union,
)  # noqa


# The following ctypes Structures mirror the members of CPython's
# _Py_HashSecret_t union (see CPython's pyhash.h).

class FNV(Structure):
    _fields_ = [
        ('prefix', c_size_t),
        ('suffix', c_size_t)
    ]


class SIPHASH(Structure):
    _fields_ = [
        ('k0', c_uint64),
        ('k1', c_uint64),
    ]


class DJBX33A(Structure):
    _fields_ = [
        ('padding', c_ubyte * 16),
        ('suffix', c_size_t),
    ]


class EXPAT(Structure):
    _fields_ = [
        ('padding', c_ubyte * 16),
        ('hashsalt', c_size_t),
    ]


class _Py_HashSecret_t(Union):
    _fields_ = [
        # ensure 24 bytes
        ('uc', c_ubyte * 24),
        # two Py_hash_t for FNV
        ('fnv', FNV),
        # two uint64 for SipHash24
        ('siphash', SIPHASH),
        # a different (!) Py_hash_t for small string optimization
        ('djbx33a', DJBX33A),
        ('expat', EXPAT),
    ]


_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])


# Only a few members are needed at present
def _build_hashsecret():
    """Read hash secret from the Python process

    Returns
    -------
    info : dict
        - keys are "djbx33a_suffix", "siphash_k0", siphash_k1".
        - values are the namedtuple[symbol:str, value:int]
    """
    # Read hashsecret and inject it into the LLVM symbol map under the
    # prefix `_numba_hashsecret_`.
    pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
    info = {}

    def inject(name, val):
        symbol_name = "_numba_hashsecret_{}".format(name)
        val = ctypes.c_uint64(val)
        addr = ctypes.addressof(val)
        ll.add_symbol(symbol_name, addr)
        # NOTE: storing `val` in the returned dict keeps the ctypes buffer
        # alive, so the address registered with LLVM stays valid.
        info[name] = _hashsecret_entry(symbol=symbol_name, value=val)

    inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
    inject('siphash_k0', pyhashsecret.siphash.k0)
    inject('siphash_k1', pyhashsecret.siphash.k1)
    return info


# Module-level side effect: read the interpreter's hash secret once at import
_hashsecret = _build_hashsecret()


# ------------------------------------------------------------------------------


if _Py_hashfunc_name in ('siphash13', 'siphash24', 'fnv'):

    # Check for use of the FNV hashing alg, warn users that it's not implemented
    # and functionality relying of properties derived from hashing will be fine
    # but hash values themselves are likely to be different.
    if _Py_hashfunc_name == 'fnv':
        msg = ("FNV hashing is not implemented in Numba. See PEP 456 "
               "https://www.python.org/dev/peps/pep-0456/ "
               "for rationale over not using FNV. Numba will continue to work, "
               "but hashes for built in types will be computed using "
               "siphash24. This will permit e.g. dictionaries to continue to "
               "behave as expected, however anything relying on the value of "
               "the hash opposed to hash as a derived property is likely to "
               "not work as expected.")
        warnings.warn(msg)

    # This is a translation of CPython's siphash24 function:
    # https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413 # noqa: E501
    # and also, since Py 3.11, a translation of CPython's siphash13 function:
    # https://github.com/python/cpython/blob/9dda9020abcf0d51d59b283a89c58c8e1fb0f574/Python/pyhash.c#L376-L424
    # the only differences are in the use of SINGLE_ROUND in siphash13 vs.
    # DOUBLE_ROUND in siphash24, and that siphash13 has an extra "ROUND" applied
    # just before the final XORing of components to create the return value.

    # /* *********************************************************************
    #
    # Copyright (c) 2013 Marek Majkowski

    # Permission is hereby granted, free of charge, to any person obtaining a
    # copy of this software and associated documentation files (the "Software"),
    # to deal in the Software without restriction, including without limitation
    # the rights to use, copy, modify, merge, publish, distribute, sublicense,
    # and/or sell copies of the Software, and to permit persons to whom the
    # Software is furnished to do so, subject to the following conditions:

    # The above copyright notice and this permission notice shall be included in
    # all copies or substantial portions of the Software.

    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    # DEALINGS IN THE SOFTWARE.
    #

    # Original location:
    # https://github.com/majek/csiphash/

    # Solution inspired by code from:
    # Samuel Neves (supercop/crypto_auth/siphash24/little)
    # djb (supercop/crypto_auth/siphash24/little2)
    # Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)

    # Modified for Python by Christian Heimes:
    # - C89 / MSVC compatibility
    # - _rotl64() on Windows
    # - letoh64() fallback
    # */

    @register_jitable(locals={'x': types.uint64,
                              'b': types.uint64, })
    def _ROTATE(x, b):
        # 64-bit rotate-left by b bits
        return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))

    @register_jitable(locals={'a': types.uint64,
                              'b': types.uint64,
                              'c': types.uint64,
                              'd': types.uint64,
                              's': types.uint64,
                              't': types.uint64, })
    def _HALF_ROUND(a, b, c, d, s, t):
        a += b
        c += d
        b = _ROTATE(b, s) ^ a
        d = _ROTATE(d, t) ^ c
        a = _ROTATE(a, 32)
        return a, b, c, d

    @register_jitable(locals={'v0': types.uint64,
                              'v1': types.uint64,
                              'v2': types.uint64,
                              'v3': types.uint64, })
    def _SINGLE_ROUND(v0, v1, v2, v3):
        v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
        v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
        return v0, v1, v2, v3

    @register_jitable(locals={'v0': types.uint64,
                              'v1': types.uint64,
                              'v2': types.uint64,
                              'v3': types.uint64, })
    def _DOUBLE_ROUND(v0, v1, v2, v3):
        v0, v1, v2, v3 = _SINGLE_ROUND(v0, v1, v2, v3)
        v0, v1, v2, v3 = _SINGLE_ROUND(v0, v1, v2, v3)
        return v0, v1, v2, v3

    def _gen_siphash(alg):
        """Build a jitable siphash implementation for `alg`
        ('siphash13' or 'siphash24')."""
        if alg == 'siphash13':
            _ROUNDER = _SINGLE_ROUND
            _EXTRA_ROUND = True
        elif alg == 'siphash24':
            _ROUNDER = _DOUBLE_ROUND
            _EXTRA_ROUND = False
        else:
            assert 0, 'unreachable'

        @register_jitable(locals={'v0': types.uint64,
                                  'v1': types.uint64,
                                  'v2': types.uint64,
                                  'v3': types.uint64,
                                  'b': types.uint64,
                                  'mi': types.uint64,
                                  't': types.uint64,
                                  'mask': types.uint64,
                                  'jmp': types.uint64,
                                  'ohexefef': types.uint64})
        def _siphash(k0, k1, src, src_sz):
            # length byte goes into the top 8 bits of b
            b = types.uint64(src_sz) << 56
            # key/constant initialisation ("somepseudorandomlygeneratedbytes")
            v0 = k0 ^ types.uint64(0x736f6d6570736575)
            v1 = k1 ^ types.uint64(0x646f72616e646f6d)
            v2 = k0 ^ types.uint64(0x6c7967656e657261)
            v3 = k1 ^ types.uint64(0x7465646279746573)

            # bulk: consume whole 8-byte words
            idx = 0
            while (src_sz >= 8):
                mi = grab_uint64_t(src, idx)
                idx += 1
                src_sz -= 8
                v3 ^= mi
                v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
                v0 ^= mi

            # this is the switch fallthrough:
            # https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400 # noqa: E501
            t = types.uint64(0x0)
            boffset = idx * 8
            ohexefef = types.uint64(0xff)
            if src_sz >= 7:
                jmp = (6 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6))
                                  << jmp)
            if src_sz >= 6:
                jmp = (5 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5))
                                  << jmp)
            if src_sz >= 5:
                jmp = (4 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4))
                                  << jmp)
            if src_sz >= 4:
                t &= types.uint64(0xffffffff00000000)
                for i in range(4):
                    jmp = i * 8
                    mask = ~types.uint64(ohexefef << jmp)
                    t = (t & mask) | (types.uint64(grab_byte(src, boffset + i))
                                      << jmp)
            if src_sz >= 3:
                jmp = (2 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2))
                                  << jmp)
            if src_sz >= 2:
                jmp = (1 * 8)
                mask = ~types.uint64(ohexefef << jmp)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1))
                                  << jmp)
            if src_sz >= 1:
                mask = ~(ohexefef)
                t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))

            # finalisation
            b |= t
            v3 ^= b
            v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            v0 ^= b
            v2 ^= ohexefef
            v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            if _EXTRA_ROUND:
                v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
            t = (v0 ^ v1) ^ (v2 ^ v3)
            return t

        return _siphash

    _siphash13 = _gen_siphash('siphash13')
    _siphash24 = _gen_siphash('siphash24')

    # FNV interpreters also fall back to siphash24 (warned about above)
    _siphasher = _siphash13 if _Py_hashfunc_name == 'siphash13' else _siphash24

else:
    msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
    raise ValueError(msg)


@intrinsic
def _inject_hashsecret_read(tyctx, name):
    """Emit code to load the hashsecret.
    """
    if not isinstance(name, types.StringLiteral):
        raise errors.TypingError("requires literal string")

    sym = _hashsecret[name.literal_value].symbol
    resty = types.uint64
    sig = resty(name)

    def impl(cgctx, builder, sig, args):
        mod = builder.module
        try:
            # Search for existing global
            gv = mod.get_global(sym)
        except KeyError:
            # Inject the symbol if not already exist.
            gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
        v = builder.load(gv)
        return v

    return sig, impl


def _load_hashsecret(name):
    # Pure-python path: read straight from the cached hashsecret dict
    return _hashsecret[name].value


@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
    # Jitted path: read via the injected LLVM global symbol
    def imp(name):
        return _inject_hashsecret_read(name)
    return imp


# This is a translation of CPython's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191 # noqa: E501


@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
    if (_len == 0):
        return process_return(0)

    if (_len < _Py_HASH_CUTOFF):
        # TODO: this branch needs testing, needs a CPython setup for it!
        # /* Optimize hashing of very small strings with inline DJBX33A. */
        _hash = _Py_uhash_t(5381)  # /* DJBX33A starts with 5381 */
        for idx in range(_len):
            _hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))

        _hash ^= _len
        _hash ^= _load_hashsecret('djbx33a_suffix')
    else:
        tmp = _siphasher(types.uint64(_load_hashsecret('siphash_k0')),
                         types.uint64(_load_hashsecret('siphash_k1')),
                         val, _len)
        _hash = process_return(tmp)
    return process_return(_hash)

# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663 # noqa: E501


@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
    from numba.cpython.unicode import _kind_to_byte_width

    def impl(val):
        kindwidth = _kind_to_byte_width(val._kind)
        _len = len(val)
        # use the cache if possible
        current_hash = val._hash
        if current_hash != -1:
            return current_hash
        else:
            # cannot write hash value to cache in the unicode struct due to
            # pass by value on the struct making the struct member immutable
            return _Py_HashBytes(val._data, kindwidth * _len)

    return impl
diff --git a/lib/python3.10/site-packages/numba/cpython/setobj.py b/lib/python3.10/site-packages/numba/cpython/setobj.py
new file mode 100644
index 0000000000000000000000000000000000000000..441067ef365c12e71e4414de2eff05a10421f078
--- /dev/null
+++ b/lib/python3.10/site-packages/numba/cpython/setobj.py
@@ -0,0 +1,1711 @@
"""
Support for native homogeneous sets.
+""" + + +import collections +import contextlib +import math +import operator +from functools import cached_property + +from llvmlite import ir +from numba.core import types, typing, cgutils +from numba.core.imputils import (lower_builtin, lower_cast, + iternext_impl, impl_ret_borrowed, + impl_ret_new_ref, impl_ret_untracked, + for_iter, call_len, RefType) +from numba.misc import quicksort +from numba.cpython import slicing +from numba.core.errors import NumbaValueError, TypingError +from numba.core.extending import overload, overload_method, intrinsic + + +def get_payload_struct(context, builder, set_type, ptr): + """ + Given a set value and type, get its payload structure (as a + reference, so that mutations are seen by all). + """ + payload_type = types.SetPayload(set_type) + ptrty = context.get_data_type(payload_type).as_pointer() + payload = builder.bitcast(ptr, ptrty) + return context.make_data_helper(builder, payload_type, ref=payload) + + +def get_entry_size(context, set_type): + """ + Return the entry size for the given set type. + """ + llty = context.get_data_type(types.SetEntry(set_type)) + return context.get_abi_sizeof(llty) + + +# Note these values are special: +# - EMPTY is obtained by issuing memset(..., 0xFF) +# - (unsigned) EMPTY > (unsigned) DELETED > any other hash value +EMPTY = -1 +DELETED = -2 +FALLBACK = -43 + +# Minimal size of entries table. Must be a power of 2! +MINSIZE = 16 + +# Number of cache-friendly linear probes before switching to non-linear probing +LINEAR_PROBES = 3 + +DEBUG_ALLOCS = False + + +def get_hash_value(context, builder, typ, value): + """ + Compute the hash of the given value. 
+ """ + typingctx = context.typing_context + fnty = typingctx.resolve_value_type(hash) + sig = fnty.get_call_type(typingctx, (typ,), {}) + fn = context.get_function(fnty, sig) + h = fn(builder, (value,)) + # Fixup reserved values + is_ok = is_hash_used(context, builder, h) + fallback = ir.Constant(h.type, FALLBACK) + return builder.select(is_ok, h, fallback) + + +@intrinsic +def _get_hash_value_intrinsic(typingctx, value): + def impl(context, builder, typ, args): + return get_hash_value(context, builder, value, args[0]) + fnty = typingctx.resolve_value_type(hash) + sig = fnty.get_call_type(typingctx, (value,), {}) + return sig, impl + + +def is_hash_empty(context, builder, h): + """ + Whether the hash value denotes an empty entry. + """ + empty = ir.Constant(h.type, EMPTY) + return builder.icmp_unsigned('==', h, empty) + +def is_hash_deleted(context, builder, h): + """ + Whether the hash value denotes a deleted entry. + """ + deleted = ir.Constant(h.type, DELETED) + return builder.icmp_unsigned('==', h, deleted) + +def is_hash_used(context, builder, h): + """ + Whether the hash value denotes an active entry. 
+ """ + # Everything below DELETED is an used entry + deleted = ir.Constant(h.type, DELETED) + return builder.icmp_unsigned('<', h, deleted) + + +def check_all_set(*args): + if not all([isinstance(typ, types.Set) for typ in args]): + raise TypingError(f"All arguments must be Sets, got {args}") + + if not all([args[0].dtype == s.dtype for s in args]): + raise TypingError(f"All Sets must be of the same type, got {args}") + + +SetLoop = collections.namedtuple('SetLoop', ('index', 'entry', 'do_break')) + + +class _SetPayload(object): + + def __init__(self, context, builder, set_type, ptr): + payload = get_payload_struct(context, builder, set_type, ptr) + self._context = context + self._builder = builder + self._ty = set_type + self._payload = payload + self._entries = payload._get_ptr_by_name('entries') + self._ptr = ptr + + @property + def mask(self): + return self._payload.mask + + @mask.setter + def mask(self, value): + # CAUTION: mask must be a power of 2 minus 1 + self._payload.mask = value + + @property + def used(self): + return self._payload.used + + @used.setter + def used(self, value): + self._payload.used = value + + @property + def fill(self): + return self._payload.fill + + @fill.setter + def fill(self, value): + self._payload.fill = value + + @property + def finger(self): + return self._payload.finger + + @finger.setter + def finger(self, value): + self._payload.finger = value + + @property + def dirty(self): + return self._payload.dirty + + @dirty.setter + def dirty(self, value): + self._payload.dirty = value + + @property + def entries(self): + """ + A pointer to the start of the entries array. + """ + return self._entries + + @property + def ptr(self): + """ + A pointer to the start of the NRT-allocated area. + """ + return self._ptr + + def get_entry(self, idx): + """ + Get entry number *idx*. 
+ """ + entry_ptr = cgutils.gep(self._builder, self._entries, idx) + entry = self._context.make_data_helper(self._builder, + types.SetEntry(self._ty), + ref=entry_ptr) + return entry + + def _lookup(self, item, h, for_insert=False): + """ + Lookup the *item* with the given hash values in the entries. + + Return a (found, entry index) tuple: + - If found is true, points to the entry containing + the item. + - If found is false, points to the empty entry that + the item can be written to (only if *for_insert* is true) + """ + context = self._context + builder = self._builder + + intp_t = h.type + + mask = self.mask + dtype = self._ty.dtype + tyctx = context.typing_context + fnty = tyctx.resolve_value_type(operator.eq) + sig = fnty.get_call_type(tyctx, (dtype, dtype), {}) + eqfn = context.get_function(fnty, sig) + + one = ir.Constant(intp_t, 1) + five = ir.Constant(intp_t, 5) + + # The perturbation value for probing + perturb = cgutils.alloca_once_value(builder, h) + # The index of the entry being considered: start with (hash & mask) + index = cgutils.alloca_once_value(builder, + builder.and_(h, mask)) + if for_insert: + # The index of the first deleted entry in the lookup chain + free_index_sentinel = mask.type(-1) # highest unsigned index + free_index = cgutils.alloca_once_value(builder, free_index_sentinel) + + bb_body = builder.append_basic_block("lookup.body") + bb_found = builder.append_basic_block("lookup.found") + bb_not_found = builder.append_basic_block("lookup.not_found") + bb_end = builder.append_basic_block("lookup.end") + + def check_entry(i): + """ + Check entry *i* against the value being searched for. 
+ """ + entry = self.get_entry(i) + entry_hash = entry.hash + + with builder.if_then(builder.icmp_unsigned('==', h, entry_hash)): + # Hashes are equal, compare values + # (note this also ensures the entry is used) + eq = eqfn(builder, (item, entry.key)) + with builder.if_then(eq): + builder.branch(bb_found) + + with builder.if_then(is_hash_empty(context, builder, entry_hash)): + builder.branch(bb_not_found) + + if for_insert: + # Memorize the index of the first deleted entry + with builder.if_then(is_hash_deleted(context, builder, entry_hash)): + j = builder.load(free_index) + j = builder.select(builder.icmp_unsigned('==', j, free_index_sentinel), + i, j) + builder.store(j, free_index) + + # First linear probing. When the number of collisions is small, + # the lineary probing loop achieves better cache locality and + # is also slightly cheaper computationally. + with cgutils.for_range(builder, ir.Constant(intp_t, LINEAR_PROBES)): + i = builder.load(index) + check_entry(i) + i = builder.add(i, one) + i = builder.and_(i, mask) + builder.store(i, index) + + # If not found after linear probing, switch to a non-linear + # perturbation keyed on the unmasked hash value. + # XXX how to tell LLVM this branch is unlikely? + builder.branch(bb_body) + with builder.goto_block(bb_body): + i = builder.load(index) + check_entry(i) + + # Perturb to go to next entry: + # perturb >>= 5 + # i = (i * 5 + 1 + perturb) & mask + p = builder.load(perturb) + p = builder.lshr(p, five) + i = builder.add(one, builder.mul(i, five)) + i = builder.and_(mask, builder.add(i, p)) + builder.store(i, index) + builder.store(p, perturb) + # Loop + builder.branch(bb_body) + + with builder.goto_block(bb_not_found): + if for_insert: + # Not found => for insertion, return the index of the first + # deleted entry (if any), to avoid creating an infinite + # lookup chain (issue #1913). 
+ i = builder.load(index) + j = builder.load(free_index) + i = builder.select(builder.icmp_unsigned('==', j, free_index_sentinel), + i, j) + builder.store(i, index) + builder.branch(bb_end) + + with builder.goto_block(bb_found): + builder.branch(bb_end) + + builder.position_at_end(bb_end) + + found = builder.phi(ir.IntType(1), 'found') + found.add_incoming(cgutils.true_bit, bb_found) + found.add_incoming(cgutils.false_bit, bb_not_found) + + return found, builder.load(index) + + @contextlib.contextmanager + def _iterate(self, start=None): + """ + Iterate over the payload's entries. Yield a SetLoop. + """ + context = self._context + builder = self._builder + + intp_t = context.get_value_type(types.intp) + one = ir.Constant(intp_t, 1) + size = builder.add(self.mask, one) + + with cgutils.for_range(builder, size, start=start) as range_loop: + entry = self.get_entry(range_loop.index) + is_used = is_hash_used(context, builder, entry.hash) + with builder.if_then(is_used): + loop = SetLoop(index=range_loop.index, entry=entry, + do_break=range_loop.do_break) + yield loop + + @contextlib.contextmanager + def _next_entry(self): + """ + Yield a random entry from the payload. Caller must ensure the + set isn't empty, otherwise the function won't end. + """ + context = self._context + builder = self._builder + + intp_t = context.get_value_type(types.intp) + zero = ir.Constant(intp_t, 0) + one = ir.Constant(intp_t, 1) + mask = self.mask + + # Start walking the entries from the stored "search finger" and + # break as soon as we find a used entry. 
+ + bb_body = builder.append_basic_block('next_entry_body') + bb_end = builder.append_basic_block('next_entry_end') + + index = cgutils.alloca_once_value(builder, self.finger) + builder.branch(bb_body) + + with builder.goto_block(bb_body): + i = builder.load(index) + # ANDing with mask ensures we stay inside the table boundaries + i = builder.and_(mask, builder.add(i, one)) + builder.store(i, index) + entry = self.get_entry(i) + is_used = is_hash_used(context, builder, entry.hash) + builder.cbranch(is_used, bb_end, bb_body) + + builder.position_at_end(bb_end) + + # Update the search finger with the next position. This avoids + # O(n**2) behaviour when pop() is called in a loop. + i = builder.load(index) + self.finger = i + yield self.get_entry(i) + + +class SetInstance(object): + + def __init__(self, context, builder, set_type, set_val): + self._context = context + self._builder = builder + self._ty = set_type + self._entrysize = get_entry_size(context, set_type) + self._set = context.make_helper(builder, set_type, set_val) + + @property + def dtype(self): + return self._ty.dtype + + @property + def payload(self): + """ + The _SetPayload for this set. + """ + # This cannot be cached as the pointer can move around! + context = self._context + builder = self._builder + + ptr = self._context.nrt.meminfo_data(builder, self.meminfo) + return _SetPayload(context, builder, self._ty, ptr) + + @property + def value(self): + return self._set._getvalue() + + @property + def meminfo(self): + return self._set.meminfo + + @property + def parent(self): + return self._set.parent + + @parent.setter + def parent(self, value): + self._set.parent = value + + def get_size(self): + """ + Return the number of elements in the size. 
+ """ + return self.payload.used + + def set_dirty(self, val): + if self._ty.reflected: + self.payload.dirty = cgutils.true_bit if val else cgutils.false_bit + + def _add_entry(self, payload, entry, item, h, do_resize=True): + context = self._context + builder = self._builder + + old_hash = entry.hash + entry.hash = h + self.incref_value(item) + entry.key = item + # used++ + used = payload.used + one = ir.Constant(used.type, 1) + used = payload.used = builder.add(used, one) + # fill++ if entry wasn't a deleted one + with builder.if_then(is_hash_empty(context, builder, old_hash), + likely=True): + payload.fill = builder.add(payload.fill, one) + # Grow table if necessary + if do_resize: + self.upsize(used) + self.set_dirty(True) + + def _add_key(self, payload, item, h, do_resize=True, do_incref=True): + context = self._context + builder = self._builder + + found, i = payload._lookup(item, h, for_insert=True) + not_found = builder.not_(found) + + with builder.if_then(not_found): + # Not found => add it + entry = payload.get_entry(i) + old_hash = entry.hash + entry.hash = h + if do_incref: + self.incref_value(item) + entry.key = item + # used++ + used = payload.used + one = ir.Constant(used.type, 1) + used = payload.used = builder.add(used, one) + # fill++ if entry wasn't a deleted one + with builder.if_then(is_hash_empty(context, builder, old_hash), + likely=True): + payload.fill = builder.add(payload.fill, one) + # Grow table if necessary + if do_resize: + self.upsize(used) + self.set_dirty(True) + + def _remove_entry(self, payload, entry, do_resize=True, do_decref=True): + # Mark entry deleted + entry.hash = ir.Constant(entry.hash.type, DELETED) + if do_decref: + self.decref_value(entry.key) + # used-- + used = payload.used + one = ir.Constant(used.type, 1) + used = payload.used = self._builder.sub(used, one) + # Shrink table if necessary + if do_resize: + self.downsize(used) + self.set_dirty(True) + + def _remove_key(self, payload, item, h, do_resize=True): + 
context = self._context + builder = self._builder + + found, i = payload._lookup(item, h) + + with builder.if_then(found): + entry = payload.get_entry(i) + self._remove_entry(payload, entry, do_resize) + + return found + + def add(self, item, do_resize=True): + context = self._context + builder = self._builder + + payload = self.payload + h = get_hash_value(context, builder, self._ty.dtype, item) + self._add_key(payload, item, h, do_resize) + + def add_pyapi(self, pyapi, item, do_resize=True): + """A version of .add for use inside functions following Python calling + convention. + """ + context = self._context + builder = self._builder + + payload = self.payload + h = self._pyapi_get_hash_value(pyapi, context, builder, item) + self._add_key(payload, item, h, do_resize) + + def _pyapi_get_hash_value(self, pyapi, context, builder, item): + """Python API compatible version of `get_hash_value()`. + """ + argtypes = [self._ty.dtype] + resty = types.intp + + def wrapper(val): + return _get_hash_value_intrinsic(val) + + args = [item] + sig = typing.signature(resty, *argtypes) + is_error, retval = pyapi.call_jit_code(wrapper, sig, args) + # Handle return status + with builder.if_then(is_error, likely=False): + # Raise nopython exception as a Python exception + builder.ret(pyapi.get_null_object()) + return retval + + def contains(self, item): + context = self._context + builder = self._builder + + payload = self.payload + h = get_hash_value(context, builder, self._ty.dtype, item) + found, i = payload._lookup(item, h) + return found + + def discard(self, item): + context = self._context + builder = self._builder + + payload = self.payload + h = get_hash_value(context, builder, self._ty.dtype, item) + found = self._remove_key(payload, item, h) + return found + + def pop(self): + context = self._context + builder = self._builder + + lty = context.get_value_type(self._ty.dtype) + key = cgutils.alloca_once(builder, lty) + + payload = self.payload + with payload._next_entry() as 
entry: + builder.store(entry.key, key) + # since the value is returned don't decref in _remove_entry() + self._remove_entry(payload, entry, do_decref=False) + + return builder.load(key) + + def clear(self): + context = self._context + builder = self._builder + + intp_t = context.get_value_type(types.intp) + minsize = ir.Constant(intp_t, MINSIZE) + self._replace_payload(minsize) + self.set_dirty(True) + + def copy(self): + """ + Return a copy of this set. + """ + context = self._context + builder = self._builder + + payload = self.payload + used = payload.used + fill = payload.fill + + other = type(self)(context, builder, self._ty, None) + + no_deleted_entries = builder.icmp_unsigned('==', used, fill) + with builder.if_else(no_deleted_entries, likely=True) \ + as (if_no_deleted, if_deleted): + with if_no_deleted: + # No deleted entries => raw copy the payload + ok = other._copy_payload(payload) + with builder.if_then(builder.not_(ok), likely=False): + context.call_conv.return_user_exc(builder, MemoryError, + ("cannot copy set",)) + + with if_deleted: + # Deleted entries => re-insert entries one by one + nentries = self.choose_alloc_size(context, builder, used) + ok = other._allocate_payload(nentries) + with builder.if_then(builder.not_(ok), likely=False): + context.call_conv.return_user_exc(builder, MemoryError, + ("cannot copy set",)) + + other_payload = other.payload + with payload._iterate() as loop: + entry = loop.entry + other._add_key(other_payload, entry.key, entry.hash, + do_resize=False) + + return other + + def intersect(self, other): + """ + In-place intersection with *other* set. 
+ """ + context = self._context + builder = self._builder + payload = self.payload + other_payload = other.payload + + with payload._iterate() as loop: + entry = loop.entry + found, _ = other_payload._lookup(entry.key, entry.hash) + with builder.if_then(builder.not_(found)): + self._remove_entry(payload, entry, do_resize=False) + + # Final downsize + self.downsize(payload.used) + + def difference(self, other): + """ + In-place difference with *other* set. + """ + context = self._context + builder = self._builder + payload = self.payload + other_payload = other.payload + + with other_payload._iterate() as loop: + entry = loop.entry + self._remove_key(payload, entry.key, entry.hash, do_resize=False) + + # Final downsize + self.downsize(payload.used) + + def symmetric_difference(self, other): + """ + In-place symmetric difference with *other* set. + """ + context = self._context + builder = self._builder + other_payload = other.payload + + with other_payload._iterate() as loop: + key = loop.entry.key + h = loop.entry.hash + # We must reload our payload as it may be resized during the loop + payload = self.payload + found, i = payload._lookup(key, h, for_insert=True) + entry = payload.get_entry(i) + with builder.if_else(found) as (if_common, if_not_common): + with if_common: + self._remove_entry(payload, entry, do_resize=False) + with if_not_common: + self._add_entry(payload, entry, key, h) + + # Final downsize + self.downsize(self.payload.used) + + def issubset(self, other, strict=False): + context = self._context + builder = self._builder + payload = self.payload + other_payload = other.payload + + cmp_op = '<' if strict else '<=' + + res = cgutils.alloca_once_value(builder, cgutils.true_bit) + with builder.if_else( + builder.icmp_unsigned(cmp_op, payload.used, other_payload.used) + ) as (if_smaller, if_larger): + with if_larger: + # self larger than other => self cannot possibly a subset + builder.store(cgutils.false_bit, res) + with if_smaller: + # check whether 
each key of self is in other + with payload._iterate() as loop: + entry = loop.entry + found, _ = other_payload._lookup(entry.key, entry.hash) + with builder.if_then(builder.not_(found)): + builder.store(cgutils.false_bit, res) + loop.do_break() + + return builder.load(res) + + def isdisjoint(self, other): + context = self._context + builder = self._builder + payload = self.payload + other_payload = other.payload + + res = cgutils.alloca_once_value(builder, cgutils.true_bit) + + def check(smaller, larger): + # Loop over the smaller of the two, and search in the larger + with smaller._iterate() as loop: + entry = loop.entry + found, _ = larger._lookup(entry.key, entry.hash) + with builder.if_then(found): + builder.store(cgutils.false_bit, res) + loop.do_break() + + with builder.if_else( + builder.icmp_unsigned('>', payload.used, other_payload.used) + ) as (if_larger, otherwise): + + with if_larger: + # len(self) > len(other) + check(other_payload, payload) + + with otherwise: + # len(self) <= len(other) + check(payload, other_payload) + + return builder.load(res) + + def equals(self, other): + context = self._context + builder = self._builder + payload = self.payload + other_payload = other.payload + + res = cgutils.alloca_once_value(builder, cgutils.true_bit) + with builder.if_else( + builder.icmp_unsigned('==', payload.used, other_payload.used) + ) as (if_same_size, otherwise): + with if_same_size: + # same sizes => check whether each key of self is in other + with payload._iterate() as loop: + entry = loop.entry + found, _ = other_payload._lookup(entry.key, entry.hash) + with builder.if_then(builder.not_(found)): + builder.store(cgutils.false_bit, res) + loop.do_break() + with otherwise: + # different sizes => cannot possibly be equal + builder.store(cgutils.false_bit, res) + + return builder.load(res) + + @classmethod + def allocate_ex(cls, context, builder, set_type, nitems=None): + """ + Allocate a SetInstance with its storage. 
    @classmethod
    def allocate_ex(cls, context, builder, set_type, nitems=None):
        """
        Allocate a SetInstance with its storage.
        Return a (ok, instance) tuple where *ok* is a LLVM boolean and
        *instance* is a SetInstance object (the object's contents are
        only valid when *ok* is true).

        *nitems* may be None (use the minimum size), a Python int, or an
        LLVM intp value.
        """
        intp_t = context.get_value_type(types.intp)

        if nitems is None:
            nentries = ir.Constant(intp_t, MINSIZE)
        else:
            if isinstance(nitems, int):
                nitems = ir.Constant(intp_t, nitems)
            nentries = cls.choose_alloc_size(context, builder, nitems)

        self = cls(context, builder, set_type, None)
        ok = self._allocate_payload(nentries)
        return ok, self

    @classmethod
    def allocate(cls, context, builder, set_type, nitems=None):
        """
        Allocate a SetInstance with its storage.  Same as allocate_ex(),
        but return an initialized *instance*.  If allocation failed,
        control is transferred to the caller using the target's current
        call convention.
        """
        ok, self = cls.allocate_ex(context, builder, set_type, nitems)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot allocate set",))
        return self

    @classmethod
    def from_meminfo(cls, context, builder, set_type, meminfo):
        """
        Allocate a new set instance pointing to an existing payload
        (a meminfo pointer).
        Note the parent field has to be filled by the caller.
        """
        self = cls(context, builder, set_type, None)
        self._set.meminfo = meminfo
        self._set.parent = context.get_constant_null(types.pyobject)
        context.nrt.incref(builder, set_type, self.value)
        # Payload is part of the meminfo, no need to touch it
        return self

    @classmethod
    def choose_alloc_size(cls, context, builder, nitems):
        """
        Choose a suitable number of entries for the given number of items.

        Emits a small IR loop that finds the first power of two (starting
        at MINSIZE) that is >= 2 * nitems.
        """
        intp_t = nitems.type
        one = ir.Constant(intp_t, 1)
        minsize = ir.Constant(intp_t, MINSIZE)

        # Ensure number of entries >= 2 * used
        min_entries = builder.shl(nitems, one)
        # Find out first suitable power of 2, starting from MINSIZE
        size_p = cgutils.alloca_once_value(builder, minsize)

        bb_body = builder.append_basic_block("calcsize.body")
        bb_end = builder.append_basic_block("calcsize.end")

        builder.branch(bb_body)

        with builder.goto_block(bb_body):
            size = builder.load(size_p)
            is_large_enough = builder.icmp_unsigned('>=', size, min_entries)
            with builder.if_then(is_large_enough, likely=False):
                builder.branch(bb_end)
            # Not large enough yet: double and loop again
            next_size = builder.shl(size, one)
            builder.store(next_size, size_p)
            builder.branch(bb_body)

        builder.position_at_end(bb_end)
        return builder.load(size_p)
    def upsize(self, nitems):
        """
        When adding to the set, ensure it is properly sized for the given
        number of used entries.

        Grows the payload (by factors of 4) when 2 * nitems would reach
        the current number of entries.
        """
        context = self._context
        builder = self._builder
        intp_t = nitems.type

        one = ir.Constant(intp_t, 1)
        two = ir.Constant(intp_t, 2)

        payload = self.payload

        # Ensure number of entries >= 2 * used
        min_entries = builder.shl(nitems, one)
        size = builder.add(payload.mask, one)
        need_resize = builder.icmp_unsigned('>=', min_entries, size)

        with builder.if_then(need_resize, likely=False):
            # Find out next suitable size
            new_size_p = cgutils.alloca_once_value(builder, size)

            bb_body = builder.append_basic_block("calcsize.body")
            bb_end = builder.append_basic_block("calcsize.end")

            builder.branch(bb_body)

            with builder.goto_block(bb_body):
                # Multiply by 4 (ensuring size remains a power of two)
                new_size = builder.load(new_size_p)
                new_size = builder.shl(new_size, two)
                builder.store(new_size, new_size_p)
                is_too_small = builder.icmp_unsigned('>=', min_entries, new_size)
                builder.cbranch(is_too_small, bb_body, bb_end)

            builder.position_at_end(bb_end)

            new_size = builder.load(new_size_p)
            if DEBUG_ALLOCS:
                context.printf(builder,
                               "upsize to %zd items: current size = %zd, "
                               "min entries = %zd, new size = %zd\n",
                               nitems, size, min_entries, new_size)
            self._resize(payload, new_size, "cannot grow set")

    def downsize(self, nitems):
        """
        When removing from the set, ensure it is properly sized for the given
        number of used entries.

        Shrinks the payload (by halving) only when the current size is at
        least 4x the required minimum, so shrink/grow does not thrash.
        """
        context = self._context
        builder = self._builder
        intp_t = nitems.type

        one = ir.Constant(intp_t, 1)
        two = ir.Constant(intp_t, 2)
        minsize = ir.Constant(intp_t, MINSIZE)

        payload = self.payload

        # Ensure entries >= max(2 * used, MINSIZE)
        min_entries = builder.shl(nitems, one)
        min_entries = builder.select(builder.icmp_unsigned('>=', min_entries, minsize),
                                     min_entries, minsize)
        # Shrink only if size >= 4 * min_entries && size > MINSIZE
        max_size = builder.shl(min_entries, two)
        size = builder.add(payload.mask, one)
        need_resize = builder.and_(
            builder.icmp_unsigned('<=', max_size, size),
            builder.icmp_unsigned('<', minsize, size))

        with builder.if_then(need_resize, likely=False):
            # Find out next suitable size
            new_size_p = cgutils.alloca_once_value(builder, size)

            bb_body = builder.append_basic_block("calcsize.body")
            bb_end = builder.append_basic_block("calcsize.end")

            builder.branch(bb_body)

            with builder.goto_block(bb_body):
                # Divide by 2 (ensuring size remains a power of two)
                new_size = builder.load(new_size_p)
                new_size = builder.lshr(new_size, one)
                # Keep current size if new size would be < min_entries
                is_too_small = builder.icmp_unsigned('>', min_entries, new_size)
                with builder.if_then(is_too_small):
                    builder.branch(bb_end)
                builder.store(new_size, new_size_p)
                builder.branch(bb_body)

            builder.position_at_end(bb_end)

            # Ensure new_size >= MINSIZE
            new_size = builder.load(new_size_p)
            # At this point, new_size should be < size if the factors
            # above were chosen carefully!

            if DEBUG_ALLOCS:
                context.printf(builder,
                               "downsize to %zd items: current size = %zd, "
                               "min entries = %zd, new size = %zd\n",
                               nitems, size, min_entries, new_size)
            self._resize(payload, new_size, "cannot shrink set")

    def _resize(self, payload, nentries, errmsg):
        """
        Resize the payload to the given number of entries.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder

        # Allocate new entries
        old_payload = payload

        ok = self._allocate_payload(nentries, realloc=True)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              (errmsg,))

        # Re-insert old entries
        # No incref since they already were the first time they were inserted
        payload = self.payload
        with old_payload._iterate() as loop:
            entry = loop.entry
            self._add_key(payload, entry.key, entry.hash,
                          do_resize=False, do_incref=False)

        self._free_payload(old_payload.ptr)

    def _replace_payload(self, nentries):
        """
        Replace the payload with a new empty payload with the given number
        of entries.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder

        # decref all of the previous entries
        with self.payload._iterate() as loop:
            entry = loop.entry
            self.decref_value(entry.key)

        # Free old payload
        self._free_payload(self.payload.ptr)

        ok = self._allocate_payload(nentries, realloc=True)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot reallocate set",))
+ """ + context = self._context + builder = self._builder + + ok = cgutils.alloca_once_value(builder, cgutils.true_bit) + + intp_t = context.get_value_type(types.intp) + zero = ir.Constant(intp_t, 0) + one = ir.Constant(intp_t, 1) + + payload_type = context.get_data_type(types.SetPayload(self._ty)) + payload_size = context.get_abi_sizeof(payload_type) + entry_size = self._entrysize + # Account for the fact that the payload struct already contains an entry + payload_size -= entry_size + + # Total allocation size = + nentries * entry_size + allocsize, ovf = cgutils.muladd_with_overflow(builder, nentries, + ir.Constant(intp_t, entry_size), + ir.Constant(intp_t, payload_size)) + with builder.if_then(ovf, likely=False): + builder.store(cgutils.false_bit, ok) + + with builder.if_then(builder.load(ok), likely=True): + if realloc: + meminfo = self._set.meminfo + ptr = context.nrt.meminfo_varsize_alloc_unchecked(builder, + meminfo, + size=allocsize) + alloc_ok = cgutils.is_null(builder, ptr) + else: + # create destructor to be called upon set destruction + dtor = self._imp_dtor(context, builder.module) + meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked( + builder, allocsize, builder.bitcast(dtor, cgutils.voidptr_t)) + alloc_ok = cgutils.is_null(builder, meminfo) + + with builder.if_else(alloc_ok, + likely=False) as (if_error, if_ok): + with if_error: + builder.store(cgutils.false_bit, ok) + with if_ok: + if not realloc: + self._set.meminfo = meminfo + self._set.parent = context.get_constant_null(types.pyobject) + payload = self.payload + # Initialize entries to 0xff (EMPTY) + cgutils.memset(builder, payload.ptr, allocsize, 0xFF) + payload.used = zero + payload.fill = zero + payload.finger = zero + new_mask = builder.sub(nentries, one) + payload.mask = new_mask + + if DEBUG_ALLOCS: + context.printf(builder, + "allocated %zd bytes for set at %p: mask = %zd\n", + allocsize, payload.ptr, new_mask) + + return builder.load(ok) + + def _free_payload(self, ptr): + """ + 
Free an allocated old payload at *ptr*. + """ + self._context.nrt.meminfo_varsize_free(self._builder, self.meminfo, ptr) + + def _copy_payload(self, src_payload): + """ + Raw-copy the given payload into self. + """ + context = self._context + builder = self._builder + + ok = cgutils.alloca_once_value(builder, cgutils.true_bit) + + intp_t = context.get_value_type(types.intp) + zero = ir.Constant(intp_t, 0) + one = ir.Constant(intp_t, 1) + + payload_type = context.get_data_type(types.SetPayload(self._ty)) + payload_size = context.get_abi_sizeof(payload_type) + entry_size = self._entrysize + # Account for the fact that the payload struct already contains an entry + payload_size -= entry_size + + mask = src_payload.mask + nentries = builder.add(one, mask) + + # Total allocation size = + nentries * entry_size + # (note there can't be any overflow since we're reusing an existing + # payload's parameters) + allocsize = builder.add(ir.Constant(intp_t, payload_size), + builder.mul(ir.Constant(intp_t, entry_size), + nentries)) + + with builder.if_then(builder.load(ok), likely=True): + # create destructor for new meminfo + dtor = self._imp_dtor(context, builder.module) + meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked( + builder, allocsize, builder.bitcast(dtor, cgutils.voidptr_t)) + alloc_ok = cgutils.is_null(builder, meminfo) + + with builder.if_else(alloc_ok, likely=False) as (if_error, if_ok): + with if_error: + builder.store(cgutils.false_bit, ok) + with if_ok: + self._set.meminfo = meminfo + payload = self.payload + payload.used = src_payload.used + payload.fill = src_payload.fill + payload.finger = zero + payload.mask = mask + + # instead of using `_add_key` for every entry, since the + # size of the new set is the same, we can just copy the + # data directly without having to re-compute the hash + cgutils.raw_memcpy(builder, payload.entries, + src_payload.entries, nentries, + entry_size) + # increment the refcounts to simulate `_add_key` for each + # element 
    def _imp_dtor(self, context, module):
        """Define the dtor for set

        The destructor decrefs every key still held by the payload.  It is
        emitted once per element type (linkonce_odr) and shared.
        """
        llvoidptr = cgutils.voidptr_t
        llsize_t = context.get_value_type(types.size_t)
        # create a dtor function that takes (void* set, size_t size, void* dtor_info)
        fnty = ir.FunctionType(
            ir.VoidType(),
            [llvoidptr, llsize_t, llvoidptr],
        )
        # create type-specific name
        fname = f".dtor.set.{self._ty.dtype}"

        fn = cgutils.get_or_insert_function(module, fnty, name=fname)

        if fn.is_declaration:
            # Set linkage
            fn.linkage = 'linkonce_odr'
            # Define
            builder = ir.IRBuilder(fn.append_basic_block())
            payload = _SetPayload(context, builder, self._ty, fn.args[0])
            with payload._iterate() as loop:
                entry = loop.entry
                context.nrt.decref(builder, self._ty.dtype, entry.key)
            builder.ret_void()

        return fn

    def incref_value(self, val):
        """Incref an element value
        """
        self._context.nrt.incref(self._builder, self._ty.dtype, val)

    def decref_value(self, val):
        """Decref an element value
        """
        self._context.nrt.decref(self._builder, self._ty.dtype, val)


class SetIterInstance(object):
    """Helper wrapping the native structure of a set iterator.

    The iterator holds a meminfo pointer to the set's payload plus a
    mutable index used to resume iteration.
    """

    def __init__(self, context, builder, iter_type, iter_val):
        self._context = context
        self._builder = builder
        self._ty = iter_type
        self._iter = context.make_helper(builder, iter_type, iter_val)
        ptr = self._context.nrt.meminfo_data(builder, self.meminfo)
        self._payload = _SetPayload(context, builder, self._ty.container, ptr)

    @classmethod
    def from_set(cls, context, builder, iter_type, set_val):
        """Create an iterator instance positioned at the start of *set_val*."""
        set_inst = SetInstance(context, builder, iter_type.container, set_val)
        self = cls(context, builder, iter_type, None)
        index = context.get_constant(types.intp, 0)
        self._iter.index = cgutils.alloca_once_value(builder, index)
        self._iter.meminfo = set_inst.meminfo
        return self

    @property
    def value(self):
        # The iterator value as a plain LLVM value
        return self._iter._getvalue()

    @property
    def meminfo(self):
        return self._iter.meminfo

    @property
    def index(self):
        return self._builder.load(self._iter.index)

    @index.setter
    def index(self, value):
        self._builder.store(value, self._iter.index)

    def iternext(self, result):
        """Emit one iteration step, yielding the next key into *result*."""
        index = self.index
        payload = self._payload
        one = ir.Constant(index.type, 1)

        # Assume exhaustion unless the loop below finds an entry
        result.set_exhausted()

        with payload._iterate(start=index) as loop:
            # An entry was found
            entry = loop.entry
            result.set_valid()
            result.yield_(entry.key)
            self.index = self._builder.add(loop.index, one)
            loop.do_break()


#-------------------------------------------------------------------------------
# Constructors

def build_set(context, builder, set_type, items):
    """
    Build a set of the given type, containing the given items.
    """
    nitems = len(items)
    inst = SetInstance.allocate(context, builder, set_type, nitems)

    if nitems > 0:

        # Populate set.  Inlining the insertion code for each item would be very
        # costly, instead we create a LLVM array and iterate over it.
        array = cgutils.pack_array(builder, items)
        array_ptr = cgutils.alloca_once_value(builder, array)

        count = context.get_constant(types.intp, nitems)
        with cgutils.for_range(builder, count) as loop:
            item = builder.load(cgutils.gep(builder, array_ptr, 0, loop.index))
            inst.add(item)

    return impl_ret_new_ref(context, builder, set_type, inst.value)


@lower_builtin(set)
def set_empty_constructor(context, builder, sig, args):
    """Lower ``set()`` (no arguments) to an empty allocation."""
    set_type = sig.return_type
    inst = SetInstance.allocate(context, builder, set_type)
    return impl_ret_new_ref(context, builder, set_type, inst.value)
@lower_builtin(set, types.IterableType)
def set_constructor(context, builder, sig, args):
    """Lower ``set(iterable)``."""
    set_type = sig.return_type
    items_type, = sig.args
    items, = args

    # If the argument has a len(), preallocate the set so as to
    # avoid resizes.
    # `for_iter` increfs each item in the set, so a `decref` is required each
    # iteration to balance. Because the `incref` from `.add` is dependent on
    # the item not already existing in the set, just removing its incref is not
    # enough to guarantee all memory is freed
    n = call_len(context, builder, items_type, items)
    inst = SetInstance.allocate(context, builder, set_type, n)
    with for_iter(context, builder, items_type, items) as loop:
        inst.add(loop.value)
        context.nrt.decref(builder, set_type.dtype, loop.value)

    return impl_ret_new_ref(context, builder, set_type, inst.value)


#-------------------------------------------------------------------------------
# Various operations

@lower_builtin(len, types.Set)
def set_len(context, builder, sig, args):
    """Lower ``len(set)``."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    return inst.get_size()

@lower_builtin(operator.contains, types.Set, types.Any)
def in_set(context, builder, sig, args):
    """Lower ``item in set``."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    return inst.contains(args[1])

@lower_builtin('getiter', types.Set)
def getiter_set(context, builder, sig, args):
    """Lower ``iter(set)``."""
    inst = SetIterInstance.from_set(context, builder, sig.return_type, args[0])
    return impl_ret_borrowed(context, builder, sig.return_type, inst.value)

@lower_builtin('iternext', types.SetIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
    # NOTE(review): name says "listiter" but this handles SetIter
    inst = SetIterInstance(context, builder, sig.args[0], args[0])
    inst.iternext(result)


#-------------------------------------------------------------------------------
# Methods

# One-item-at-a-time operations

@lower_builtin("set.add", types.Set, types.Any)
def set_add(context, builder, sig, args):
    """Lower ``set.add(item)``."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    item = args[1]
    inst.add(item)

    return context.get_dummy_value()


@intrinsic
def _set_discard(typingctx, s, item):
    """Intrinsic backing ``set.discard`` (no error if item absent)."""
    sig = types.none(s, item)

    def set_discard(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        item = args[1]
        inst.discard(item)

        return context.get_dummy_value()

    return sig, set_discard


@overload_method(types.Set, "discard")
def ol_set_discard(s, item):
    return lambda s, item: _set_discard(s, item)


@intrinsic
def _set_pop(typingctx, s):
    """Intrinsic backing ``set.pop``; raises KeyError on empty set."""
    sig = s.dtype(s)

    def set_pop(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        used = inst.payload.used
        with builder.if_then(cgutils.is_null(builder, used), likely=False):
            context.call_conv.return_user_exc(builder, KeyError,
                                              ("set.pop(): empty set",))

        return inst.pop()

    return sig, set_pop


@overload_method(types.Set, "pop")
def ol_set_pop(s):
    return lambda s: _set_pop(s)


@intrinsic
def _set_remove(typingctx, s, item):
    """Intrinsic backing ``set.remove``; raises KeyError if item absent."""
    sig = types.none(s, item)

    def set_remove(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        item = args[1]
        found = inst.discard(item)
        with builder.if_then(builder.not_(found), likely=False):
            context.call_conv.return_user_exc(builder, KeyError,
                                              ("set.remove(): key not in set",))

        return context.get_dummy_value()

    return sig, set_remove


@overload_method(types.Set, "remove")
def ol_set_remove(s, item):
    # Only typed when item matches the set's element type
    if s.dtype == item:
        return lambda s, item: _set_remove(s, item)


# Mutating set operations

@intrinsic
def _set_clear(typingctx, s):
    """Intrinsic backing ``set.clear``."""
    sig = types.none(s)

    def set_clear(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        inst.clear()
        return context.get_dummy_value()

    return sig, set_clear


@overload_method(types.Set, "clear")
def ol_set_clear(s):
    return lambda s: _set_clear(s)
@intrinsic
def _set_copy(typingctx, s):
    """Intrinsic backing ``set.copy``; returns a new reference."""
    sig = s(s)

    def set_copy(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        other = inst.copy()
        return impl_ret_new_ref(context, builder, sig.return_type, other.value)

    return sig, set_copy


@overload_method(types.Set, "copy")
def ol_set_copy(s):
    return lambda s: _set_copy(s)


def set_difference_update(context, builder, sig, args):
    """Lowering body for in-place difference (``a -= b`` / difference_update)."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])

    inst.difference(other)

    return context.get_dummy_value()


@intrinsic
def _set_difference_update(typingctx, a, b):
    sig = types.none(a, b)
    return sig, set_difference_update


@overload_method(types.Set, "difference_update")
def set_difference_update_impl(a, b):
    check_all_set(a, b)
    return lambda a, b: _set_difference_update(a, b)


def set_intersection_update(context, builder, sig, args):
    """Lowering body for in-place intersection (``a &= b`` / intersection_update)."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])
    inst.intersect(other)
    return context.get_dummy_value()


@intrinsic
def _set_intersection_update(typingctx, a, b):
    sig = types.none(a, b)
    return sig, set_intersection_update


@overload_method(types.Set, "intersection_update")
def set_intersection_update_impl(a, b):
    check_all_set(a, b)
    return lambda a, b: _set_intersection_update(a, b)


def set_symmetric_difference_update(context, builder, sig, args):
    """Lowering body for in-place symmetric difference (``a ^= b``)."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])
    inst.symmetric_difference(other)
    return context.get_dummy_value()


@intrinsic
def _set_symmetric_difference_update(typingctx, a, b):
    sig = types.none(a, b)
    return sig, set_symmetric_difference_update


@overload_method(types.Set, "symmetric_difference_update")
def set_symmetric_difference_update_impl(a, b):
    check_all_set(a, b)
    return lambda a, b: _set_symmetric_difference_update(a, b)


@lower_builtin("set.update", types.Set, types.IterableType)
def set_update(context, builder, sig, args):
    """Lower ``set.update(iterable)``."""
    inst = SetInstance(context, builder, sig.args[0], args[0])
    items_type = sig.args[1]
    items = args[1]

    # If the argument has a len(), assume there are few collisions and
    # presize to len(set) + len(items)
    n = call_len(context, builder, items_type, items)
    if n is not None:
        new_size = builder.add(inst.payload.used, n)
        inst.upsize(new_size)

    with for_iter(context, builder, items_type, items) as loop:
        # make sure that the items being added are of the same dtype as the
        # set instance
        casted = context.cast(builder, loop.value, items_type.dtype, inst.dtype)
        inst.add(casted)
        # decref each item to counter balance the incref from `for_iter`
        # `.add` will conditionally incref when the item does not already exist
        # in the set, therefore removing its incref is not enough to guarantee
        # all memory is freed
        context.nrt.decref(builder, items_type.dtype, loop.value)

    if n is not None:
        # If we pre-grew the set, downsize in case there were many collisions
        inst.downsize(inst.payload.used)

    return context.get_dummy_value()

def gen_operator_impl(op, impl):
    """Register *impl* (an in-place lowering body) as the implementation of
    augmented-assignment operator *op*, returning the (borrowed) first operand.
    """
    @intrinsic
    def _set_operator_intr(typingctx, a, b):
        sig = a(a, b)
        def codegen(context, builder, sig, args):
            assert sig.return_type == sig.args[0]
            impl(context, builder, sig, args)
            return impl_ret_borrowed(context, builder, sig.args[0], args[0])
        return sig, codegen

    @overload(op)
    def _ol_set_operator(a, b):
        check_all_set(a, b)
        return lambda a, b: _set_operator_intr(a, b)


# Hook the in-place operators up to their update bodies
for op_, op_impl in [
    (operator.iand, set_intersection_update),
    (operator.ior, set_update),
    (operator.isub, set_difference_update),
    (operator.ixor, set_symmetric_difference_update),
    ]:
    gen_operator_impl(op_, op_impl)


# Set operations creating a new set

@overload(operator.sub)
@overload_method(types.Set, "difference")
def impl_set_difference(a, b):
    """``a - b`` / ``a.difference(b)``: copy then difference_update."""
    check_all_set(a, b)

    def difference_impl(a, b):
        s = a.copy()
        s.difference_update(b)
        return s

    return difference_impl
@overload(operator.and_)
@overload_method(types.Set, "intersection")
def set_intersection(a, b):
    """``a & b`` / ``a.intersection(b)``: copy the smaller set, then
    intersect with the larger."""
    check_all_set(a, b)

    def intersection_impl(a, b):
        if len(a) < len(b):
            s = a.copy()
            s.intersection_update(b)
            return s
        else:
            s = b.copy()
            s.intersection_update(a)
            return s

    return intersection_impl

@overload(operator.xor)
@overload_method(types.Set, "symmetric_difference")
def set_symmetric_difference(a, b):
    """``a ^ b``: copy the larger set, then symmetric-difference with
    the smaller."""
    check_all_set(a, b)

    def symmetric_difference_impl(a, b):
        if len(a) > len(b):
            s = a.copy()
            s.symmetric_difference_update(b)
            return s
        else:
            s = b.copy()
            s.symmetric_difference_update(a)
            return s

    return symmetric_difference_impl

@overload(operator.or_)
@overload_method(types.Set, "union")
def set_union(a, b):
    """``a | b``: copy the larger set, then update with the smaller."""
    check_all_set(a, b)

    def union_impl(a, b):
        if len(a) > len(b):
            s = a.copy()
            s.update(b)
            return s
        else:
            s = b.copy()
            s.update(a)
            return s

    return union_impl


# Predicates

@intrinsic
def _set_isdisjoint(typingctx, a, b):
    """Intrinsic backing ``set.isdisjoint``."""
    sig = types.boolean(a, b)

    def codegen(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        other = SetInstance(context, builder, sig.args[1], args[1])

        return inst.isdisjoint(other)

    return sig, codegen


@overload_method(types.Set, "isdisjoint")
def set_isdisjoint(a, b):
    check_all_set(a, b)

    return lambda a, b: _set_isdisjoint(a, b)


@intrinsic
def _set_issubset(typingctx, a, b):
    """Intrinsic backing ``set.issubset`` / ``<=``."""
    sig = types.boolean(a, b)

    def codegen(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        other = SetInstance(context, builder, sig.args[1], args[1])

        return inst.issubset(other)

    return sig, codegen

@overload(operator.le)
@overload_method(types.Set, "issubset")
def set_issubset(a, b):
    check_all_set(a, b)

    return lambda a, b: _set_issubset(a, b)


@overload(operator.ge)
@overload_method(types.Set, "issuperset")
def set_issuperset(a, b):
    """``a >= b``: defined as ``b.issubset(a)``."""
    check_all_set(a, b)

    def superset_impl(a, b):
        return b.issubset(a)

    return superset_impl

@intrinsic
def _set_eq(typingctx, a, b):
    """Intrinsic backing ``==``."""
    sig = types.boolean(a, b)

    def codegen(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        other = SetInstance(context, builder, sig.args[1], args[1])

        return inst.equals(other)

    return sig, codegen

@overload(operator.eq)
def set_eq(a, b):
    check_all_set(a, b)

    return lambda a, b: _set_eq(a, b)

@overload(operator.ne)
def set_ne(a, b):
    check_all_set(a, b)

    def ne_impl(a, b):
        return not a == b

    return ne_impl

@intrinsic
def _set_lt(typingctx, a, b):
    """Intrinsic backing ``<`` (strict subset)."""
    sig = types.boolean(a, b)

    def codegen(context, builder, sig, args):
        inst = SetInstance(context, builder, sig.args[0], args[0])
        other = SetInstance(context, builder, sig.args[1], args[1])

        return inst.issubset(other, strict=True)

    return sig, codegen

@overload(operator.lt)
def set_lt(a, b):
    check_all_set(a, b)

    return lambda a, b: _set_lt(a, b)

@overload(operator.gt)
def set_gt(a, b):
    check_all_set(a, b)

    def gt_impl(a, b):
        return b < a

    return gt_impl

@lower_builtin(operator.is_, types.Set, types.Set)
def set_is(context, builder, sig, args):
    """Lower ``a is b`` as identity of the two sets' meminfo pointers."""
    a = SetInstance(context, builder, sig.args[0], args[0])
    b = SetInstance(context, builder, sig.args[1], args[1])
    ma = builder.ptrtoint(a.meminfo, cgutils.intp_t)
    mb = builder.ptrtoint(b.meminfo, cgutils.intp_t)
    return builder.icmp_signed('==', ma, mb)


# -----------------------------------------------------------------------------
# Implicit casting

@lower_cast(types.Set, types.Set)
def set_to_set(context, builder, fromty, toty, val):
    # Casting from non-reflected to reflected
    assert fromty.dtype == toty.dtype
    return val
"""
This module contains support functions for more advanced unicode operations.
This is not a public API and is for Numba internal use only. Most of the
functions are relatively straightforward translations of the functions with the
same name in CPython.
"""
from collections import namedtuple
from enum import IntEnum

import llvmlite.ir
import numpy as np

from numba.core import types, cgutils, config
from numba.core.imputils import (impl_ret_untracked)

from numba.core.extending import overload, intrinsic, register_jitable
from numba.core.errors import TypingError

# This is equivalent to the struct `_PyUnicode_TypeRecord defined in CPython's
# Objects/unicodectype.c
typerecord = namedtuple('typerecord',
                        'upper lower title decimal digit flags')

# The Py_UCS4 type from CPython:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/unicodeobject.h#L112 # noqa: E501
if config.USE_LEGACY_TYPE_SYSTEM:
    _Py_UCS4 = types.uint32
else:
    _Py_UCS4 = types.c_uint32

# ------------------------------------------------------------------------------
# Start code related to/from CPython's unicodectype impl
#
# NOTE: the original source at:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c # noqa: E501
# contains this statement:
#
# /*
#    Unicode character type helpers.
#
#    Written by Marc-Andre Lemburg (mal@lemburg.com).
#    Modified for Python 2.0 by Fredrik Lundh (fredrik@pythonware.com)
#
#    Copyright (c) Corporation for National Research Initiatives.
#
# */


# This enum contains the values defined in CPython's Objects/unicodectype.c that
# provide masks for use against the various members of the typerecord
#
# See: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L13-L27 # noqa: E501
#


# ASCII whitespace code points used elsewhere in the unicode support code
_Py_TAB = 0x9
_Py_LINEFEED = 0xa
_Py_CARRIAGE_RETURN = 0xd
_Py_SPACE = 0x20


class _PyUnicode_TyperecordMasks(IntEnum):
    # Bit masks applied to typerecord.flags
    ALPHA_MASK = 0x01
    DECIMAL_MASK = 0x02
    DIGIT_MASK = 0x04
    LOWER_MASK = 0x08
    LINEBREAK_MASK = 0x10
    SPACE_MASK = 0x20
    TITLE_MASK = 0x40
    UPPER_MASK = 0x80
    XID_START_MASK = 0x100
    XID_CONTINUE_MASK = 0x200
    PRINTABLE_MASK = 0x400
    NUMERIC_MASK = 0x800
    CASE_IGNORABLE_MASK = 0x1000
    CASED_MASK = 0x2000
    EXTENDED_CASE_MASK = 0x4000


def _PyUnicode_gettyperecord(a):
    # Pure-Python stub; only the @overload version below is callable
    raise RuntimeError("Calling the Python definition is invalid")


@intrinsic
def _gettyperecord_impl(typingctx, codepoint):
    """
    Provides the binding to numba_gettyperecord, returns a `typerecord`
    namedtuple of properties from the codepoint.
    """
    if not isinstance(codepoint, types.Integer):
        raise TypingError("codepoint must be an integer")

    def details(context, builder, signature, args):
        ll_void = context.get_value_type(types.void)
        ll_Py_UCS4 = context.get_value_type(_Py_UCS4)
        ll_intc = context.get_value_type(types.intc)
        ll_intc_ptr = ll_intc.as_pointer()
        ll_uchar = context.get_value_type(types.uchar)
        ll_uchar_ptr = ll_uchar.as_pointer()
        ll_ushort = context.get_value_type(types.ushort)
        ll_ushort_ptr = ll_ushort.as_pointer()
        # numba_gettyperecord fills its outputs through pointer arguments
        fnty = llvmlite.ir.FunctionType(ll_void, [
            ll_Py_UCS4,   # code
            ll_intc_ptr,  # upper
            ll_intc_ptr,  # lower
            ll_intc_ptr,  # title
            ll_uchar_ptr, # decimal
            ll_uchar_ptr, # digit
            ll_ushort_ptr,# flags
        ])
        fn = cgutils.get_or_insert_function(
            builder.module,
            fnty, name="numba_gettyperecord")
        upper = cgutils.alloca_once(builder, ll_intc, name='upper')
        lower = cgutils.alloca_once(builder, ll_intc, name='lower')
        title = cgutils.alloca_once(builder, ll_intc, name='title')
        decimal = cgutils.alloca_once(builder, ll_uchar, name='decimal')
        digit = cgutils.alloca_once(builder, ll_uchar, name='digit')
        flags = cgutils.alloca_once(builder, ll_ushort, name='flags')

        byref = [upper, lower, title, decimal, digit, flags]
        builder.call(fn, [args[0]] + byref)
        buf = []
        for x in byref:
            buf.append(builder.load(x))

        res = context.make_tuple(builder, signature.return_type, tuple(buf))
        return impl_ret_untracked(context, builder, signature.return_type, res)

    tupty = types.NamedTuple([types.intc, types.intc, types.intc, types.uchar,
                              types.uchar, types.ushort], typerecord)
    sig = tupty(_Py_UCS4)
    return sig, details


@overload(_PyUnicode_gettyperecord)
def gettyperecord_impl(a):
    """
    Provides a _PyUnicode_gettyperecord binding, for convenience it will accept
    single character strings and code points.
    """
    if isinstance(a, types.UnicodeType):
        from numba.cpython.unicode import _get_code_point

        def impl(a):
            if len(a) > 1:
                msg = "gettyperecord takes a single unicode character"
                raise ValueError(msg)
            code_point = _get_code_point(a, 0)
            data = _gettyperecord_impl(_Py_UCS4(code_point))
            return data
        return impl
    if isinstance(a, types.Integer):
        return lambda a: _gettyperecord_impl(_Py_UCS4(a))


# whilst it's possible to grab the _PyUnicode_ExtendedCase symbol as it's global
# it is safer to use a defined api:
@intrinsic
def _PyUnicode_ExtendedCase(typingctx, index):
    """
    Accessor function for the _PyUnicode_ExtendedCase array, binds to
    numba_get_PyUnicode_ExtendedCase which wraps the array and does the lookup
    """
    if not isinstance(index, types.Integer):
        raise TypingError("Expected an index")

    def details(context, builder, signature, args):
        ll_Py_UCS4 = context.get_value_type(_Py_UCS4)
        ll_intc = context.get_value_type(types.intc)
        fnty = llvmlite.ir.FunctionType(ll_Py_UCS4, [ll_intc])
        fn = cgutils.get_or_insert_function(
            builder.module,
            fnty, name="numba_get_PyUnicode_ExtendedCase")
        return builder.call(fn, [args[0]])

    sig = _Py_UCS4(types.intc)
    return sig, details

# The following functions are replications of the functions with the same name
# in CPython's Objects/unicodectype.c


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L64-L71 # noqa: E501
@register_jitable
def _PyUnicode_ToTitlecase(ch):
    ctype = _PyUnicode_gettyperecord(ch)
    if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK):
        # For extended case, `title` encodes an index into the extended
        # case table in its low 16 bits
        return _PyUnicode_ExtendedCase(ctype.title & 0xFFFF)
    return ch + ctype.title


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L76-L81 # noqa: E501
@register_jitable
def _PyUnicode_IsTitlecase(ch):
    ctype = _PyUnicode_gettyperecord(ch)
    return ctype.flags & _PyUnicode_TyperecordMasks.TITLE_MASK != 0


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L86-L91 # noqa: E501
@register_jitable
def _PyUnicode_IsXidStart(ch):
    ctype = _PyUnicode_gettyperecord(ch)
    return ctype.flags & _PyUnicode_TyperecordMasks.XID_START_MASK != 0


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L96-L101 # noqa: E501
@register_jitable
def _PyUnicode_IsXidContinue(ch):
    ctype = _PyUnicode_gettyperecord(ch)
    return ctype.flags & _PyUnicode_TyperecordMasks.XID_CONTINUE_MASK != 0


@register_jitable
def _PyUnicode_ToDecimalDigit(ch):
    # Returns the decimal digit value of *ch*, or -1 if not a decimal digit
    ctype = _PyUnicode_gettyperecord(ch)
    if ctype.flags & _PyUnicode_TyperecordMasks.DECIMAL_MASK:
        return ctype.decimal
    return -1


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L123-L1128 # noqa: E501
@register_jitable
def _PyUnicode_ToDigit(ch):
    # Returns the digit value of *ch*, or -1 if not a digit
    ctype = _PyUnicode_gettyperecord(ch)
    if ctype.flags & _PyUnicode_TyperecordMasks.DIGIT_MASK:
        return ctype.digit
    return -1


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L140-L145 # noqa: E501
@register_jitable
def _PyUnicode_IsNumeric(ch):
    ctype = _PyUnicode_gettyperecord(ch)
    return ctype.flags & _PyUnicode_TyperecordMasks.NUMERIC_MASK != 0


# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L160-L165 # noqa: E501
@register_jitable
def _PyUnicode_IsPrintable(ch):
    ctype = _PyUnicode_gettyperecord(ch)
    return ctype.flags & _PyUnicode_TyperecordMasks.PRINTABLE_MASK != 0
ctype.flags & _PyUnicode_TyperecordMasks.LOWER_MASK != 0 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L180-L185 # noqa: E501 +@register_jitable +def _PyUnicode_IsUppercase(ch): + ctype = _PyUnicode_gettyperecord(ch) + return ctype.flags & _PyUnicode_TyperecordMasks.UPPER_MASK != 0 + + +@register_jitable +def _PyUnicode_IsLineBreak(ch): + ctype = _PyUnicode_gettyperecord(ch) + return ctype.flags & _PyUnicode_TyperecordMasks.LINEBREAK_MASK != 0 + + +@register_jitable +def _PyUnicode_ToUppercase(ch): + raise NotImplementedError + + +@register_jitable +def _PyUnicode_ToLowercase(ch): + raise NotImplementedError + + +# From: https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Objects/unicodectype.c#L211-L225 # noqa: E501 +@register_jitable +def _PyUnicode_ToLowerFull(ch, res): + ctype = _PyUnicode_gettyperecord(ch) + if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK): + index = ctype.lower & 0xFFFF + n = ctype.lower >> 24 + for i in range(n): + res[i] = _PyUnicode_ExtendedCase(index + i) + return n + res[0] = ch + ctype.lower + return 1 + + +# From: https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Objects/unicodectype.c#L227-L241 # noqa: E501 +@register_jitable +def _PyUnicode_ToTitleFull(ch, res): + ctype = _PyUnicode_gettyperecord(ch) + if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK): + index = ctype.title & 0xFFFF + n = ctype.title >> 24 + for i in range(n): + res[i] = _PyUnicode_ExtendedCase(index + i) + return n + res[0] = ch + ctype.title + return 1 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L243-L257 # noqa: E501 +@register_jitable +def _PyUnicode_ToUpperFull(ch, res): + ctype = _PyUnicode_gettyperecord(ch) + if (ctype.flags & _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK): + index = ctype.upper & 0xFFFF + n = ctype.upper >> 24 + for 
i in range(n): + # Perhaps needed to use unicode._set_code_point() here + res[i] = _PyUnicode_ExtendedCase(index + i) + return n + res[0] = ch + ctype.upper + return 1 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L259-L272 # noqa: E501 +@register_jitable +def _PyUnicode_ToFoldedFull(ch, res): + ctype = _PyUnicode_gettyperecord(ch) + extended_case_mask = _PyUnicode_TyperecordMasks.EXTENDED_CASE_MASK + if ctype.flags & extended_case_mask and (ctype.lower >> 20) & 7: + index = (ctype.lower & 0xFFFF) + (ctype.lower >> 24) + n = (ctype.lower >> 20) & 7 + for i in range(n): + res[i] = _PyUnicode_ExtendedCase(index + i) + return n + return _PyUnicode_ToLowerFull(ch, res) + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L274-L279 # noqa: E501 +@register_jitable +def _PyUnicode_IsCased(ch): + ctype = _PyUnicode_gettyperecord(ch) + return ctype.flags & _PyUnicode_TyperecordMasks.CASED_MASK != 0 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L281-L286 # noqa: E501 +@register_jitable +def _PyUnicode_IsCaseIgnorable(ch): + ctype = _PyUnicode_gettyperecord(ch) + return ctype.flags & _PyUnicode_TyperecordMasks.CASE_IGNORABLE_MASK != 0 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L123-L135 # noqa: E501 +@register_jitable +def _PyUnicode_IsDigit(ch): + if _PyUnicode_ToDigit(ch) < 0: + return 0 + return 1 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L106-L118 # noqa: E501 +@register_jitable +def _PyUnicode_IsDecimalDigit(ch): + if _PyUnicode_ToDecimalDigit(ch) < 0: + return 0 + return 1 + + +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L291-L296 # noqa: E501 
+@register_jitable +def _PyUnicode_IsSpace(ch): + ctype = _PyUnicode_gettyperecord(ch) + return ctype.flags & _PyUnicode_TyperecordMasks.SPACE_MASK != 0 + + +@register_jitable +def _PyUnicode_IsAlpha(ch): + ctype = _PyUnicode_gettyperecord(ch) + return ctype.flags & _PyUnicode_TyperecordMasks.ALPHA_MASK != 0 + + +# End code related to/from CPython's unicodectype impl +# ------------------------------------------------------------------------------ + + +# ------------------------------------------------------------------------------ +# Start code related to/from CPython's pyctype + +# From the definition in CPython's Include/pyctype.h +# From: https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L5-L11 # noqa: E501 +class _PY_CTF(IntEnum): + LOWER = 0x01 + UPPER = 0x02 + ALPHA = 0x01 | 0x02 + DIGIT = 0x04 + ALNUM = 0x01 | 0x02 | 0x04 + SPACE = 0x08 + XDIGIT = 0x10 + + +# From the definition in CPython's Python/pyctype.c +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L5 # noqa: E501 +_Py_ctype_table = np.array([ + 0, # 0x0 '\x00' + 0, # 0x1 '\x01' + 0, # 0x2 '\x02' + 0, # 0x3 '\x03' + 0, # 0x4 '\x04' + 0, # 0x5 '\x05' + 0, # 0x6 '\x06' + 0, # 0x7 '\x07' + 0, # 0x8 '\x08' + _PY_CTF.SPACE, # 0x9 '\t' + _PY_CTF.SPACE, # 0xa '\n' + _PY_CTF.SPACE, # 0xb '\v' + _PY_CTF.SPACE, # 0xc '\f' + _PY_CTF.SPACE, # 0xd '\r' + 0, # 0xe '\x0e' + 0, # 0xf '\x0f' + 0, # 0x10 '\x10' + 0, # 0x11 '\x11' + 0, # 0x12 '\x12' + 0, # 0x13 '\x13' + 0, # 0x14 '\x14' + 0, # 0x15 '\x15' + 0, # 0x16 '\x16' + 0, # 0x17 '\x17' + 0, # 0x18 '\x18' + 0, # 0x19 '\x19' + 0, # 0x1a '\x1a' + 0, # 0x1b '\x1b' + 0, # 0x1c '\x1c' + 0, # 0x1d '\x1d' + 0, # 0x1e '\x1e' + 0, # 0x1f '\x1f' + _PY_CTF.SPACE, # 0x20 ' ' + 0, # 0x21 '!' 
+ 0, # 0x22 '"' + 0, # 0x23 '#' + 0, # 0x24 '$' + 0, # 0x25 '%' + 0, # 0x26 '&' + 0, # 0x27 "'" + 0, # 0x28 '(' + 0, # 0x29 ')' + 0, # 0x2a '*' + 0, # 0x2b '+' + 0, # 0x2c ',' + 0, # 0x2d '-' + 0, # 0x2e '.' + 0, # 0x2f '/' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x30 '0' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x31 '1' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x32 '2' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x33 '3' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x34 '4' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x35 '5' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x36 '6' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x37 '7' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x38 '8' + _PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x39 '9' + 0, # 0x3a ':' + 0, # 0x3b ';' + 0, # 0x3c '<' + 0, # 0x3d '=' + 0, # 0x3e '>' + 0, # 0x3f '?' + 0, # 0x40 '@' + _PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x41 'A' + _PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x42 'B' + _PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x43 'C' + _PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x44 'D' + _PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x45 'E' + _PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x46 'F' + _PY_CTF.UPPER, # 0x47 'G' + _PY_CTF.UPPER, # 0x48 'H' + _PY_CTF.UPPER, # 0x49 'I' + _PY_CTF.UPPER, # 0x4a 'J' + _PY_CTF.UPPER, # 0x4b 'K' + _PY_CTF.UPPER, # 0x4c 'L' + _PY_CTF.UPPER, # 0x4d 'M' + _PY_CTF.UPPER, # 0x4e 'N' + _PY_CTF.UPPER, # 0x4f 'O' + _PY_CTF.UPPER, # 0x50 'P' + _PY_CTF.UPPER, # 0x51 'Q' + _PY_CTF.UPPER, # 0x52 'R' + _PY_CTF.UPPER, # 0x53 'S' + _PY_CTF.UPPER, # 0x54 'T' + _PY_CTF.UPPER, # 0x55 'U' + _PY_CTF.UPPER, # 0x56 'V' + _PY_CTF.UPPER, # 0x57 'W' + _PY_CTF.UPPER, # 0x58 'X' + _PY_CTF.UPPER, # 0x59 'Y' + _PY_CTF.UPPER, # 0x5a 'Z' + 0, # 0x5b '[' + 0, # 0x5c '\\' + 0, # 0x5d ']' + 0, # 0x5e '^' + 0, # 0x5f '_' + 0, # 0x60 '`' + _PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x61 'a' + _PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x62 'b' + _PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x63 'c' + _PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x64 'd' + _PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x65 'e' + _PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x66 'f' + _PY_CTF.LOWER, 
# 0x67 'g' + _PY_CTF.LOWER, # 0x68 'h' + _PY_CTF.LOWER, # 0x69 'i' + _PY_CTF.LOWER, # 0x6a 'j' + _PY_CTF.LOWER, # 0x6b 'k' + _PY_CTF.LOWER, # 0x6c 'l' + _PY_CTF.LOWER, # 0x6d 'm' + _PY_CTF.LOWER, # 0x6e 'n' + _PY_CTF.LOWER, # 0x6f 'o' + _PY_CTF.LOWER, # 0x70 'p' + _PY_CTF.LOWER, # 0x71 'q' + _PY_CTF.LOWER, # 0x72 'r' + _PY_CTF.LOWER, # 0x73 's' + _PY_CTF.LOWER, # 0x74 't' + _PY_CTF.LOWER, # 0x75 'u' + _PY_CTF.LOWER, # 0x76 'v' + _PY_CTF.LOWER, # 0x77 'w' + _PY_CTF.LOWER, # 0x78 'x' + _PY_CTF.LOWER, # 0x79 'y' + _PY_CTF.LOWER, # 0x7a 'z' + 0, # 0x7b '{' + 0, # 0x7c '|' + 0, # 0x7d '}' + 0, # 0x7e '~' + 0, # 0x7f '\x7f' + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +], dtype=np.intc) + + +# From the definition in CPython's Python/pyctype.c +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L145 # noqa: E501 +_Py_ctype_tolower = np.array([ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 
0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +], dtype=np.uint8) + + +# From the definition in CPython's Python/pyctype.c +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L180 +_Py_ctype_toupper = np.array([ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 
0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +], dtype=np.uint8) + + +class _PY_CTF_LB(IntEnum): + LINE_BREAK = 0x01 + LINE_FEED = 0x02 + CARRIAGE_RETURN = 0x04 + + +_Py_ctype_islinebreak = np.array([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + _PY_CTF_LB.LINE_BREAK | _PY_CTF_LB.LINE_FEED, # 0xa '\n' + _PY_CTF_LB.LINE_BREAK, # 0xb '\v' + _PY_CTF_LB.LINE_BREAK, # 0xc '\f' + _PY_CTF_LB.LINE_BREAK | _PY_CTF_LB.CARRIAGE_RETURN, # 0xd '\r' + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + _PY_CTF_LB.LINE_BREAK, # 0x1c '\x1c' + _PY_CTF_LB.LINE_BREAK, # 0x1d '\x1d' + _PY_CTF_LB.LINE_BREAK, # 0x1e '\x1e' + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + _PY_CTF_LB.LINE_BREAK, # 0x85 '\x85' + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, +], dtype=np.intc) + + +# Translation of: 
+# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pymacro.h#L25 # noqa: E501 +@register_jitable +def _Py_CHARMASK(ch): + """ + Equivalent to the CPython macro `Py_CHARMASK()`, masks off all but the + lowest 256 bits of ch. + """ + return types.uint8(ch) & types.uint8(0xff) + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L30 # noqa: E501 +@register_jitable +def _Py_TOUPPER(ch): + """ + Equivalent to the CPython macro `Py_TOUPPER()` converts an ASCII range + code point to the upper equivalent + """ + return _Py_ctype_toupper[_Py_CHARMASK(ch)] + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L29 # noqa: E501 +@register_jitable +def _Py_TOLOWER(ch): + """ + Equivalent to the CPython macro `Py_TOLOWER()` converts an ASCII range + code point to the lower equivalent + """ + return _Py_ctype_tolower[_Py_CHARMASK(ch)] + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L18 # noqa: E501 +@register_jitable +def _Py_ISLOWER(ch): + """ + Equivalent to the CPython macro `Py_ISLOWER()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.LOWER + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L19 # noqa: E501 +@register_jitable +def _Py_ISUPPER(ch): + """ + Equivalent to the CPython macro `Py_ISUPPER()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.UPPER + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L20 # noqa: E501 +@register_jitable +def _Py_ISALPHA(ch): + """ + Equivalent to the CPython macro `Py_ISALPHA()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.ALPHA + + +# Translation of: +# 
https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L21 # noqa: E501 +@register_jitable +def _Py_ISDIGIT(ch): + """ + Equivalent to the CPython macro `Py_ISDIGIT()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.DIGIT + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L22 # noqa: E501 +@register_jitable +def _Py_ISXDIGIT(ch): + """ + Equivalent to the CPython macro `Py_ISXDIGIT()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.XDIGIT + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L23 # noqa: E501 +@register_jitable +def _Py_ISALNUM(ch): + """ + Equivalent to the CPython macro `Py_ISALNUM()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.ALNUM + + +# Translation of: +# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Include/pyctype.h#L24 # noqa: E501 +@register_jitable +def _Py_ISSPACE(ch): + """ + Equivalent to the CPython macro `Py_ISSPACE()` + """ + return _Py_ctype_table[_Py_CHARMASK(ch)] & _PY_CTF.SPACE + + +@register_jitable +def _Py_ISLINEBREAK(ch): + """Check if character is ASCII line break""" + return _Py_ctype_islinebreak[_Py_CHARMASK(ch)] & _PY_CTF_LB.LINE_BREAK + + +@register_jitable +def _Py_ISLINEFEED(ch): + """Check if character is line feed `\n`""" + return _Py_ctype_islinebreak[_Py_CHARMASK(ch)] & _PY_CTF_LB.LINE_FEED + + +@register_jitable +def _Py_ISCARRIAGERETURN(ch): + """Check if character is carriage return `\r`""" + return _Py_ctype_islinebreak[_Py_CHARMASK(ch)] & _PY_CTF_LB.CARRIAGE_RETURN + + +# End code related to/from CPython's pyctype +# ------------------------------------------------------------------------------ diff --git a/lib/python3.10/site-packages/numba/cuda/cudadrv/devices.py b/lib/python3.10/site-packages/numba/cuda/cudadrv/devices.py new file mode 100644 index 
0000000000000000000000000000000000000000..6cc9e2e393fc07276f01c5c4c14952bd81f04b50 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cudadrv/devices.py @@ -0,0 +1,248 @@ +""" +Expose each GPU devices directly. + +This module implements a API that is like the "CUDA runtime" context manager +for managing CUDA context stack and clean up. It relies on thread-local globals +to separate the context stack management of each thread. Contexts are also +shareable among threads. Only the main thread can destroy Contexts. + +Note: +- This module must be imported by the main-thread. + +""" +import functools +import threading +from contextlib import contextmanager + +from .driver import driver, USE_NV_BINDING + + +class _DeviceList(object): + def __getattr__(self, attr): + # First time looking at "lst" attribute. + if attr == "lst": + # Device list is not initialized. + # Query all CUDA devices. + numdev = driver.get_device_count() + gpus = [_DeviceContextManager(driver.get_device(devid)) + for devid in range(numdev)] + # Define "lst" to avoid re-initialization + self.lst = gpus + return gpus + + # Other attributes + return super(_DeviceList, self).__getattr__(attr) + + def __getitem__(self, devnum): + ''' + Returns the context manager for device *devnum*. + ''' + return self.lst[devnum] + + def __str__(self): + return ', '.join([str(d) for d in self.lst]) + + def __iter__(self): + return iter(self.lst) + + def __len__(self): + return len(self.lst) + + @property + def current(self): + """Returns the active device or None if there's no active device + """ + with driver.get_active_context() as ac: + devnum = ac.devnum + if devnum is not None: + return self[devnum] + + +class _DeviceContextManager(object): + """ + Provides a context manager for executing in the context of the chosen + device. The normal use of instances of this type is from + ``numba.cuda.gpus``. 
For example, to execute on device 2:: + + with numba.cuda.gpus[2]: + d_a = numba.cuda.to_device(a) + + to copy the array *a* onto device 2, referred to by *d_a*. + """ + + def __init__(self, device): + self._device = device + + def __getattr__(self, item): + return getattr(self._device, item) + + def __enter__(self): + _runtime.get_or_create_context(self._device.id) + + def __exit__(self, exc_type, exc_val, exc_tb): + # this will verify that we are popping the right device context. + self._device.get_primary_context().pop() + + def __str__(self): + return "".format(self=self) + + +class _Runtime(object): + """Emulate the CUDA runtime context management. + + It owns all Devices and Contexts. + Keeps at most one Context per Device + """ + + def __init__(self): + self.gpus = _DeviceList() + + # For caching the attached CUDA Context + self._tls = threading.local() + + # Remember the main thread + # Only the main thread can *actually* destroy + self._mainthread = threading.current_thread() + + # Avoid mutation of runtime state in multithreaded programs + self._lock = threading.RLock() + + @contextmanager + def ensure_context(self): + """Ensure a CUDA context is available inside the context. + + On entrance, queries the CUDA driver for an active CUDA context and + attaches it in TLS for subsequent calls so they do not need to query + the CUDA driver again. On exit, detach the CUDA context from the TLS. + + This will allow us to pickup thirdparty activated CUDA context in + any top-level Numba CUDA API. + """ + with driver.get_active_context(): + oldctx = self._get_attached_context() + newctx = self.get_or_create_context(None) + self._set_attached_context(newctx) + try: + yield + finally: + self._set_attached_context(oldctx) + + def get_or_create_context(self, devnum): + """Returns the primary context and push+create it if needed + for *devnum*. If *devnum* is None, use the active CUDA context (must + be primary) or create a new one with ``devnum=0``. 
+ """ + if devnum is None: + attached_ctx = self._get_attached_context() + if attached_ctx is None: + return self._get_or_create_context_uncached(devnum) + else: + return attached_ctx + else: + if USE_NV_BINDING: + devnum = int(devnum) + return self._activate_context_for(devnum) + + def _get_or_create_context_uncached(self, devnum): + """See also ``get_or_create_context(devnum)``. + This version does not read the cache. + """ + with self._lock: + # Try to get the active context in the CUDA stack or + # activate GPU-0 with the primary context + with driver.get_active_context() as ac: + if not ac: + return self._activate_context_for(0) + else: + # Get primary context for the active device + ctx = self.gpus[ac.devnum].get_primary_context() + # Is active context the primary context? + if USE_NV_BINDING: + ctx_handle = int(ctx.handle) + ac_ctx_handle = int(ac.context_handle) + else: + ctx_handle = ctx.handle.value + ac_ctx_handle = ac.context_handle.value + if ctx_handle != ac_ctx_handle: + msg = ('Numba cannot operate on non-primary' + ' CUDA context {:x}') + raise RuntimeError(msg.format(ac_ctx_handle)) + # Ensure the context is ready + ctx.prepare_for_use() + return ctx + + def _activate_context_for(self, devnum): + with self._lock: + gpu = self.gpus[devnum] + newctx = gpu.get_primary_context() + # Detect unexpected context switch + cached_ctx = self._get_attached_context() + if cached_ctx is not None and cached_ctx is not newctx: + raise RuntimeError('Cannot switch CUDA-context.') + newctx.push() + return newctx + + def _get_attached_context(self): + return getattr(self._tls, 'attached_context', None) + + def _set_attached_context(self, ctx): + self._tls.attached_context = ctx + + def reset(self): + """Clear all contexts in the thread. Destroy the context if and only + if we are in the main thread. + """ + # Pop all active context. 
+ while driver.pop_active_context() is not None: + pass + + # If it is the main thread + if threading.current_thread() == self._mainthread: + self._destroy_all_contexts() + + def _destroy_all_contexts(self): + # Reset all devices + for gpu in self.gpus: + gpu.reset() + + +_runtime = _Runtime() + +# ================================ PUBLIC API ================================ + +gpus = _runtime.gpus + + +def get_context(devnum=None): + """Get the current device or use a device by device number, and + return the CUDA context. + """ + return _runtime.get_or_create_context(devnum) + + +def require_context(fn): + """ + A decorator that ensures a CUDA context is available when *fn* is executed. + + Note: The function *fn* cannot switch CUDA-context. + """ + @functools.wraps(fn) + def _require_cuda_context(*args, **kws): + with _runtime.ensure_context(): + return fn(*args, **kws) + + return _require_cuda_context + + +def reset(): + """Reset the CUDA subsystem for the current thread. + + In the main thread: + This removes all CUDA contexts. Only use this at shutdown or for + cleaning up between tests. + + In non-main threads: + This clear the CUDA context stack only. + + """ + _runtime.reset() diff --git a/lib/python3.10/site-packages/numba/cuda/cudadrv/libs.py b/lib/python3.10/site-packages/numba/cuda/cudadrv/libs.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3ed9c96e4057b9742f1a12021657fc40ea2580 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cudadrv/libs.py @@ -0,0 +1,176 @@ +"""CUDA Toolkit libraries lookup utilities. + +CUDA Toolkit libraries can be available via either: + +- the `cuda-nvcc` and `cuda-nvrtc` conda packages for CUDA 12, +- the `cudatoolkit` conda package for CUDA 11, +- a user supplied location from CUDA_HOME, +- a system wide location, +- package-specific locations (e.g. the Debian NVIDIA packages), +- or can be discovered by the system loader. 
+""" + +import os +import sys +import ctypes + +from numba.misc.findlib import find_lib +from numba.cuda.cuda_paths import get_cuda_paths +from numba.cuda.cudadrv.driver import locate_driver_and_loader, load_driver +from numba.cuda.cudadrv.error import CudaSupportError + + +if sys.platform == 'win32': + _dllnamepattern = '%s.dll' + _staticnamepattern = '%s.lib' +elif sys.platform == 'darwin': + _dllnamepattern = 'lib%s.dylib' + _staticnamepattern = 'lib%s.a' +else: + _dllnamepattern = 'lib%s.so' + _staticnamepattern = 'lib%s.a' + + +def get_libdevice(): + d = get_cuda_paths() + paths = d['libdevice'].info + return paths + + +def open_libdevice(): + with open(get_libdevice(), 'rb') as bcfile: + return bcfile.read() + + +def get_cudalib(lib, static=False): + """ + Find the path of a CUDA library based on a search of known locations. If + the search fails, return a generic filename for the library (e.g. + 'libnvvm.so' for 'nvvm') so that we may attempt to load it using the system + loader's search mechanism. + """ + if lib == 'nvvm': + return get_cuda_paths()['nvvm'].info or _dllnamepattern % 'nvvm' + else: + dir_type = 'static_cudalib_dir' if static else 'cudalib_dir' + libdir = get_cuda_paths()[dir_type].info + + candidates = find_lib(lib, libdir, static=static) + namepattern = _staticnamepattern if static else _dllnamepattern + return max(candidates) if candidates else namepattern % lib + + +def open_cudalib(lib): + path = get_cudalib(lib) + return ctypes.CDLL(path) + + +def check_static_lib(path): + if not os.path.isfile(path): + raise FileNotFoundError(f'{path} not found') + + +def _get_source_variable(lib, static=False): + if lib == 'nvvm': + return get_cuda_paths()['nvvm'].by + elif lib == 'libdevice': + return get_cuda_paths()['libdevice'].by + else: + dir_type = 'static_cudalib_dir' if static else 'cudalib_dir' + return get_cuda_paths()[dir_type].by + + +def test(): + """Test library lookup. Path info is printed to stdout. 
+ """ + failed = False + + # Check for the driver + try: + dlloader, candidates = locate_driver_and_loader() + print('Finding driver from candidates:') + for location in candidates: + print(f'\t{location}') + print(f'Using loader {dlloader}') + print('\tTrying to load driver', end='...') + dll, path = load_driver(dlloader, candidates) + print('\tok') + print(f'\t\tLoaded from {path}') + except CudaSupportError as e: + print(f'\tERROR: failed to open driver: {e}') + failed = True + + # Find the absolute location of the driver on Linux. Various driver-related + # issues have been reported by WSL2 users, and it is almost always due to a + # Linux (i.e. not- WSL2) driver being installed in a WSL2 system. + # Providing the absolute location of the driver indicates its version + # number in the soname (e.g. "libcuda.so.530.30.02"), which can be used to + # look up whether the driver was intended for "native" Linux. + if sys.platform == 'linux' and not failed: + pid = os.getpid() + mapsfile = os.path.join(os.path.sep, 'proc', f'{pid}', 'maps') + try: + with open(mapsfile) as f: + maps = f.read() + # It's difficult to predict all that might go wrong reading the maps + # file - in case various error conditions ensue (the file is not found, + # not readable, etc.) we use OSError to hopefully catch any of them. + except OSError: + # It's helpful to report that this went wrong to the user, but we + # don't set failed to True because this doesn't have any connection + # to actual CUDA functionality. 
+ print(f'\tERROR: Could not open {mapsfile} to determine absolute ' + 'path to libcuda.so') + else: + # In this case we could read the maps, so we can report the + # relevant ones to the user + locations = set(s for s in maps.split() if 'libcuda.so' in s) + print('\tMapped libcuda.so paths:') + for location in locations: + print(f'\t\t{location}') + + # Checks for dynamic libraries + libs = 'nvvm nvrtc cudart'.split() + for lib in libs: + path = get_cudalib(lib) + print('Finding {} from {}'.format(lib, _get_source_variable(lib))) + print('\tLocated at', path) + + try: + print('\tTrying to open library', end='...') + open_cudalib(lib) + print('\tok') + except OSError as e: + print('\tERROR: failed to open %s:\n%s' % (lib, e)) + failed = True + + # Check for cudadevrt (the only static library) + lib = 'cudadevrt' + path = get_cudalib(lib, static=True) + print('Finding {} from {}'.format(lib, _get_source_variable(lib, + static=True))) + print('\tLocated at', path) + + try: + print('\tChecking library', end='...') + check_static_lib(path) + print('\tok') + except FileNotFoundError as e: + print('\tERROR: failed to find %s:\n%s' % (lib, e)) + failed = True + + # Check for libdevice + where = _get_source_variable('libdevice') + print(f'Finding libdevice from {where}') + path = get_libdevice() + print('\tLocated at', path) + + try: + print('\tChecking library', end='...') + check_static_lib(path) + print('\tok') + except FileNotFoundError as e: + print('\tERROR: failed to find %s:\n%s' % (lib, e)) + failed = True + + return not failed diff --git a/lib/python3.10/site-packages/numba/cuda/cudadrv/ndarray.py b/lib/python3.10/site-packages/numba/cuda/cudadrv/ndarray.py new file mode 100644 index 0000000000000000000000000000000000000000..bca40dfd977dc3c657835d93fd45142d16fe46f7 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cudadrv/ndarray.py @@ -0,0 +1,20 @@ +from numba.cuda.cudadrv import devices, driver +from numba.core.registry import cpu_target + + +def 
_calc_array_sizeof(ndim): + """ + Use the ABI size in the CPU target + """ + ctx = cpu_target.target_context + return ctx.calc_array_sizeof(ndim) + + +def ndarray_device_allocate_data(ary): + """ + Allocate gpu data buffer + """ + datasize = driver.host_memory_size(ary) + # allocate + gpu_data = devices.get_context().memalloc(datasize) + return gpu_data diff --git a/lib/python3.10/site-packages/numba/cuda/cudadrv/nvvm.py b/lib/python3.10/site-packages/numba/cuda/cudadrv/nvvm.py new file mode 100644 index 0000000000000000000000000000000000000000..1da13a325cae089f7bb60aaadffd0d48217e17fe --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cudadrv/nvvm.py @@ -0,0 +1,707 @@ +""" +This is a direct translation of nvvm.h +""" +import logging +import re +import sys +import warnings +from ctypes import (c_void_p, c_int, POINTER, c_char_p, c_size_t, byref, + c_char) + +import threading + +from llvmlite import ir + +from .error import NvvmError, NvvmSupportError, NvvmWarning +from .libs import get_libdevice, open_libdevice, open_cudalib +from numba.core import cgutils, config + + +logger = logging.getLogger(__name__) + +ADDRSPACE_GENERIC = 0 +ADDRSPACE_GLOBAL = 1 +ADDRSPACE_SHARED = 3 +ADDRSPACE_CONSTANT = 4 +ADDRSPACE_LOCAL = 5 + +# Opaque handle for compilation unit +nvvm_program = c_void_p + +# Result code +nvvm_result = c_int + +RESULT_CODE_NAMES = ''' +NVVM_SUCCESS +NVVM_ERROR_OUT_OF_MEMORY +NVVM_ERROR_PROGRAM_CREATION_FAILURE +NVVM_ERROR_IR_VERSION_MISMATCH +NVVM_ERROR_INVALID_INPUT +NVVM_ERROR_INVALID_PROGRAM +NVVM_ERROR_INVALID_IR +NVVM_ERROR_INVALID_OPTION +NVVM_ERROR_NO_MODULE_IN_PROGRAM +NVVM_ERROR_COMPILATION +'''.split() + +for i, k in enumerate(RESULT_CODE_NAMES): + setattr(sys.modules[__name__], k, i) + +# Data layouts. NVVM IR 1.8 (CUDA 11.6) introduced 128-bit integer support. 
+ +_datalayout_original = ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-' + 'i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-' + 'v64:64:64-v128:128:128-n16:32:64') +_datalayout_i128 = ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-' + 'i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-' + 'v64:64:64-v128:128:128-n16:32:64') + + +def is_available(): + """ + Return if libNVVM is available + """ + try: + NVVM() + except NvvmSupportError: + return False + else: + return True + + +_nvvm_lock = threading.Lock() + + +class NVVM(object): + '''Process-wide singleton. + ''' + _PROTOTYPES = { + + # nvvmResult nvvmVersion(int *major, int *minor) + 'nvvmVersion': (nvvm_result, POINTER(c_int), POINTER(c_int)), + + # nvvmResult nvvmCreateProgram(nvvmProgram *cu) + 'nvvmCreateProgram': (nvvm_result, POINTER(nvvm_program)), + + # nvvmResult nvvmDestroyProgram(nvvmProgram *cu) + 'nvvmDestroyProgram': (nvvm_result, POINTER(nvvm_program)), + + # nvvmResult nvvmAddModuleToProgram(nvvmProgram cu, const char *buffer, + # size_t size, const char *name) + 'nvvmAddModuleToProgram': ( + nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p), + + # nvvmResult nvvmLazyAddModuleToProgram(nvvmProgram cu, + # const char* buffer, + # size_t size, + # const char *name) + 'nvvmLazyAddModuleToProgram': ( + nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p), + + # nvvmResult nvvmCompileProgram(nvvmProgram cu, int numOptions, + # const char **options) + 'nvvmCompileProgram': ( + nvvm_result, nvvm_program, c_int, POINTER(c_char_p)), + + # nvvmResult nvvmGetCompiledResultSize(nvvmProgram cu, + # size_t *bufferSizeRet) + 'nvvmGetCompiledResultSize': ( + nvvm_result, nvvm_program, POINTER(c_size_t)), + + # nvvmResult nvvmGetCompiledResult(nvvmProgram cu, char *buffer) + 'nvvmGetCompiledResult': (nvvm_result, nvvm_program, c_char_p), + + # nvvmResult nvvmGetProgramLogSize(nvvmProgram cu, + # size_t *bufferSizeRet) + 'nvvmGetProgramLogSize': (nvvm_result, nvvm_program, 
POINTER(c_size_t)), + + # nvvmResult nvvmGetProgramLog(nvvmProgram cu, char *buffer) + 'nvvmGetProgramLog': (nvvm_result, nvvm_program, c_char_p), + + # nvvmResult nvvmIRVersion (int* majorIR, int* minorIR, int* majorDbg, + # int* minorDbg ) + 'nvvmIRVersion': (nvvm_result, POINTER(c_int), POINTER(c_int), + POINTER(c_int), POINTER(c_int)), + # nvvmResult nvvmVerifyProgram (nvvmProgram prog, int numOptions, + # const char** options) + 'nvvmVerifyProgram': (nvvm_result, nvvm_program, c_int, + POINTER(c_char_p)) + } + + # Singleton reference + __INSTANCE = None + + def __new__(cls): + with _nvvm_lock: + if cls.__INSTANCE is None: + cls.__INSTANCE = inst = object.__new__(cls) + try: + inst.driver = open_cudalib('nvvm') + except OSError as e: + cls.__INSTANCE = None + errmsg = ("libNVVM cannot be found. Do `conda install " + "cudatoolkit`:\n%s") + raise NvvmSupportError(errmsg % e) + + # Find & populate functions + for name, proto in inst._PROTOTYPES.items(): + func = getattr(inst.driver, name) + func.restype = proto[0] + func.argtypes = proto[1:] + setattr(inst, name, func) + + return cls.__INSTANCE + + def __init__(self): + ir_versions = self.get_ir_version() + self._majorIR = ir_versions[0] + self._minorIR = ir_versions[1] + self._majorDbg = ir_versions[2] + self._minorDbg = ir_versions[3] + self._supported_ccs = get_supported_ccs() + + @property + def data_layout(self): + if (self._majorIR, self._minorIR) < (1, 8): + return _datalayout_original + else: + return _datalayout_i128 + + @property + def supported_ccs(self): + return self._supported_ccs + + def get_version(self): + major = c_int() + minor = c_int() + err = self.nvvmVersion(byref(major), byref(minor)) + self.check_error(err, 'Failed to get version.') + return major.value, minor.value + + def get_ir_version(self): + majorIR = c_int() + minorIR = c_int() + majorDbg = c_int() + minorDbg = c_int() + err = self.nvvmIRVersion(byref(majorIR), byref(minorIR), + byref(majorDbg), byref(minorDbg)) + 
self.check_error(err, 'Failed to get IR version.') + return majorIR.value, minorIR.value, majorDbg.value, minorDbg.value + + def check_error(self, error, msg, exit=False): + if error: + exc = NvvmError(msg, RESULT_CODE_NAMES[error]) + if exit: + print(exc) + sys.exit(1) + else: + raise exc + + +class CompilationUnit(object): + def __init__(self): + self.driver = NVVM() + self._handle = nvvm_program() + err = self.driver.nvvmCreateProgram(byref(self._handle)) + self.driver.check_error(err, 'Failed to create CU') + + def __del__(self): + driver = NVVM() + err = driver.nvvmDestroyProgram(byref(self._handle)) + driver.check_error(err, 'Failed to destroy CU', exit=True) + + def add_module(self, buffer): + """ + Add a module level NVVM IR to a compilation unit. + - The buffer should contain an NVVM module IR either in the bitcode + representation (LLVM3.0) or in the text representation. + """ + err = self.driver.nvvmAddModuleToProgram(self._handle, buffer, + len(buffer), None) + self.driver.check_error(err, 'Failed to add module') + + def lazy_add_module(self, buffer): + """ + Lazily add an NVVM IR module to a compilation unit. + The buffer should contain NVVM module IR either in the bitcode + representation or in the text representation. + """ + err = self.driver.nvvmLazyAddModuleToProgram(self._handle, buffer, + len(buffer), None) + self.driver.check_error(err, 'Failed to add module') + + def compile(self, **options): + """Perform Compilation. + + Compilation options are accepted as keyword arguments, with the + following considerations: + + - Underscores (`_`) in option names are converted to dashes (`-`), to + match NVVM's option name format. + - Options that take a value will be emitted in the form + "-=". + - Booleans passed as option values will be converted to integers. + - Options which take no value (such as `-gen-lto`) should have a value + of `None` passed in and will be emitted in the form "-". 
+ + For documentation on NVVM compilation options, see the CUDA Toolkit + Documentation: + + https://docs.nvidia.com/cuda/libnvvm-api/index.html#_CPPv418nvvmCompileProgram11nvvmProgramiPPKc + """ + + def stringify_option(k, v): + k = k.replace('_', '-') + + if v is None: + return f'-{k}' + + if isinstance(v, bool): + v = int(v) + + return f'-{k}={v}' + + options = [stringify_option(k, v) for k, v in options.items()] + + c_opts = (c_char_p * len(options))(*[c_char_p(x.encode('utf8')) + for x in options]) + # verify + err = self.driver.nvvmVerifyProgram(self._handle, len(options), c_opts) + self._try_error(err, 'Failed to verify\n') + + # compile + err = self.driver.nvvmCompileProgram(self._handle, len(options), c_opts) + self._try_error(err, 'Failed to compile\n') + + # get result + reslen = c_size_t() + err = self.driver.nvvmGetCompiledResultSize(self._handle, byref(reslen)) + + self._try_error(err, 'Failed to get size of compiled result.') + + output_buffer = (c_char * reslen.value)() + err = self.driver.nvvmGetCompiledResult(self._handle, output_buffer) + self._try_error(err, 'Failed to get compiled result.') + + # get log + self.log = self.get_log() + if self.log: + warnings.warn(self.log, category=NvvmWarning) + + return output_buffer[:] + + def _try_error(self, err, msg): + self.driver.check_error(err, "%s\n%s" % (msg, self.get_log())) + + def get_log(self): + reslen = c_size_t() + err = self.driver.nvvmGetProgramLogSize(self._handle, byref(reslen)) + self.driver.check_error(err, 'Failed to get compilation log size.') + + if reslen.value > 1: + logbuf = (c_char * reslen.value)() + err = self.driver.nvvmGetProgramLog(self._handle, logbuf) + self.driver.check_error(err, 'Failed to get compilation log.') + + return logbuf.value.decode('utf8') # populate log attribute + + return '' + + +COMPUTE_CAPABILITIES = ( + (3, 5), (3, 7), + (5, 0), (5, 2), (5, 3), + (6, 0), (6, 1), (6, 2), + (7, 0), (7, 2), (7, 5), + (8, 0), (8, 6), (8, 7), (8, 9), + (9, 0) +) + +# Maps CTK 
version -> (min supported cc, max supported cc) inclusive +CTK_SUPPORTED = { + (11, 2): ((3, 5), (8, 6)), + (11, 3): ((3, 5), (8, 6)), + (11, 4): ((3, 5), (8, 7)), + (11, 5): ((3, 5), (8, 7)), + (11, 6): ((3, 5), (8, 7)), + (11, 7): ((3, 5), (8, 7)), + (11, 8): ((3, 5), (9, 0)), + (12, 0): ((5, 0), (9, 0)), + (12, 1): ((5, 0), (9, 0)), + (12, 2): ((5, 0), (9, 0)), + (12, 3): ((5, 0), (9, 0)), + (12, 4): ((5, 0), (9, 0)), +} + + +def ccs_supported_by_ctk(ctk_version): + try: + # For supported versions, we look up the range of supported CCs + min_cc, max_cc = CTK_SUPPORTED[ctk_version] + return tuple([cc for cc in COMPUTE_CAPABILITIES + if min_cc <= cc <= max_cc]) + except KeyError: + # For unsupported CUDA toolkit versions, all we can do is assume all + # non-deprecated versions we are aware of are supported. + return tuple([cc for cc in COMPUTE_CAPABILITIES + if cc >= config.CUDA_DEFAULT_PTX_CC]) + + +def get_supported_ccs(): + try: + from numba.cuda.cudadrv.runtime import runtime + cudart_version = runtime.get_version() + except: # noqa: E722 + # We can't support anything if there's an error getting the runtime + # version (e.g. if it's not present or there's another issue) + _supported_cc = () + return _supported_cc + + # Ensure the minimum CTK version requirement is met + min_cudart = min(CTK_SUPPORTED) + if cudart_version < min_cudart: + _supported_cc = () + ctk_ver = f"{cudart_version[0]}.{cudart_version[1]}" + unsupported_ver = (f"CUDA Toolkit {ctk_ver} is unsupported by Numba - " + f"{min_cudart[0]}.{min_cudart[1]} is the minimum " + "required version.") + warnings.warn(unsupported_ver) + return _supported_cc + + _supported_cc = ccs_supported_by_ctk(cudart_version) + return _supported_cc + + +def find_closest_arch(mycc): + """ + Given a compute capability, return the closest compute capability supported + by the CUDA toolkit. 
+ + :param mycc: Compute capability as a tuple ``(MAJOR, MINOR)`` + :return: Closest supported CC as a tuple ``(MAJOR, MINOR)`` + """ + supported_ccs = NVVM().supported_ccs + + if not supported_ccs: + msg = "No supported GPU compute capabilities found. " \ + "Please check your cudatoolkit version matches your CUDA version." + raise NvvmSupportError(msg) + + for i, cc in enumerate(supported_ccs): + if cc == mycc: + # Matches + return cc + elif cc > mycc: + # Exceeded + if i == 0: + # CC lower than supported + msg = "GPU compute capability %d.%d is not supported" \ + "(requires >=%d.%d)" % (mycc + cc) + raise NvvmSupportError(msg) + else: + # return the previous CC + return supported_ccs[i - 1] + + # CC higher than supported + return supported_ccs[-1] # Choose the highest + + +def get_arch_option(major, minor): + """Matches with the closest architecture option + """ + if config.FORCE_CUDA_CC: + arch = config.FORCE_CUDA_CC + else: + arch = find_closest_arch((major, minor)) + return 'compute_%d%d' % arch + + +MISSING_LIBDEVICE_FILE_MSG = '''Missing libdevice file. +Please ensure you have a CUDA Toolkit 11.2 or higher. 
+For CUDA 12, ``cuda-nvcc`` and ``cuda-nvrtc`` are required: + + $ conda install -c conda-forge cuda-nvcc cuda-nvrtc "cuda-version>=12.0" + +For CUDA 11, ``cudatoolkit`` is required: + + $ conda install -c conda-forge cudatoolkit "cuda-version>=11.2,<12.0" +''' + + +class LibDevice(object): + _cache_ = None + + def __init__(self): + if self._cache_ is None: + if get_libdevice() is None: + raise RuntimeError(MISSING_LIBDEVICE_FILE_MSG) + self._cache_ = open_libdevice() + + self.bc = self._cache_ + + def get(self): + return self.bc + + +cas_nvvm = """ + %cas_success = cmpxchg volatile {Ti}* %iptr, {Ti} %old, {Ti} %new monotonic monotonic + %cas = extractvalue {{ {Ti}, i1 }} %cas_success, 0 +""" # noqa: E501 + + +# Translation of code from CUDA Programming Guide v6.5, section B.12 +ir_numba_atomic_binary_template = """ +define internal {T} @___numba_atomic_{T}_{FUNC}({T}* %ptr, {T} %val) alwaysinline {{ +entry: + %iptr = bitcast {T}* %ptr to {Ti}* + %old2 = load volatile {Ti}, {Ti}* %iptr + br label %attempt + +attempt: + %old = phi {Ti} [ %old2, %entry ], [ %cas, %attempt ] + %dold = bitcast {Ti} %old to {T} + %dnew = {OP} {T} %dold, %val + %new = bitcast {T} %dnew to {Ti} + {CAS} + %repeat = icmp ne {Ti} %cas, %old + br i1 %repeat, label %attempt, label %done + +done: + %result = bitcast {Ti} %old to {T} + ret {T} %result +}} +""" # noqa: E501 + +ir_numba_atomic_inc_template = """ +define internal {T} @___numba_atomic_{Tu}_inc({T}* %iptr, {T} %val) alwaysinline {{ +entry: + %old2 = load volatile {T}, {T}* %iptr + br label %attempt + +attempt: + %old = phi {T} [ %old2, %entry ], [ %cas, %attempt ] + %bndchk = icmp ult {T} %old, %val + %inc = add {T} %old, 1 + %new = select i1 %bndchk, {T} %inc, {T} 0 + {CAS} + %repeat = icmp ne {T} %cas, %old + br i1 %repeat, label %attempt, label %done + +done: + ret {T} %old +}} +""" # noqa: E501 + +ir_numba_atomic_dec_template = """ +define internal {T} @___numba_atomic_{Tu}_dec({T}* %iptr, {T} %val) alwaysinline {{ +entry: + 
%old2 = load volatile {T}, {T}* %iptr + br label %attempt + +attempt: + %old = phi {T} [ %old2, %entry ], [ %cas, %attempt ] + %dec = add {T} %old, -1 + %bndchk = icmp ult {T} %dec, %val + %new = select i1 %bndchk, {T} %dec, {T} %val + {CAS} + %repeat = icmp ne {T} %cas, %old + br i1 %repeat, label %attempt, label %done + +done: + ret {T} %old +}} +""" # noqa: E501 + +ir_numba_atomic_minmax_template = """ +define internal {T} @___numba_atomic_{T}_{NAN}{FUNC}({T}* %ptr, {T} %val) alwaysinline {{ +entry: + %ptrval = load volatile {T}, {T}* %ptr + ; Return early when: + ; - For nanmin / nanmax when val is a NaN + ; - For min / max when val or ptr is a NaN + %early_return = fcmp uno {T} %val, %{PTR_OR_VAL}val + br i1 %early_return, label %done, label %lt_check + +lt_check: + %dold = phi {T} [ %ptrval, %entry ], [ %dcas, %attempt ] + ; Continue attempts if dold less or greater than val (depending on whether min or max) + ; or if dold is NaN (for nanmin / nanmax) + %cmp = fcmp {OP} {T} %dold, %val + br i1 %cmp, label %attempt, label %done + +attempt: + ; Attempt to swap in the value + %old = bitcast {T} %dold to {Ti} + %iptr = bitcast {T}* %ptr to {Ti}* + %new = bitcast {T} %val to {Ti} + {CAS} + %dcas = bitcast {Ti} %cas to {T} + br label %lt_check + +done: + ret {T} %ptrval +}} +""" # noqa: E501 + + +def ir_cas(Ti): + return cas_nvvm.format(Ti=Ti) + + +def ir_numba_atomic_binary(T, Ti, OP, FUNC): + params = dict(T=T, Ti=Ti, OP=OP, FUNC=FUNC, CAS=ir_cas(Ti)) + return ir_numba_atomic_binary_template.format(**params) + + +def ir_numba_atomic_minmax(T, Ti, NAN, OP, PTR_OR_VAL, FUNC): + params = dict(T=T, Ti=Ti, NAN=NAN, OP=OP, PTR_OR_VAL=PTR_OR_VAL, + FUNC=FUNC, CAS=ir_cas(Ti)) + + return ir_numba_atomic_minmax_template.format(**params) + + +def ir_numba_atomic_inc(T, Tu): + return ir_numba_atomic_inc_template.format(T=T, Tu=Tu, CAS=ir_cas(T)) + + +def ir_numba_atomic_dec(T, Tu): + return ir_numba_atomic_dec_template.format(T=T, Tu=Tu, CAS=ir_cas(T)) + + +def 
llvm_replace(llvmir): + replacements = [ + ('declare double @"___numba_atomic_double_add"(double* %".1", double %".2")', # noqa: E501 + ir_numba_atomic_binary(T='double', Ti='i64', OP='fadd', FUNC='add')), + ('declare float @"___numba_atomic_float_sub"(float* %".1", float %".2")', # noqa: E501 + ir_numba_atomic_binary(T='float', Ti='i32', OP='fsub', FUNC='sub')), + ('declare double @"___numba_atomic_double_sub"(double* %".1", double %".2")', # noqa: E501 + ir_numba_atomic_binary(T='double', Ti='i64', OP='fsub', FUNC='sub')), + ('declare i64 @"___numba_atomic_u64_inc"(i64* %".1", i64 %".2")', + ir_numba_atomic_inc(T='i64', Tu='u64')), + ('declare i64 @"___numba_atomic_u64_dec"(i64* %".1", i64 %".2")', + ir_numba_atomic_dec(T='i64', Tu='u64')), + ('declare float @"___numba_atomic_float_max"(float* %".1", float %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='float', Ti='i32', NAN='', OP='nnan olt', + PTR_OR_VAL='ptr', FUNC='max')), + ('declare double @"___numba_atomic_double_max"(double* %".1", double %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='double', Ti='i64', NAN='', OP='nnan olt', + PTR_OR_VAL='ptr', FUNC='max')), + ('declare float @"___numba_atomic_float_min"(float* %".1", float %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='float', Ti='i32', NAN='', OP='nnan ogt', + PTR_OR_VAL='ptr', FUNC='min')), + ('declare double @"___numba_atomic_double_min"(double* %".1", double %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='double', Ti='i64', NAN='', OP='nnan ogt', + PTR_OR_VAL='ptr', FUNC='min')), + ('declare float @"___numba_atomic_float_nanmax"(float* %".1", float %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='float', Ti='i32', NAN='nan', OP='ult', + PTR_OR_VAL='', FUNC='max')), + ('declare double @"___numba_atomic_double_nanmax"(double* %".1", double %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='double', Ti='i64', NAN='nan', OP='ult', + PTR_OR_VAL='', FUNC='max')), + ('declare float @"___numba_atomic_float_nanmin"(float* %".1", float %".2")', # 
noqa: E501 + ir_numba_atomic_minmax(T='float', Ti='i32', NAN='nan', OP='ugt', + PTR_OR_VAL='', FUNC='min')), + ('declare double @"___numba_atomic_double_nanmin"(double* %".1", double %".2")', # noqa: E501 + ir_numba_atomic_minmax(T='double', Ti='i64', NAN='nan', OP='ugt', + PTR_OR_VAL='', FUNC='min')), + ('immarg', '') + ] + + for decl, fn in replacements: + llvmir = llvmir.replace(decl, fn) + + llvmir = llvm140_to_70_ir(llvmir) + + return llvmir + + +def compile_ir(llvmir, **opts): + if isinstance(llvmir, str): + llvmir = [llvmir] + + if opts.pop('fastmath', False): + opts.update({ + 'ftz': True, + 'fma': True, + 'prec_div': False, + 'prec_sqrt': False, + }) + + cu = CompilationUnit() + libdevice = LibDevice() + + for mod in llvmir: + mod = llvm_replace(mod) + cu.add_module(mod.encode('utf8')) + cu.lazy_add_module(libdevice.get()) + + return cu.compile(**opts) + + +re_attributes_def = re.compile(r"^attributes #\d+ = \{ ([\w\s]+)\ }") + + +def llvm140_to_70_ir(ir): + """ + Convert LLVM 14.0 IR for LLVM 7.0. + """ + buf = [] + for line in ir.splitlines(): + if line.startswith('attributes #'): + # Remove function attributes unsupported by LLVM 7.0 + m = re_attributes_def.match(line) + attrs = m.group(1).split() + attrs = ' '.join(a for a in attrs if a != 'willreturn') + line = line.replace(m.group(1), attrs) + + buf.append(line) + + return '\n'.join(buf) + + +def set_cuda_kernel(function): + """ + Mark a function as a CUDA kernel. Kernels have the following requirements: + + - Metadata that marks them as a kernel. + - Addition to the @llvm.used list, so that they will not be discarded. + - The noinline attribute is not permitted, because this causes NVVM to emit + a warning, which counts as failing IR verification. + + Presently it is assumed that there is one kernel per module, which holds + for Numba-jitted functions. 
If this changes in future or this function is + to be used externally, this function may need modification to add to the + @llvm.used list rather than creating it. + """ + module = function.module + + # Add kernel metadata + mdstr = ir.MetaDataString(module, "kernel") + mdvalue = ir.Constant(ir.IntType(32), 1) + md = module.add_metadata((function, mdstr, mdvalue)) + + nmd = cgutils.get_or_insert_named_metadata(module, 'nvvm.annotations') + nmd.add(md) + + # Create the used list + ptrty = ir.IntType(8).as_pointer() + usedty = ir.ArrayType(ptrty, 1) + + fnptr = function.bitcast(ptrty) + + llvm_used = ir.GlobalVariable(module, usedty, 'llvm.used') + llvm_used.linkage = 'appending' + llvm_used.section = 'llvm.metadata' + llvm_used.initializer = ir.Constant(usedty, [fnptr]) + + # Remove 'noinline' if it is present. + function.attributes.discard('noinline') + + +def add_ir_version(mod): + """Add NVVM IR version to module""" + # We specify the IR version to match the current NVVM's IR version + i32 = ir.IntType(32) + ir_versions = [i32(v) for v in NVVM().get_ir_version()] + md_ver = mod.add_metadata(ir_versions) + mod.add_named_metadata('nvvmir.version', md_ver) diff --git a/lib/python3.10/site-packages/numba/cuda/cudadrv/rtapi.py b/lib/python3.10/site-packages/numba/cuda/cudadrv/rtapi.py new file mode 100644 index 0000000000000000000000000000000000000000..4a88457f9cb5a1e0cb134eb4dcb59267d1cf3f54 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/cudadrv/rtapi.py @@ -0,0 +1,10 @@ +""" +Declarations of the Runtime API functions. 
+""" + +from ctypes import c_int, POINTER + +API_PROTOTYPES = { + # cudaError_t cudaRuntimeGetVersion ( int* runtimeVersion ) + 'cudaRuntimeGetVersion': (c_int, POINTER(c_int)), +} diff --git a/lib/python3.10/site-packages/numba/cuda/kernels/__init__.py b/lib/python3.10/site-packages/numba/cuda/kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/numba/cuda/kernels/reduction.py b/lib/python3.10/site-packages/numba/cuda/kernels/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..f733935b6223c301bbf13251c4a9f50ffb38b622 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/kernels/reduction.py @@ -0,0 +1,262 @@ +""" +A library written in CUDA Python for generating reduction kernels +""" + +from numba.np.numpy_support import from_dtype + + +_WARPSIZE = 32 +_NUMWARPS = 4 + + +def _gpu_reduce_factory(fn, nbtype): + from numba import cuda + + reduce_op = cuda.jit(device=True)(fn) + inner_sm_size = _WARPSIZE + 1 # plus one to avoid SM collision + max_blocksize = _NUMWARPS * _WARPSIZE + + @cuda.jit(device=True) + def inner_warp_reduction(sm_partials, init): + """ + Compute reduction within a single warp + """ + tid = cuda.threadIdx.x + warpid = tid // _WARPSIZE + laneid = tid % _WARPSIZE + + sm_this = sm_partials[warpid, :] + sm_this[laneid] = init + cuda.syncwarp() + + width = _WARPSIZE // 2 + while width: + if laneid < width: + old = sm_this[laneid] + sm_this[laneid] = reduce_op(old, sm_this[laneid + width]) + cuda.syncwarp() + width //= 2 + + @cuda.jit(device=True) + def device_reduce_full_block(arr, partials, sm_partials): + """ + Partially reduce `arr` into `partials` using `sm_partials` as working + space. 
The algorithm goes like: + + array chunks of 128: | 0 | 128 | 256 | 384 | 512 | + block-0: | x | | | x | | + block-1: | | x | | | x | + block-2: | | | x | | | + + The array is divided into chunks of 128 (size of a threadblock). + The threadblocks consumes the chunks in roundrobin scheduling. + First, a threadblock loads a chunk into temp memory. Then, all + subsequent chunks are combined into the temp memory. + + Once all chunks are processed. Inner-block reduction is performed + on the temp memory. So that, there will just be one scalar result + per block. The result from each block is stored to `partials` at + the dedicated slot. + """ + tid = cuda.threadIdx.x + blkid = cuda.blockIdx.x + blksz = cuda.blockDim.x + gridsz = cuda.gridDim.x + + # block strided loop to compute the reduction + start = tid + blksz * blkid + stop = arr.size + step = blksz * gridsz + + # load first value + tmp = arr[start] + # loop over all values in block-stride + for i in range(start + step, stop, step): + tmp = reduce_op(tmp, arr[i]) + + cuda.syncthreads() + # inner-warp reduction + inner_warp_reduction(sm_partials, tmp) + + cuda.syncthreads() + # at this point, only the first slot for each warp in tsm_partials + # is valid. + + # finish up block reduction + # warning: this is assuming 4 warps. + # assert numwarps == 4 + if tid < 2: + sm_partials[tid, 0] = reduce_op(sm_partials[tid, 0], + sm_partials[tid + 2, 0]) + cuda.syncwarp() + if tid == 0: + partials[blkid] = reduce_op(sm_partials[0, 0], sm_partials[1, 0]) + + @cuda.jit(device=True) + def device_reduce_partial_block(arr, partials, sm_partials): + """ + This computes reduction on `arr`. + This device function must be used by 1 threadblock only. + The blocksize must match `arr.size` and must not be greater than 128. 
+ """ + tid = cuda.threadIdx.x + blkid = cuda.blockIdx.x + blksz = cuda.blockDim.x + warpid = tid // _WARPSIZE + laneid = tid % _WARPSIZE + + size = arr.size + # load first value + tid = cuda.threadIdx.x + value = arr[tid] + sm_partials[warpid, laneid] = value + + cuda.syncthreads() + + if (warpid + 1) * _WARPSIZE < size: + # fully populated warps + inner_warp_reduction(sm_partials, value) + else: + # partially populated warps + # NOTE: this uses a very inefficient sequential algorithm + if laneid == 0: + sm_this = sm_partials[warpid, :] + base = warpid * _WARPSIZE + for i in range(1, size - base): + sm_this[0] = reduce_op(sm_this[0], sm_this[i]) + + cuda.syncthreads() + # finish up + if tid == 0: + num_active_warps = (blksz + _WARPSIZE - 1) // _WARPSIZE + + result = sm_partials[0, 0] + for i in range(1, num_active_warps): + result = reduce_op(result, sm_partials[i, 0]) + + partials[blkid] = result + + def gpu_reduce_block_strided(arr, partials, init, use_init): + """ + Perform reductions on *arr* and writing out partial reduction result + into *partials*. The length of *partials* is determined by the + number of threadblocks. The initial value is set with *init*. + + Launch config: + + Blocksize must be multiple of warpsize and it is limited to 4 warps. + """ + tid = cuda.threadIdx.x + + sm_partials = cuda.shared.array((_NUMWARPS, inner_sm_size), + dtype=nbtype) + if cuda.blockDim.x == max_blocksize: + device_reduce_full_block(arr, partials, sm_partials) + else: + device_reduce_partial_block(arr, partials, sm_partials) + # deal with the initializer + if use_init and tid == 0 and cuda.blockIdx.x == 0: + partials[0] = reduce_op(partials[0], init) + + return cuda.jit(gpu_reduce_block_strided) + + +class Reduce(object): + """Create a reduction object that reduces values using a given binary + function. The binary function is compiled once and cached inside this + object. Keeping this object alive will prevent re-compilation. 
+ """ + + _cache = {} + + def __init__(self, functor): + """ + :param functor: A function implementing a binary operation for + reduction. It will be compiled as a CUDA device + function using ``cuda.jit(device=True)``. + """ + self._functor = functor + + def _compile(self, dtype): + key = self._functor, dtype + if key in self._cache: + kernel = self._cache[key] + else: + kernel = _gpu_reduce_factory(self._functor, from_dtype(dtype)) + self._cache[key] = kernel + return kernel + + def __call__(self, arr, size=None, res=None, init=0, stream=0): + """Performs a full reduction. + + :param arr: A host or device array. + :param size: Optional integer specifying the number of elements in + ``arr`` to reduce. If this parameter is not specified, the + entire array is reduced. + :param res: Optional device array into which to write the reduction + result to. The result is written into the first element of + this array. If this parameter is specified, then no + communication of the reduction output takes place from the + device to the host. + :param init: Optional initial value for the reduction, the type of which + must match ``arr.dtype``. + :param stream: Optional CUDA stream in which to perform the reduction. + If no stream is specified, the default stream of 0 is + used. + :return: If ``res`` is specified, ``None`` is returned. Otherwise, the + result of the reduction is returned. 
+ """ + from numba import cuda + + # ensure 1d array + if arr.ndim != 1: + raise TypeError("only support 1D array") + + # adjust array size + if size is not None: + arr = arr[:size] + + init = arr.dtype.type(init) # ensure the right type + + # return `init` if `arr` is empty + if arr.size < 1: + return init + + kernel = self._compile(arr.dtype) + + # Perform the reduction on the GPU + blocksize = _NUMWARPS * _WARPSIZE + size_full = (arr.size // blocksize) * blocksize + size_partial = arr.size - size_full + full_blockct = min(size_full // blocksize, _WARPSIZE * 2) + + # allocate size of partials array + partials_size = full_blockct + if size_partial: + partials_size += 1 + partials = cuda.device_array(shape=partials_size, dtype=arr.dtype) + + if size_full: + # kernel for the fully populated threadblocks + kernel[full_blockct, blocksize, stream](arr[:size_full], + partials[:full_blockct], + init, + True) + + if size_partial: + # kernel for partially populated threadblocks + kernel[1, size_partial, stream](arr[size_full:], + partials[full_blockct:], + init, + not full_blockct) + + if partials.size > 1: + # finish up + kernel[1, partials_size, stream](partials, partials, init, False) + + # handle return value + if res is not None: + res[:1].copy_to_device(partials[:1], stream=stream) + return + else: + return partials[0] diff --git a/lib/python3.10/site-packages/numba/cuda/kernels/transpose.py b/lib/python3.10/site-packages/numba/cuda/kernels/transpose.py new file mode 100644 index 0000000000000000000000000000000000000000..b1df36e048891119b08fa67b452df637c85db9df --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/kernels/transpose.py @@ -0,0 +1,65 @@ +from numba import cuda +from numba.cuda.cudadrv.driver import driver +import math +from numba.np import numpy_support as nps + + +def transpose(a, b=None): + """Compute the transpose of 'a' and store it into 'b', if given, + and return it. If 'b' is not given, allocate a new array + and return that. 
+ + This implements the algorithm documented in + http://devblogs.nvidia.com/parallelforall/efficient-matrix-transpose-cuda-cc/ + + :param a: an `np.ndarray` or a `DeviceNDArrayBase` subclass. If already on + the device its stream will be used to perform the transpose (and to copy + `b` to the device if necessary). + """ + + # prefer `a`'s stream if + stream = getattr(a, 'stream', 0) + + if not b: + cols, rows = a.shape + strides = a.dtype.itemsize * cols, a.dtype.itemsize + b = cuda.cudadrv.devicearray.DeviceNDArray( + (rows, cols), + strides, + dtype=a.dtype, + stream=stream) + + dt = nps.from_dtype(a.dtype) + + tpb = driver.get_device().MAX_THREADS_PER_BLOCK + # we need to factor available threads into x and y axis + tile_width = int(math.pow(2, math.log(tpb, 2) / 2)) + tile_height = int(tpb / tile_width) + + tile_shape = (tile_height, tile_width + 1) + + @cuda.jit + def kernel(input, output): + + tile = cuda.shared.array(shape=tile_shape, dtype=dt) + + tx = cuda.threadIdx.x + ty = cuda.threadIdx.y + bx = cuda.blockIdx.x * cuda.blockDim.x + by = cuda.blockIdx.y * cuda.blockDim.y + x = by + tx + y = bx + ty + + if by + ty < input.shape[0] and bx + tx < input.shape[1]: + tile[ty, tx] = input[by + ty, bx + tx] + cuda.syncthreads() + if y < output.shape[0] and x < output.shape[1]: + output[y, x] = tile[tx, ty] + + # one block per tile, plus one for remainders + blocks = int(b.shape[0] / tile_height + 1), int(b.shape[1] / tile_width + 1) + # one thread per tile element + threads = tile_height, tile_width + kernel[blocks, threads, stream](a, b) + + return b diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/__init__.py b/lib/python3.10/site-packages/numba/cuda/simulator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d24aa6e7df0f941e2bf781c681122530ceb93e68 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/__init__.py @@ -0,0 +1,38 @@ +import sys + +from .api import * +from .vector_types import 
vector_types +from .reduction import Reduce +from .cudadrv.devicearray import (device_array, device_array_like, pinned, + pinned_array, pinned_array_like, + mapped_array, to_device, auto_device) +from .cudadrv import devicearray +from .cudadrv.devices import require_context, gpus +from .cudadrv.devices import get_context as current_context +from .cudadrv.runtime import runtime +from numba.core import config +reduce = Reduce + +# Register simulated vector types as module level variables +for name, svty in vector_types.items(): + setattr(sys.modules[__name__], name, svty) + for alias in svty.aliases: + setattr(sys.modules[__name__], alias, svty) +del vector_types, name, svty, alias + +# Ensure that any user code attempting to import cudadrv etc. gets the +# simulator's version and not the real version if the simulator is enabled. +if config.ENABLE_CUDASIM: + import sys + from numba.cuda.simulator import cudadrv + sys.modules['numba.cuda.cudadrv'] = cudadrv + sys.modules['numba.cuda.cudadrv.devicearray'] = cudadrv.devicearray + sys.modules['numba.cuda.cudadrv.devices'] = cudadrv.devices + sys.modules['numba.cuda.cudadrv.driver'] = cudadrv.driver + sys.modules['numba.cuda.cudadrv.runtime'] = cudadrv.runtime + sys.modules['numba.cuda.cudadrv.drvapi'] = cudadrv.drvapi + sys.modules['numba.cuda.cudadrv.error'] = cudadrv.error + sys.modules['numba.cuda.cudadrv.nvvm'] = cudadrv.nvvm + + from . import compiler + sys.modules['numba.cuda.compiler'] = compiler diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/api.py b/lib/python3.10/site-packages/numba/cuda/simulator/api.py new file mode 100644 index 0000000000000000000000000000000000000000..0b3c5bfb5331794b1881132b1f47eb7417e6382b --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/api.py @@ -0,0 +1,110 @@ +''' +Contains CUDA API functions +''' + +# Imports here bring together parts of the API from other modules, so some of +# them appear unused. 
+from contextlib import contextmanager + +from .cudadrv.devices import require_context, reset, gpus # noqa: F401 +from .kernel import FakeCUDAKernel +from numba.core.sigutils import is_signature +from warnings import warn +from ..args import In, Out, InOut # noqa: F401 + + +def select_device(dev=0): + assert dev == 0, 'Only a single device supported by the simulator' + + +def is_float16_supported(): + return True + + +class stream(object): + ''' + The stream API is supported in the simulator - however, all execution + occurs synchronously, so synchronization requires no operation. + ''' + @contextmanager + def auto_synchronize(self): + yield + + def synchronize(self): + pass + + +def synchronize(): + pass + + +def close(): + gpus.closed = True + + +def declare_device(*args, **kwargs): + pass + + +def detect(): + print('Found 1 CUDA devices') + print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]')) + print('%40s: 5.0' % 'compute capability') + + +def list_devices(): + return gpus + + +# Events + +class Event(object): + ''' + The simulator supports the event API, but they do not record timing info, + and all simulation is synchronous. Execution time is not recorded. 
+ ''' + def record(self, stream=0): + pass + + def wait(self, stream=0): + pass + + def synchronize(self): + pass + + def elapsed_time(self, event): + warn('Simulator timings are bogus') + return 0.0 + + +event = Event + + +def jit(func_or_sig=None, device=False, debug=False, argtypes=None, + inline=False, restype=None, fastmath=False, link=None, + boundscheck=None, opt=True, cache=None + ): + # Here for API compatibility + if boundscheck: + raise NotImplementedError("bounds checking is not supported for CUDA") + + if link is not None: + raise NotImplementedError('Cannot link PTX in the simulator') + + # Check for first argument specifying types - in that case the + # decorator is not being passed a function + if (func_or_sig is None or is_signature(func_or_sig) + or isinstance(func_or_sig, list)): + def jitwrapper(fn): + return FakeCUDAKernel(fn, + device=device, + fastmath=fastmath, + debug=debug) + return jitwrapper + return FakeCUDAKernel(func_or_sig, device=device, debug=debug) + + +@contextmanager +def defer_cleanup(): + # No effect for simulator + yield diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/compiler.py b/lib/python3.10/site-packages/numba/cuda/simulator/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..7db28d41ac65a8669f9ae2f6ed231304091df940 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/compiler.py @@ -0,0 +1,9 @@ +''' +The compiler is not implemented in the simulator. This module provides a stub +to allow tests to import successfully. 
+''' + +compile = None +compile_for_current_device = None +compile_ptx = None +compile_ptx_for_current_device = None diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/__init__.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dde9362d44669831843a33ed2d944c3c64ed91fa --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/__init__.py @@ -0,0 +1,2 @@ +from numba.cuda.simulator.cudadrv import (devicearray, devices, driver, drvapi, + error, nvvm) diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devicearray.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devicearray.py new file mode 100644 index 0000000000000000000000000000000000000000..785f7cdc1748e496f894c0ad1f84c1b48abeba90 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devicearray.py @@ -0,0 +1,436 @@ +''' +The Device Array API is not implemented in the simulator. This module provides +stubs to allow tests to import correctly. +''' +from contextlib import contextmanager +from numba.np.numpy_support import numpy_version + +import numpy as np + + +DeviceRecord = None +from_record_like = None + + +errmsg_contiguous_buffer = ("Array contains non-contiguous buffer and cannot " + "be transferred as a single memory region. Please " + "ensure contiguous buffer with numpy " + ".ascontiguousarray()") + + +class FakeShape(tuple): + ''' + The FakeShape class is used to provide a shape which does not allow negative + indexing, similar to the shape in CUDA Python. 
(Numpy shape arrays allow + negative indexing) + ''' + + def __getitem__(self, k): + if isinstance(k, int) and k < 0: + raise IndexError('tuple index out of range') + return super(FakeShape, self).__getitem__(k) + + +class FakeWithinKernelCUDAArray(object): + ''' + Created to emulate the behavior of arrays within kernels, where either + array.item or array['item'] is valid (that is, give all structured + arrays `numpy.recarray`-like semantics). This behaviour does not follow + the semantics of Python and NumPy with non-jitted code, and will be + deprecated and removed. + ''' + + def __init__(self, item): + assert isinstance(item, FakeCUDAArray) + self.__dict__['_item'] = item + + def __wrap_if_fake(self, item): + if isinstance(item, FakeCUDAArray): + return FakeWithinKernelCUDAArray(item) + else: + return item + + def __getattr__(self, attrname): + try: + if attrname in dir(self._item._ary): # For e.g. array size. + return self.__wrap_if_fake(getattr(self._item._ary, attrname)) + else: + return self.__wrap_if_fake(self._item.__getitem__(attrname)) + except Exception as e: + if not isinstance(e, AttributeError): + raise AttributeError(attrname) from e + + def __setattr__(self, nm, val): + self._item.__setitem__(nm, val) + + def __getitem__(self, idx): + return self.__wrap_if_fake(self._item.__getitem__(idx)) + + def __setitem__(self, idx, val): + self._item.__setitem__(idx, val) + + def __len__(self): + return len(self._item) + + def __array_ufunc__(self, ufunc, method, *args, **kwargs): + # ufuncs can only be called directly on instances of numpy.ndarray (not + # things that implement its interfaces, like the FakeCUDAArray or + # FakeWithinKernelCUDAArray). For other objects, __array_ufunc__ is + # called when they are arguments to ufuncs, to provide an opportunity + # to somehow implement the ufunc. 
Since the FakeWithinKernelCUDAArray + # is just a thin wrapper over an ndarray, we can implement all ufuncs + # by passing the underlying ndarrays to a call to the intended ufunc. + call = getattr(ufunc, method) + + def convert_fakes(obj): + if isinstance(obj, FakeWithinKernelCUDAArray): + obj = obj._item._ary + + return obj + + out = kwargs.get('out') + if out: + kwargs['out'] = tuple(convert_fakes(o) for o in out) + args = tuple(convert_fakes(a) for a in args) + return call(*args, **kwargs) + + +class FakeCUDAArray(object): + ''' + Implements the interface of a DeviceArray/DeviceRecord, but mostly just + wraps a NumPy array. + ''' + + __cuda_ndarray__ = True # There must be gpu_data attribute + + def __init__(self, ary, stream=0): + self._ary = ary + self.stream = stream + + @property + def alloc_size(self): + return self._ary.nbytes + + @property + def nbytes(self): + # return nbytes -- FakeCUDAArray is a wrapper around NumPy + return self._ary.nbytes + + def __getattr__(self, attrname): + try: + attr = getattr(self._ary, attrname) + return attr + except AttributeError as e: + msg = "Wrapped array has no attribute '%s'" % attrname + raise AttributeError(msg) from e + + def bind(self, stream=0): + return FakeCUDAArray(self._ary, stream) + + @property + def T(self): + return self.transpose() + + def transpose(self, axes=None): + return FakeCUDAArray(np.transpose(self._ary, axes=axes)) + + def __getitem__(self, idx): + ret = self._ary.__getitem__(idx) + if type(ret) not in [np.ndarray, np.void]: + return ret + else: + return FakeCUDAArray(ret, stream=self.stream) + + def __setitem__(self, idx, val): + return self._ary.__setitem__(idx, val) + + def copy_to_host(self, ary=None, stream=0): + if ary is None: + ary = np.empty_like(self._ary) + else: + check_array_compatibility(self, ary) + np.copyto(ary, self._ary) + return ary + + def copy_to_device(self, ary, stream=0): + ''' + Copy from the provided array into this array. 
+ + This may be less forgiving than the CUDA Python implementation, which + will copy data up to the length of the smallest of the two arrays, + whereas this expects the size of the arrays to be equal. + ''' + sentry_contiguous(self) + self_core, ary_core = array_core(self), array_core(ary) + if isinstance(ary, FakeCUDAArray): + sentry_contiguous(ary) + check_array_compatibility(self_core, ary_core) + else: + ary_core = np.array( + ary_core, + order='C' if self_core.flags['C_CONTIGUOUS'] else 'F', + subok=True, + copy=False if numpy_version < (2, 0) else None) + check_array_compatibility(self_core, ary_core) + np.copyto(self_core._ary, ary_core) + + @property + def shape(self): + return FakeShape(self._ary.shape) + + def ravel(self, *args, **kwargs): + return FakeCUDAArray(self._ary.ravel(*args, **kwargs)) + + def reshape(self, *args, **kwargs): + return FakeCUDAArray(self._ary.reshape(*args, **kwargs)) + + def view(self, *args, **kwargs): + return FakeCUDAArray(self._ary.view(*args, **kwargs)) + + def is_c_contiguous(self): + return self._ary.flags.c_contiguous + + def is_f_contiguous(self): + return self._ary.flags.f_contiguous + + def __str__(self): + return str(self._ary) + + def __repr__(self): + return repr(self._ary) + + def __len__(self): + return len(self._ary) + + # TODO: Add inplace, bitwise, unary magic methods + # (or maybe inherit this class from numpy)? 
+ def __eq__(self, other): + return FakeCUDAArray(self._ary == other) + + def __ne__(self, other): + return FakeCUDAArray(self._ary != other) + + def __lt__(self, other): + return FakeCUDAArray(self._ary < other) + + def __le__(self, other): + return FakeCUDAArray(self._ary <= other) + + def __gt__(self, other): + return FakeCUDAArray(self._ary > other) + + def __ge__(self, other): + return FakeCUDAArray(self._ary >= other) + + def __add__(self, other): + return FakeCUDAArray(self._ary + other) + + def __sub__(self, other): + return FakeCUDAArray(self._ary - other) + + def __mul__(self, other): + return FakeCUDAArray(self._ary * other) + + def __floordiv__(self, other): + return FakeCUDAArray(self._ary // other) + + def __truediv__(self, other): + return FakeCUDAArray(self._ary / other) + + def __mod__(self, other): + return FakeCUDAArray(self._ary % other) + + def __pow__(self, other): + return FakeCUDAArray(self._ary ** other) + + def split(self, section, stream=0): + return [ + FakeCUDAArray(a) + for a in np.split(self._ary, range(section, len(self), section)) + ] + + +def array_core(ary): + """ + Extract the repeated core of a broadcast array. + + Broadcast arrays are by definition non-contiguous due to repeated + dimensions, i.e., dimensions with stride 0. In order to ascertain memory + contiguity and copy the underlying data from such arrays, we must create + a view without the repeated dimensions. + + """ + if not ary.strides or not ary.size: + return ary + core_index = [] + for stride in ary.strides: + core_index.append(0 if stride == 0 else slice(None)) + return ary[tuple(core_index)] + + +def is_contiguous(ary): + """ + Returns True iff `ary` is C-style contiguous while ignoring + broadcasted and 1-sized dimensions. + As opposed to array_core(), it does not call require_context(), + which can be quite expensive. 
+ """ + size = ary.dtype.itemsize + for shape, stride in zip(reversed(ary.shape), reversed(ary.strides)): + if shape > 1 and stride != 0: + if size != stride: + return False + size *= shape + return True + + +def sentry_contiguous(ary): + core = array_core(ary) + if not core.flags['C_CONTIGUOUS'] and not core.flags['F_CONTIGUOUS']: + raise ValueError(errmsg_contiguous_buffer) + + +def check_array_compatibility(ary1, ary2): + ary1sq, ary2sq = ary1.squeeze(), ary2.squeeze() + if ary1.dtype != ary2.dtype: + raise TypeError('incompatible dtype: %s vs. %s' % + (ary1.dtype, ary2.dtype)) + if ary1sq.shape != ary2sq.shape: + raise ValueError('incompatible shape: %s vs. %s' % + (ary1.shape, ary2.shape)) + if ary1sq.strides != ary2sq.strides: + raise ValueError('incompatible strides: %s vs. %s' % + (ary1.strides, ary2.strides)) + + +def to_device(ary, stream=0, copy=True, to=None): + ary = np.array(ary, + copy=False if numpy_version < (2, 0) else None, + subok=True) + sentry_contiguous(ary) + if to is None: + buffer_dtype = np.int64 if ary.dtype.char in 'Mm' else ary.dtype + return FakeCUDAArray( + np.ndarray( + buffer=np.copy(array_core(ary)).view(buffer_dtype), + dtype=ary.dtype, + shape=ary.shape, + strides=ary.strides, + ).view(type=type(ary)), + ) + else: + to.copy_to_device(ary, stream=stream) + + +@contextmanager +def pinned(arg): + yield + + +def mapped_array(*args, **kwargs): + for unused_arg in ('portable', 'wc'): + if unused_arg in kwargs: + kwargs.pop(unused_arg) + return device_array(*args, **kwargs) + + +def pinned_array(shape, dtype=np.float64, strides=None, order='C'): + return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order) + + +def managed_array(shape, dtype=np.float64, strides=None, order='C'): + return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order) + + +def device_array(*args, **kwargs): + stream = kwargs.pop('stream') if 'stream' in kwargs else 0 + return FakeCUDAArray(np.ndarray(*args, **kwargs), stream=stream) + + 
def _contiguous_strides_like_array(ary):
    """
    Given an array, compute strides for a new contiguous array of the same
    shape.
    """
    # The default strides already produce a contiguous array in these cases,
    # so no recomputation is needed.
    if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
        return None

    # Otherwise compute new strides with an algorithm adapted from NumPy
    # v1.17.4's PyArray_NewLikeArrayWithShape in core/src/multiarray/ctors.c:
    # sort the axes by their existing stride, then lay the axes out
    # contiguously in that same order.
    # E.g. a stride array (4, -2, 12) sorts to [(1, -2), (0, 4), (2, 12)].
    by_stride = sorted(enumerate(ary.strides), key=lambda axis: axis[1])

    strides = [0] * ary.ndim
    stride = ary.dtype.itemsize
    for axis, _ in by_stride:
        strides[axis] = stride
        stride *= ary.shape[axis]
    return tuple(strides)


def _order_like_array(ary):
    # Fortran order only when the array is exclusively F-contiguous.
    if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
        return 'F'
    return 'C'


def device_array_like(ary, stream=0):
    """Allocate an uninitialised device array with the same shape, dtype and
    layout as `ary`."""
    return device_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary))


def pinned_array_like(ary):
    """Allocate an uninitialised pinned array with the same shape, dtype and
    layout as `ary`."""
    return pinned_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary))


def auto_device(ary, stream=0, copy=True):
    """Ensure `ary` is device-resident; returns (array, copied_flag)."""
    if isinstance(ary, FakeCUDAArray):
        return ary, False

    if not isinstance(ary, np.void):
        ary = np.array(ary,
                       copy=False if numpy_version < (2, 0) else None,
                       subok=True)
    return to_device(ary, stream, copy), True


def is_cuda_ndarray(obj):
    "Check if an object is a CUDA ndarray"
    return getattr(obj, '__cuda_ndarray__', False)
verify_cuda_ndarray_interface(obj): + "Verify the CUDA ndarray interface for an obj" + require_cuda_ndarray(obj) + + def requires_attr(attr, typ): + if not hasattr(obj, attr): + raise AttributeError(attr) + if not isinstance(getattr(obj, attr), typ): + raise AttributeError('%s must be of type %s' % (attr, typ)) + + requires_attr('shape', tuple) + requires_attr('strides', tuple) + requires_attr('dtype', np.dtype) + requires_attr('size', int) + + +def require_cuda_ndarray(obj): + "Raises ValueError is is_cuda_ndarray(obj) evaluates False" + if not is_cuda_ndarray(obj): + raise ValueError('require an cuda ndarray object') diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devices.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devices.py new file mode 100644 index 0000000000000000000000000000000000000000..3237fb2c6adea223bf079665319d3ef7b3c8489e --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/devices.py @@ -0,0 +1,117 @@ +import numpy as np +from collections import namedtuple + +_MemoryInfo = namedtuple("_MemoryInfo", "free,total") + +_SIMULATOR_CC = (5, 2) + + +class FakeCUDADevice: + def __init__(self): + self.uuid = 'GPU-00000000-0000-0000-0000-000000000000' + + @property + def compute_capability(self): + return _SIMULATOR_CC + + +class FakeCUDAContext: + ''' + This stub implements functionality only for simulating a single GPU + at the moment. 
+ ''' + def __init__(self, device_id): + self._device_id = device_id + self._device = FakeCUDADevice() + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def __str__(self): + return "".format(self=self) + + @property + def id(self): + return self._device_id + + @property + def device(self): + return self._device + + @property + def compute_capability(self): + return _SIMULATOR_CC + + def reset(self): + pass + + def get_memory_info(self): + """ + Cross-platform free / total host memory is hard without external + dependencies, e.g. `psutil` - so return infinite memory to maintain API + type compatibility + """ + return _MemoryInfo(float('inf'), float('inf')) + + def memalloc(self, sz): + """ + Allocates memory on the simulated device + At present, there is no division between simulated + host memory and simulated device memory. + """ + return np.ndarray(sz, dtype='u1') + + def memhostalloc(self, sz, mapped=False, portable=False, wc=False): + '''Allocates memory on the host''' + return self.memalloc(sz) + + +class FakeDeviceList: + ''' + This stub implements a device list containing a single GPU. It also + keeps track of the GPU status, i.e. whether the context is closed or not, + which may have been set by the user calling reset() + ''' + def __init__(self): + self.lst = (FakeCUDAContext(0),) + self.closed = False + + def __getitem__(self, devnum): + self.closed = False + return self.lst[devnum] + + def __str__(self): + return ', '.join([str(d) for d in self.lst]) + + def __iter__(self): + return iter(self.lst) + + def __len__(self): + return len(self.lst) + + @property + def current(self): + if self.closed: + return None + return self.lst[0] + + +gpus = FakeDeviceList() + + +def reset(): + gpus[0].closed = True + + +def get_context(devnum=0): + return FakeCUDAContext(devnum) + + +def require_context(func): + ''' + In the simulator, a context is always "available", so this is a no-op. 
+ ''' + return func diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/driver.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/driver.py new file mode 100644 index 0000000000000000000000000000000000000000..09de5b729af1da79db35f9c73bed08dfabffff48 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/driver.py @@ -0,0 +1,62 @@ +''' +Most of the driver API is unsupported in the simulator, but some stubs are +provided to allow tests to import correctly. +''' + + +def device_memset(dst, val, size, stream=0): + dst.view('u1')[:size].fill(bytes([val])[0]) + + +def host_to_device(dst, src, size, stream=0): + dst.view('u1')[:size] = src.view('u1')[:size] + + +def device_to_host(dst, src, size, stream=0): + host_to_device(dst, src, size) + + +def device_memory_size(obj): + return obj.itemsize * obj.size + + +def device_to_device(dst, src, size, stream=0): + host_to_device(dst, src, size) + + +class FakeDriver(object): + def get_device_count(self): + return 1 + + +driver = FakeDriver() + + +class Linker: + @classmethod + def new(cls, max_registers=0, lineinfo=False, cc=None): + return Linker() + + @property + def lto(self): + return False + + +class LinkerError(RuntimeError): + pass + + +class NvrtcError(RuntimeError): + pass + + +class CudaAPIError(RuntimeError): + pass + + +def launch_kernel(*args, **kwargs): + msg = 'Launching kernels directly is not supported in the simulator' + raise RuntimeError(msg) + + +USE_NV_BINDING = False diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/drvapi.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/drvapi.py new file mode 100644 index 0000000000000000000000000000000000000000..44c697f37debb3a6a80d7516063f4524cbc3a152 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/drvapi.py @@ -0,0 +1,4 @@ +''' +drvapi is not implemented in the simulator, but this module exists to allow +tests to import correctly. 
+''' diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/dummyarray.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/dummyarray.py new file mode 100644 index 0000000000000000000000000000000000000000..adabaa7828c24856a0a52d45c29f27bbdd544831 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/dummyarray.py @@ -0,0 +1,4 @@ +# Dummy arrays are not implemented in the simulator. This file allows the dummy +# array tests to be imported, but they are skipped on the simulator. + +Array = None diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/error.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/error.py new file mode 100644 index 0000000000000000000000000000000000000000..eaaa2884a0d2380015b6a4f11177e2fcaaa7f51d --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/error.py @@ -0,0 +1,6 @@ +class CudaSupportError(RuntimeError): + pass + + +class NvrtcError(Exception): + pass diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/libs.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/libs.py new file mode 100644 index 0000000000000000000000000000000000000000..347b936c5d9ae465b3d8644dc63529d363add4ed --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/libs.py @@ -0,0 +1,2 @@ +def check_static_lib(lib): + raise FileNotFoundError('Linking libraries not supported by cudasim') diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/nvvm.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/nvvm.py new file mode 100644 index 0000000000000000000000000000000000000000..2a011a77a4002e655085d2da67a7f06f4b1f0519 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/nvvm.py @@ -0,0 +1,29 @@ +''' +NVVM is not supported in the simulator, but stubs are provided to allow tests +to import correctly. 
+''' + + +class NvvmSupportError(ImportError): + pass + + +class NVVM(object): + def __init__(self): + raise NvvmSupportError('NVVM not supported in the simulator') + + +CompilationUnit = None +compile_ir = None +set_cuda_kernel = None +get_arch_option = None +LibDevice = None +NvvmError = None + + +def is_available(): + return False + + +def get_supported_ccs(): + return () diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/runtime.py b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..308d19e7683b27754c68dca334f22805e50821d2 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/cudadrv/runtime.py @@ -0,0 +1,19 @@ +''' +The runtime API is unsupported in the simulator, but some stubs are +provided to allow tests to import correctly. +''' + + +class FakeRuntime(object): + def get_version(self): + return (-1, -1) + + def is_supported_version(self): + return True + + @property + def supported_versions(self): + return (-1, -1), + + +runtime = FakeRuntime() diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/kernel.py b/lib/python3.10/site-packages/numba/cuda/simulator/kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ca2259938b99f0beefffe10b199715dd5e6707 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/kernel.py @@ -0,0 +1,308 @@ +from contextlib import contextmanager +import functools +import sys +import threading + +import numpy as np + +from .cudadrv.devicearray import FakeCUDAArray, FakeWithinKernelCUDAArray +from .kernelapi import Dim3, FakeCUDAModule, swapped_cuda_module +from ..errors import normalize_kernel_dimensions +from ..args import wrap_arg, ArgHint + + +""" +Global variable to keep track of the current "kernel context", i.e the +FakeCUDAModule. We only support one kernel launch at a time. +No support for concurrent kernel launch. 
+""" +_kernel_context = None + + +@contextmanager +def _push_kernel_context(mod): + """ + Push the current kernel context. + """ + global _kernel_context + assert _kernel_context is None, "concurrent simulated kernel not supported" + _kernel_context = mod + try: + yield + finally: + _kernel_context = None + + +def _get_kernel_context(): + """ + Get the current kernel context. This is usually done by a device function. + """ + return _kernel_context + + +class FakeOverload: + ''' + Used only to provide the max_cooperative_grid_blocks method + ''' + def max_cooperative_grid_blocks(self, blockdim): + # We can only run one block in a cooperative grid because we have no + # mechanism for synchronization between different blocks + return 1 + + +class FakeOverloadDict(dict): + def __getitem__(self, key): + # Always return a fake overload for any signature, as we don't keep + # track of overloads in the simulator. + return FakeOverload() + + +class FakeCUDAKernel(object): + ''' + Wraps a @cuda.jit-ed function. + ''' + + def __init__(self, fn, device, fastmath=False, extensions=[], debug=False): + self.fn = fn + self._device = device + self._fastmath = fastmath + self._debug = debug + self.extensions = list(extensions) # defensive copy + # Initial configuration: grid unconfigured, stream 0, no dynamic shared + # memory. 
+ self.grid_dim = None + self.block_dim = None + self.stream = 0 + self.dynshared_size = 0 + functools.update_wrapper(self, fn) + + def __call__(self, *args): + if self._device: + with swapped_cuda_module(self.fn, _get_kernel_context()): + return self.fn(*args) + + # Ensure we've been given a valid grid configuration + grid_dim, block_dim = normalize_kernel_dimensions(self.grid_dim, + self.block_dim) + + fake_cuda_module = FakeCUDAModule(grid_dim, block_dim, + self.dynshared_size) + with _push_kernel_context(fake_cuda_module): + # fake_args substitutes all numpy arrays for FakeCUDAArrays + # because they implement some semantics differently + retr = [] + + def fake_arg(arg): + # map the arguments using any extension you've registered + _, arg = functools.reduce( + lambda ty_val, extension: extension.prepare_args( + *ty_val, + stream=0, + retr=retr), + self.extensions, + (None, arg) + ) + + if isinstance(arg, np.ndarray) and arg.ndim > 0: + ret = wrap_arg(arg).to_device(retr) + elif isinstance(arg, ArgHint): + ret = arg.to_device(retr) + elif isinstance(arg, np.void): + ret = FakeCUDAArray(arg) # In case a np record comes in. 
+ else: + ret = arg + if isinstance(ret, FakeCUDAArray): + return FakeWithinKernelCUDAArray(ret) + return ret + + fake_args = [fake_arg(arg) for arg in args] + with swapped_cuda_module(self.fn, fake_cuda_module): + # Execute one block at a time + for grid_point in np.ndindex(*grid_dim): + bm = BlockManager(self.fn, grid_dim, block_dim, self._debug) + bm.run(grid_point, *fake_args) + + for wb in retr: + wb() + + def __getitem__(self, configuration): + self.grid_dim, self.block_dim = \ + normalize_kernel_dimensions(*configuration[:2]) + + if len(configuration) == 4: + self.dynshared_size = configuration[3] + + return self + + def bind(self): + pass + + def specialize(self, *args): + return self + + def forall(self, ntasks, tpb=0, stream=0, sharedmem=0): + if ntasks < 0: + raise ValueError("Can't create ForAll with negative task count: %s" + % ntasks) + return self[ntasks, 1, stream, sharedmem] + + @property + def overloads(self): + return FakeOverloadDict() + + @property + def py_func(self): + return self.fn + + +# Thread emulation + +class BlockThread(threading.Thread): + ''' + Manages the execution of a function for a single CUDA thread. 
    '''
    def __init__(self, f, manager, blockIdx, threadIdx, debug):
        if debug:
            def debug_wrapper(*args, **kwargs):
                # Promote numpy divide warnings to exceptions in debug mode.
                np.seterr(divide='raise')
                f(*args, **kwargs)
            target = debug_wrapper
        else:
            target = f

        super(BlockThread, self).__init__(target=target)
        self.syncthreads_event = threading.Event()
        self.syncthreads_blocked = False
        self._manager = manager
        self.blockIdx = Dim3(*blockIdx)
        self.threadIdx = Dim3(*threadIdx)
        self.exception = None  # (exception instance, traceback) on failure
        self.daemon = True
        self.abort = False
        self.debug = debug
        blockDim = Dim3(*self._manager._block_dim)
        # Linear thread index within the block (x varies fastest).
        self.thread_id = self.threadIdx.x + (blockDim.x * (self.threadIdx.y +
                                                           blockDim.y *
                                                           self.threadIdx.z))

    def run(self):
        # Capture any exception so the BlockManager can re-raise it on the
        # main thread, annotated with this thread's tid/ctaid.
        try:
            super(BlockThread, self).run()
        except Exception as e:
            tid = 'tid=%s' % list(self.threadIdx)
            ctaid = 'ctaid=%s' % list(self.blockIdx)
            if str(e) == '':
                msg = '%s %s' % (tid, ctaid)
            else:
                msg = '%s %s: %s' % (tid, ctaid, e)
            tb = sys.exc_info()[2]
            # Using `with_traceback` here would cause it to be mutated by
            # future raise statements, which may or may not matter.
            self.exception = (type(e)(msg), tb)

    def syncthreads(self):
        # Block until the manager observes every live thread waiting, then
        # resume; `abort` aborts a thread stuck at (or arriving at) a barrier.

        if self.abort:
            raise RuntimeError("abort flag set on syncthreads call")

        self.syncthreads_blocked = True
        self.syncthreads_event.wait()
        self.syncthreads_event.clear()

        if self.abort:
            raise RuntimeError("abort flag set on syncthreads clear")

    def syncthreads_count(self, value):
        # Number of threads in the block whose `value` is non-zero. Two
        # barriers: one so every thread has written its value, one so
        # block_state is not overwritten before everyone has read it.
        idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
        self._manager.block_state[idx] = value
        self.syncthreads()
        count = np.count_nonzero(self._manager.block_state)
        self.syncthreads()
        return count

    def syncthreads_and(self, value):
        # 1 if `value` is truthy for every thread in the block, else 0.
        idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
        self._manager.block_state[idx] = value
        self.syncthreads()
        test = np.all(self._manager.block_state)
        self.syncthreads()
        return 1 if test else 0

    def syncthreads_or(self, value):
        # 1 if `value` is truthy for any thread in the block, else 0.
        idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
        self._manager.block_state[idx] = value
        self.syncthreads()
        test = np.any(self._manager.block_state)
        self.syncthreads()
        return 1 if test else 0

    def __str__(self):
        return 'Thread <<<%s, %s>>>' % (self.blockIdx, self.threadIdx)


class BlockManager(object):
    '''
    Manages the execution of a thread block.

    When run() is called, all threads are started. Each thread executes until it
    hits syncthreads(), at which point it sets its own syncthreads_blocked to
    True so that the BlockManager knows it is blocked. It then waits on its
    syncthreads_event.

    The BlockManager polls threads to determine if they are blocked in
    syncthreads(). If it finds a blocked thread, it adds it to the set of
    blocked threads. When all threads are blocked, it unblocks all the threads.
    The thread are unblocked by setting their syncthreads_blocked back to False
    and setting their syncthreads_event.

    The polling continues until no threads are alive, when execution is
    complete.
    '''
    def __init__(self, f, grid_dim, block_dim, debug):
        self._grid_dim = grid_dim
        self._block_dim = block_dim
        self._f = f
        self._debug = debug
        # Shared scratch space used by the syncthreads_{count,and,or}
        # reductions: one slot per thread position in the block.
        self.block_state = np.zeros(block_dim, dtype=np.bool_)

    def run(self, grid_point, *args):
        # Create all threads
        threads = set()
        livethreads = set()
        blockedthreads = set()
        for block_point in np.ndindex(*self._block_dim):
            def target():
                self._f(*args)
            t = BlockThread(target, self, grid_point, block_point, self._debug)
            t.start()
            threads.add(t)
            livethreads.add(t)

        # Potential optimisations:
        # 1. Continue the while loop immediately after finding a blocked thread
        # 2. Don't poll already-blocked threads
        while livethreads:
            for t in livethreads:
                if t.syncthreads_blocked:
                    blockedthreads.add(t)
                elif t.exception:

                    # Abort all other simulator threads on exception,
                    # do *not* join immediately to facilitate debugging.
                    for t_other in threads:
                        t_other.abort = True
                        t_other.syncthreads_blocked = False
                        t_other.syncthreads_event.set()

                    raise t.exception[0].with_traceback(t.exception[1])
            if livethreads == blockedthreads:
                # Every live thread reached the barrier: release them all.
                for t in blockedthreads:
                    t.syncthreads_blocked = False
                    t.syncthreads_event.set()
                blockedthreads = set()
            livethreads = set([ t for t in livethreads if t.is_alive() ])
        # Final check for exceptions in case any were set prior to thread
        # finishing, before we could check it
        for t in threads:
            if t.exception:
                raise t.exception[0].with_traceback(t.exception[1])
diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/kernelapi.py b/lib/python3.10/site-packages/numba/cuda/simulator/kernelapi.py new file mode 100644 index 0000000000000000000000000000000000000000..64793df054cc2e2baeb00d059338fa570e53718a --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/kernelapi.py @@ -0,0 +1,495 @@
'''
Implements the cuda module as called from within an executing kernel
(@cuda.jit-decorated function).
'''

from contextlib import contextmanager
import sys
import threading
import traceback
from numba.core import types
import numpy as np

from numba.np import numpy_support

from .vector_types import vector_types


class Dim3(object):
    '''
    Used to implement thread/block indices/dimensions
    '''
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        return '(%s, %s, %s)' % (self.x, self.y, self.z)

    def __repr__(self):
        return 'Dim3(%s, %s, %s)' % (self.x, self.y, self.z)

    def __iter__(self):
        # Supports unpacking, e.g. ``x, y, z = dim``.
        yield self.x
        yield self.y
        yield self.z


class GridGroup:
    '''
    Used to implement the grid group.
    '''

    def sync(self):
        # Synchronization of the grid group is equivalent to synchronization of
        # the thread block, because we only support cooperative grids with one
        # block.
        threading.current_thread().syncthreads()


class FakeCUDACg:
    '''
    CUDA Cooperative Groups
    '''
    def this_grid(self):
        return GridGroup()


class FakeCUDALocal(object):
    '''
    CUDA Local arrays
    '''
    def array(self, shape, dtype):
        # Accept either a Numba type or anything numpy accepts as a dtype.
        if isinstance(dtype, types.Type):
            dtype = numpy_support.as_dtype(dtype)
        return np.empty(shape, dtype)


class FakeCUDAConst(object):
    '''
    CUDA Const arrays
    '''
    def array_like(self, ary):
        # The host array is returned as-is to stand in for constant memory.
        return ary


class FakeCUDAShared(object):
    '''
    CUDA Shared arrays.

    Limitations: assumes that only one call to cuda.shared.array is on a line,
    and that that line is only executed once per thread. i.e.::

        a = cuda.shared.array(...); b = cuda.shared.array(...)

    will erroneously alias a and b, and::

        for i in range(10):
            sharedarrs[i] = cuda.shared.array(...)

    will alias all arrays created at that point (though it is not certain that
    this would be supported by Numba anyway).
    '''

    def __init__(self, dynshared_size):
        # Maps the (filename, lineno) of the allocating call site to the
        # array allocated there, so re-executions of a line share storage.
        self._allocations = {}
        self._dynshared_size = dynshared_size
        self._dynshared = np.zeros(dynshared_size, dtype=np.byte)

    def array(self, shape, dtype):
        if isinstance(dtype, types.Type):
            dtype = numpy_support.as_dtype(dtype)
        # Dynamic shared memory is requested with size 0 - this all shares the
        # same underlying memory
        if shape == 0:
            # Count must be the maximum number of whole elements that fit in the
            # buffer (Numpy complains if the buffer is not a multiple of the
            # element size)
            count = self._dynshared_size // dtype.itemsize
            return np.frombuffer(self._dynshared.data, dtype=dtype, count=count)

        # Otherwise, identify allocations by source file and line number
        # We pass the reference frame explicitly to work around
        # http://bugs.python.org/issue25108
        stack = traceback.extract_stack(sys._getframe())
        caller = stack[-2][0:2]
        res = self._allocations.get(caller)
        if res is None:
            res = np.empty(shape, dtype)
            self._allocations[caller] = res
        return res


# One module-level lock per kind of atomic operation, serializing that
# operation across all simulated threads.
addlock = threading.Lock()
sublock = threading.Lock()
andlock = threading.Lock()
orlock = threading.Lock()
xorlock = threading.Lock()
maxlock = threading.Lock()
minlock = threading.Lock()
compare_and_swaplock = threading.Lock()
caslock = threading.Lock()
inclock = threading.Lock()
declock = threading.Lock()
exchlock = threading.Lock()


class FakeCUDAAtomic(object):
    # Each method returns the value stored at `index` *before* the update.
    def add(self, array, index, val):
        with addlock:
            old = array[index]
            array[index] += val
            return old

    def sub(self, array, index, val):
        with sublock:
            old = array[index]
            array[index] -= val
            return old

    def and_(self, array, index, val):
        with andlock:
            old = array[index]
            array[index] &= val
            return old

    def or_(self, array, index, val):
        with orlock:
            old = array[index]
            array[index] |= val
            return old

    def xor(self, array, index, val):
        with xorlock:
            old = array[index]
            array[index] ^= val
            return old

    def
inc(self, array, index, val):
        # Increment with wrap-around: resets to 0 once `old` reaches `val`.
        with inclock:
            old = array[index]
            if old >= val:
                array[index] = 0
            else:
                array[index] += 1
            return old

    def dec(self, array, index, val):
        # Decrement with wrap-around: resets to `val` at 0 or out of range.
        with declock:
            old = array[index]
            if (old == 0) or (old > val):
                array[index] = val
            else:
                array[index] -= 1
            return old

    def exch(self, array, index, val):
        with exchlock:
            old = array[index]
            array[index] = val
            return old

    def max(self, array, index, val):
        with maxlock:
            old = array[index]
            array[index] = max(old, val)
            return old

    def min(self, array, index, val):
        with minlock:
            old = array[index]
            array[index] = min(old, val)
            return old

    def nanmax(self, array, index, val):
        # NaN-ignoring variant; shares maxlock with max().
        with maxlock:
            old = array[index]
            array[index] = np.nanmax([array[index], val])
            return old

    def nanmin(self, array, index, val):
        # NaN-ignoring variant; shares minlock with min().
        with minlock:
            old = array[index]
            array[index] = np.nanmin([array[index], val])
            return old

    def compare_and_swap(self, array, old, val):
        # Legacy form: always operates on the first element of `array`.
        with compare_and_swaplock:
            index = (0,) * array.ndim
            loaded = array[index]
            if loaded == old:
                array[index] = val
            return loaded

    def cas(self, array, index, old, val):
        with caslock:
            loaded = array[index]
            if loaded == old:
                array[index] = val
            return loaded


class FakeCUDAFp16(object):
    # Half-precision helpers. NOTE(review): the arithmetic methods below do
    # not force a float16 result themselves — they appear to rely on the
    # operands already being np.float16; confirm against callers.
    def hadd(self, a, b):
        return a + b

    def hsub(self, a, b):
        return a - b

    def hmul(self, a, b):
        return a * b

    def hdiv(self, a, b):
        return a / b

    def hfma(self, a, b, c):
        return a * b + c

    def hneg(self, a):
        return -a

    def habs(self, a):
        return abs(a)

    def hsin(self, x):
        return np.sin(x, dtype=np.float16)

    def hcos(self, x):
        return np.cos(x, dtype=np.float16)

    def hlog(self, x):
        return np.log(x, dtype=np.float16)

    def hlog2(self, x):
        return np.log2(x, dtype=np.float16)

    def hlog10(self, x):
        return np.log10(x, dtype=np.float16)

    def hexp(self, x):
        return np.exp(x, dtype=np.float16)

    def hexp2(self, x):
        return np.exp2(x,
dtype=np.float16) + + def hexp10(self, x): + return np.float16(10 ** x) + + def hsqrt(self, x): + return np.sqrt(x, dtype=np.float16) + + def hrsqrt(self, x): + return np.float16(x ** -0.5) + + def hceil(self, x): + return np.ceil(x, dtype=np.float16) + + def hfloor(self, x): + return np.ceil(x, dtype=np.float16) + + def hrcp(self, x): + return np.reciprocal(x, dtype=np.float16) + + def htrunc(self, x): + return np.trunc(x, dtype=np.float16) + + def hrint(self, x): + return np.rint(x, dtype=np.float16) + + def heq(self, a, b): + return a == b + + def hne(self, a, b): + return a != b + + def hge(self, a, b): + return a >= b + + def hgt(self, a, b): + return a > b + + def hle(self, a, b): + return a <= b + + def hlt(self, a, b): + return a < b + + def hmax(self, a, b): + return max(a, b) + + def hmin(self, a, b): + return min(a, b) + + +class FakeCUDAModule(object): + ''' + An instance of this class will be injected into the __globals__ for an + executing function in order to implement calls to cuda.*. This will fail to + work correctly if the user code does:: + + from numba import cuda as something_else + + In other words, the CUDA module must be called cuda. + ''' + + def __init__(self, grid_dim, block_dim, dynshared_size): + self.gridDim = Dim3(*grid_dim) + self.blockDim = Dim3(*block_dim) + self._cg = FakeCUDACg() + self._local = FakeCUDALocal() + self._shared = FakeCUDAShared(dynshared_size) + self._const = FakeCUDAConst() + self._atomic = FakeCUDAAtomic() + self._fp16 = FakeCUDAFp16() + # Insert the vector types into the kernel context + # Note that we need to do this in addition to exposing them as module + # variables in `simulator.__init__.py`, because the test cases need + # to access the actual cuda module as well as the fake cuda module + # for vector types. 
        for name, svty in vector_types.items():
            # Expose each simulated vector type and its aliases as
            # attributes of the fake module.
            setattr(self, name, svty)
            for alias in svty.aliases:
                setattr(self, alias, svty)

    @property
    def cg(self):
        return self._cg

    @property
    def local(self):
        return self._local

    @property
    def shared(self):
        return self._shared

    @property
    def const(self):
        return self._const

    @property
    def atomic(self):
        return self._atomic

    @property
    def fp16(self):
        return self._fp16

    @property
    def threadIdx(self):
        # Per-thread state lives on the executing BlockThread.
        return threading.current_thread().threadIdx

    @property
    def blockIdx(self):
        return threading.current_thread().blockIdx

    @property
    def warpsize(self):
        return 32

    @property
    def laneid(self):
        return threading.current_thread().thread_id % 32

    def syncthreads(self):
        threading.current_thread().syncthreads()

    def threadfence(self):
        # No-op
        pass

    def threadfence_block(self):
        # No-op
        pass

    def threadfence_system(self):
        # No-op
        pass

    def syncthreads_count(self, val):
        return threading.current_thread().syncthreads_count(val)

    def syncthreads_and(self, val):
        return threading.current_thread().syncthreads_and(val)

    def syncthreads_or(self, val):
        return threading.current_thread().syncthreads_or(val)

    def popc(self, val):
        # Population count: number of set bits in `val`.
        return bin(val).count("1")

    def fma(self, a, b, c):
        return a * b + c

    def cbrt(self, a):
        return a ** (1 / 3)

    def brev(self, val):
        # Reverse the bits of a 32-bit value.
        return int('{:032b}'.format(val)[::-1], 2)

    def clz(self, val):
        # Count leading zeros of a 32-bit value.
        s = '{:032b}'.format(val)
        return len(s) - len(s.lstrip('0'))

    def ffs(self, val):
        # The algorithm is:
        # 1. Count the number of trailing zeros.
        # 2. Add 1, because the LSB is numbered 1 rather than 0, and so on.
        # 3. If we've counted 32 zeros (resulting in 33), there were no bits
        # set so we need to return zero.
        s = '{:032b}'.format(val)
        r = (len(s) - len(s.rstrip('0')) + 1) % 33
        return r

    def selp(self, a, b, c):
        # Select: b if predicate a is truthy, else c.
        return b if a else c

    def grid(self, n):
        # Absolute thread position within the grid, in 1-3 dimensions.
        bdim = self.blockDim
        bid = self.blockIdx
        tid = self.threadIdx
        x = bid.x * bdim.x + tid.x
        if n == 1:
            return x
        y = bid.y * bdim.y + tid.y
        if n == 2:
            return (x, y)
        z = bid.z * bdim.z + tid.z
        if n == 3:
            return (x, y, z)

        raise RuntimeError("Global ID has 1-3 dimensions. %d requested" % n)

    def gridsize(self, n):
        # Total number of threads along each of up to 3 dimensions.
        bdim = self.blockDim
        gdim = self.gridDim
        x = bdim.x * gdim.x
        if n == 1:
            return x
        y = bdim.y * gdim.y
        if n == 2:
            return (x, y)
        z = bdim.z * gdim.z
        if n == 3:
            return (x, y, z)

        raise RuntimeError("Global grid has 1-3 dimensions. %d requested" % n)


@contextmanager
def swapped_cuda_module(fn, fake_cuda_module):
    # Temporarily rebind every global of `fn` that refers to the real cuda
    # module to `fake_cuda_module`, restoring the originals on exit.
    from numba import cuda

    fn_globs = fn.__globals__
    # get all globals that is the "cuda" module
    orig = dict((k, v) for k, v in fn_globs.items() if v is cuda)
    # build replacement dict
    repl = dict((k, fake_cuda_module) for k, v in orig.items())
    # replace
    fn_globs.update(repl)
    try:
        yield
    finally:
        # revert
        fn_globs.update(orig)
diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/reduction.py b/lib/python3.10/site-packages/numba/cuda/simulator/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..1b819c043549c936fc9a73271fe846f02eb05001 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/reduction.py @@ -0,0 +1,15 @@
from functools import reduce as pyreduce


def Reduce(func):
    # Simulator stand-in for cuda.Reduce: a plain functools.reduce, with an
    # optional one-element output array `res` receiving the result.
    def reduce_wrapper(seq, res=None, init=0):
        r = pyreduce(func, seq, init)
        if res is not None:
            res[0] = r
            return None
        else:
            return r
    return reduce_wrapper


reduce = Reduce
diff --git a/lib/python3.10/site-packages/numba/cuda/simulator/vector_types.py b/lib/python3.10/site-packages/numba/cuda/simulator/vector_types.py new file mode 100644 index
0000000000000000000000000000000000000000..de82ab35e1085b816e934f09f87c457b9c6e2f45 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/simulator/vector_types.py @@ -0,0 +1,63 @@
from numba import types, config
from numba.cuda.stubs import _vector_type_stubs


class SimulatedVectorType:
    # Base for the simulated CUDA vector types; concrete subclasses are
    # built by make_simulated_vector_type with num_elements and name set.
    attributes = ['x', 'y', 'z', 'w']

    def __init__(self, *args):
        # Constructors accept a mix of scalars and smaller vectors;
        # flatten any vector arguments into their components first.
        args_flattened = []
        for arg in args:
            if isinstance(arg, SimulatedVectorType):
                args_flattened += arg.as_list()
            else:
                args_flattened.append(arg)
        self._attrs = self.attributes[:len(args_flattened)]
        if not self.num_elements == len(args_flattened):
            raise TypeError(
                f"{self.name} expects {self.num_elements}"
                f" elements, got {len(args_flattened)}"
            )

        for arg, attr in zip(args_flattened, self._attrs):
            setattr(self, attr, arg)

    @property
    def name(self):
        # Overridden via the class dict in make_simulated_vector_type.
        raise NotImplementedError()

    @property
    def num_elements(self):
        # Overridden via the class dict in make_simulated_vector_type.
        raise NotImplementedError()

    def as_list(self):
        return [getattr(self, attr) for attr in self._attrs]


def make_simulated_vector_type(num_elements, name):
    # Build a concrete SimulatedVectorType subclass of the given arity.
    if config.USE_LEGACY_TYPE_SYSTEM:
        base_type = types.float32
    else:
        base_type = types.np_float32

    obj = type(name, (SimulatedVectorType,), {
        "num_elements": num_elements,
        "base_type": base_type,
        "name": name
    })
    obj.user_facing_object = obj
    return obj


def _initialize():
    # One simulated type per stub; the arity is the trailing digit of the
    # stub's name.
    _simulated_vector_types = {}
    for stub in _vector_type_stubs:
        num_elements = int(stub.__name__[-1])
        _simulated_vector_types[stub.__name__] = (
            make_simulated_vector_type(num_elements, stub.__name__)
        )
        _simulated_vector_types[stub.__name__].aliases = stub.aliases
    return _simulated_vector_types


vector_types = _initialize()
diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/__init__.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e7d31af3b99e121a9ae04bc855a6c80cc4594d --- /dev/null +++
b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/__init__.py @@ -0,0 +1,8 @@
from numba.cuda.testing import ensure_supported_ccs_initialized
from numba.testing import load_testsuite
import os


def load_tests(loader, tests, pattern):
    # unittest load_tests protocol entry point for this test package.
    ensure_supported_ccs_initialized()
    return load_testsuite(loader, os.path.dirname(__file__))
diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_array_attr.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_array_attr.py new file mode 100644 index 0000000000000000000000000000000000000000..32f75c855cc5657ad81a15e805503e6ace650c45 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_array_attr.py @@ -0,0 +1,145 @@
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim


class TestArrayAttr(CUDATestCase):
    # Tests for device-array attributes (contiguity, ravel, reshape).
    # NOTE(review): "contigous" below is a typo for "contiguous"; left as-is
    # because test method names are part of the discovery interface.

    def test_contigous_2d(self):
        ary = np.arange(10)
        cary = ary.reshape(2, 5)
        fary = np.asfortranarray(cary)

        dcary = cuda.to_device(cary)
        dfary = cuda.to_device(fary)
        self.assertTrue(dcary.is_c_contiguous())
        self.assertTrue(not dfary.is_c_contiguous())
        self.assertTrue(not dcary.is_f_contiguous())
        self.assertTrue(dfary.is_f_contiguous())

    def test_contigous_3d(self):
        ary = np.arange(20)
        cary = ary.reshape(2, 5, 2)
        fary = np.asfortranarray(cary)

        dcary = cuda.to_device(cary)
        dfary = cuda.to_device(fary)
        self.assertTrue(dcary.is_c_contiguous())
        self.assertTrue(not dfary.is_c_contiguous())
        self.assertTrue(not dcary.is_f_contiguous())
        self.assertTrue(dfary.is_f_contiguous())

    def test_contigous_4d(self):
        ary = np.arange(60)
        cary = ary.reshape(2, 5, 2, 3)
        fary = np.asfortranarray(cary)

        dcary = cuda.to_device(cary)
        dfary = cuda.to_device(fary)
        self.assertTrue(dcary.is_c_contiguous())
        self.assertTrue(not dfary.is_c_contiguous())
        self.assertTrue(not dcary.is_f_contiguous())
        self.assertTrue(dfary.is_f_contiguous())

    def test_ravel_1d(self):
        ary = np.arange(60)
        dary = cuda.to_device(ary)
        for order in 'CFA':
            expect = ary.ravel(order=order)
            dflat = dary.ravel(order=order)
            flat = dflat.copy_to_host()
            self.assertTrue(dary is not dflat)  # ravel returns new array
            self.assertEqual(flat.ndim, 1)
            self.assertPreciseEqual(expect, flat)

    @skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
    def test_ravel_stride_1d(self):
        ary = np.arange(60)
        dary = cuda.to_device(ary)
        # No-copy stride device array
        darystride = dary[::2]
        dary_data = dary.__cuda_array_interface__['data'][0]
        ddarystride_data = darystride.__cuda_array_interface__['data'][0]
        self.assertEqual(dary_data, ddarystride_data)
        # Fail on ravel on non-contiguous array
        with self.assertRaises(NotImplementedError):
            darystride.ravel()

    def test_ravel_c(self):
        ary = np.arange(60)
        reshaped = ary.reshape(2, 5, 2, 3)

        expect = reshaped.ravel(order='C')
        dary = cuda.to_device(reshaped)
        dflat = dary.ravel()
        flat = dflat.copy_to_host()
        self.assertTrue(dary is not dflat)
        self.assertEqual(flat.ndim, 1)
        self.assertPreciseEqual(expect, flat)

        # explicit order kwarg
        for order in 'CA':
            expect = reshaped.ravel(order=order)
            dary = cuda.to_device(reshaped)
            dflat = dary.ravel(order=order)
            flat = dflat.copy_to_host()
            self.assertTrue(dary is not dflat)
            self.assertEqual(flat.ndim, 1)
            self.assertPreciseEqual(expect, flat)

    @skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
    def test_ravel_stride_c(self):
        ary = np.arange(60)
        reshaped = ary.reshape(2, 5, 2, 3)

        dary = cuda.to_device(reshaped)
        darystride = dary[::2, ::2, ::2, ::2]
        dary_data = dary.__cuda_array_interface__['data'][0]
        ddarystride_data = darystride.__cuda_array_interface__['data'][0]
        self.assertEqual(dary_data, ddarystride_data)
        with self.assertRaises(NotImplementedError):
            darystride.ravel()

    def test_ravel_f(self):
        ary = np.arange(60)
        reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
        for order in 'FA':
            expect = reshaped.ravel(order=order)
            dary = cuda.to_device(reshaped)
            dflat = dary.ravel(order=order)
            flat = dflat.copy_to_host()
            self.assertTrue(dary is not dflat)
            self.assertEqual(flat.ndim, 1)
            self.assertPreciseEqual(expect, flat)

    @skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
    def test_ravel_stride_f(self):
        ary = np.arange(60)
        reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
        dary = cuda.to_device(reshaped)
        darystride = dary[::2, ::2, ::2, ::2]
        dary_data = dary.__cuda_array_interface__['data'][0]
        ddarystride_data = darystride.__cuda_array_interface__['data'][0]
        self.assertEqual(dary_data, ddarystride_data)
        with self.assertRaises(NotImplementedError):
            darystride.ravel()

    def test_reshape_c(self):
        ary = np.arange(10)
        expect = ary.reshape(2, 5)
        dary = cuda.to_device(ary)
        dary_reshaped = dary.reshape(2, 5)
        got = dary_reshaped.copy_to_host()
        self.assertPreciseEqual(expect, got)

    def test_reshape_f(self):
        ary = np.arange(10)
        expect = ary.reshape(2, 5, order='F')
        dary = cuda.to_device(ary)
        dary_reshaped = dary.reshape(2, 5, order='F')
        got = dary_reshaped.copy_to_host()
        self.assertPreciseEqual(expect, got)


if __name__ == '__main__':
    unittest.main()
diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_context_stack.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_context_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..030052507358b4c2e0f1d0c48599cd1db3fc6b4b --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_context_stack.py @@ -0,0 +1,145 @@
import numbers
from ctypes import byref
import weakref

from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
from numba.cuda.cudadrv import driver


class TestContextStack(CUDATestCase):
    # Tests for the per-thread CUDA context stack (cuda.gpus).
    def setUp(self):
        super().setUp()
        # Reset before testing
        cuda.close()

    def test_gpus_current(self):
        # No context is current until one is entered.
        self.assertIs(cuda.gpus.current, None)
        with cuda.gpus[0]:
            self.assertEqual(int(cuda.gpus.current.id), 0)

    def test_gpus_len(self):
        self.assertGreater(len(cuda.gpus), 0)

    def test_gpus_iter(self):
        gpulist = list(cuda.gpus)
        self.assertGreater(len(gpulist), 0)


class TestContextAPI(CUDATestCase):

    def tearDown(self):
        super().tearDown()
        cuda.close()

    def test_context_memory(self):
        try:
            mem = cuda.current_context().get_memory_info()
        except NotImplementedError:
            self.skipTest('EMM Plugin does not implement get_memory_info()')

        # The result is both attribute- and index-accessible.
        self.assertIsInstance(mem.free, numbers.Number)
        self.assertEqual(mem.free, mem[0])

        self.assertIsInstance(mem.total, numbers.Number)
        self.assertEqual(mem.total, mem[1])

        self.assertLessEqual(mem.free, mem.total)

    @unittest.skipIf(len(cuda.gpus) < 2, "need more than 1 gpus")
    @skip_on_cudasim('CUDA HW required')
    def test_forbidden_context_switch(self):
        # Cannot switch context inside a `cuda.require_context`
        @cuda.require_context
        def switch_gpu():
            with cuda.gpus[1]:
                pass

        with cuda.gpus[0]:
            with self.assertRaises(RuntimeError) as raises:
                switch_gpu()

        self.assertIn("Cannot switch CUDA-context.", str(raises.exception))

    @unittest.skipIf(len(cuda.gpus) < 2, "need more than 1 gpus")
    def test_accepted_context_switch(self):
        def switch_gpu():
            with cuda.gpus[1]:
                return cuda.current_context().device.id

        with cuda.gpus[0]:
            devid = switch_gpu()
        self.assertEqual(int(devid), 1)


@skip_on_cudasim('CUDA HW required')
class Test3rdPartyContext(CUDATestCase):
    # Interoperation with contexts created outside Numba.
    def tearDown(self):
        super().tearDown()
        cuda.close()

    def test_attached_primary(self, extra_work=lambda: None):
        # Emulate primary context creation by 3rd party
        the_driver = driver.driver
        if driver.USE_NV_BINDING:
            dev = driver.binding.CUdevice(0)
            hctx = the_driver.cuDevicePrimaryCtxRetain(dev)
        else:
            dev = 0
            hctx = driver.drvapi.cu_context()
            the_driver.cuDevicePrimaryCtxRetain(byref(hctx), dev)
        try:
            ctx = driver.Context(weakref.proxy(self), hctx)
            ctx.push()
            # Check that the context from numba matches the created primary
            # context.
            my_ctx = cuda.current_context()
            if driver.USE_NV_BINDING:
                self.assertEqual(int(my_ctx.handle), int(ctx.handle))
            else:
                self.assertEqual(my_ctx.handle.value, ctx.handle.value)

            extra_work()
        finally:
            ctx.pop()
            the_driver.cuDevicePrimaryCtxRelease(dev)

    def test_attached_non_primary(self):
        # Emulate non-primary context creation by 3rd party
        the_driver = driver.driver
        if driver.USE_NV_BINDING:
            flags = 0
            dev = driver.binding.CUdevice(0)
            hctx = the_driver.cuCtxCreate(flags, dev)
        else:
            hctx = driver.drvapi.cu_context()
            the_driver.cuCtxCreate(byref(hctx), 0, 0)
        try:
            cuda.current_context()
        except RuntimeError as e:
            # Expecting an error about non-primary CUDA context
            self.assertIn("Numba cannot operate on non-primary CUDA context ",
                          str(e))
        else:
            self.fail("No RuntimeError raised")
        finally:
            the_driver.cuCtxDestroy(hctx)

    def test_cudajit_in_attached_primary_context(self):
        def do():
            from numba import cuda

            @cuda.jit
            def foo(a):
                for i in range(a.size):
                    a[i] = i

            a = cuda.device_array(10)
            foo[1, 1](a)
            self.assertEqual(list(a.copy_to_host()), list(range(10)))

        self.test_attached_primary(do)


if __name__ == '__main__':
    unittest.main()
diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py new file mode 100644 index 0000000000000000000000000000000000000000..5033a115fa1fcf407f9db8f3ff75435e5bdaf3aa --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py @@ -0,0 +1,375 @@
from itertools import product

import numpy as np

from numba import cuda
from numba.cuda.testing import unittest,
CUDATestCase, skip_on_cudasim
from unittest.mock import patch


# Device-array indexing/slicing/setting tests: each case mirrors an
# operation on a host numpy array and checks the device result matches.
class CudaArrayIndexing(CUDATestCase):
    def test_index_1d(self):
        arr = np.arange(10)
        darr = cuda.to_device(arr)
        x, = arr.shape
        for i in range(-x, x):
            self.assertEqual(arr[i], darr[i])
        with self.assertRaises(IndexError):
            darr[-x - 1]
        with self.assertRaises(IndexError):
            darr[x]

    def test_index_2d(self):
        arr = np.arange(3 * 4).reshape(3, 4)
        darr = cuda.to_device(arr)
        x, y = arr.shape
        for i in range(-x, x):
            for j in range(-y, y):
                self.assertEqual(arr[i, j], darr[i, j])
        with self.assertRaises(IndexError):
            darr[-x - 1, 0]
        with self.assertRaises(IndexError):
            darr[x, 0]
        with self.assertRaises(IndexError):
            darr[0, -y - 1]
        with self.assertRaises(IndexError):
            darr[0, y]

    def test_index_3d(self):
        arr = np.arange(3 * 4 * 5).reshape(3, 4, 5)
        darr = cuda.to_device(arr)
        x, y, z = arr.shape
        for i in range(-x, x):
            for j in range(-y, y):
                for k in range(-z, z):
                    self.assertEqual(arr[i, j, k], darr[i, j, k])
        with self.assertRaises(IndexError):
            darr[-x - 1, 0, 0]
        with self.assertRaises(IndexError):
            darr[x, 0, 0]
        with self.assertRaises(IndexError):
            darr[0, -y - 1, 0]
        with self.assertRaises(IndexError):
            darr[0, y, 0]
        with self.assertRaises(IndexError):
            darr[0, 0, -z - 1]
        with self.assertRaises(IndexError):
            darr[0, 0, z]


class CudaArrayStridedSlice(CUDATestCase):

    def test_strided_index_1d(self):
        arr = np.arange(10)
        darr = cuda.to_device(arr)
        for i in range(arr.size):
            np.testing.assert_equal(arr[i::2], darr[i::2].copy_to_host())

    def test_strided_index_2d(self):
        arr = np.arange(6 * 7).reshape(6, 7)
        darr = cuda.to_device(arr)

        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                np.testing.assert_equal(arr[i::2, j::2],
                                        darr[i::2, j::2].copy_to_host())

    def test_strided_index_3d(self):
        arr = np.arange(6 * 7 * 8).reshape(6, 7, 8)
        darr = cuda.to_device(arr)

        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                for k in range(arr.shape[2]):
                    np.testing.assert_equal(
                        arr[i::2, j::2, k::2],
                        darr[i::2, j::2, k::2].copy_to_host())


class CudaArraySlicing(CUDATestCase):
    def test_prefix_1d(self):
        arr = np.arange(5)
        darr = cuda.to_device(arr)
        for i in range(arr.size):
            expect = arr[i:]
            got = darr[i:].copy_to_host()
            self.assertTrue(np.all(expect == got))

    def test_prefix_2d(self):
        arr = np.arange(3 ** 2).reshape(3, 3)
        darr = cuda.to_device(arr)
        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                expect = arr[i:, j:]
                sliced = darr[i:, j:]
                self.assertEqual(expect.shape, sliced.shape)
                self.assertEqual(expect.strides, sliced.strides)
                got = sliced.copy_to_host()
                self.assertTrue(np.all(expect == got))

    def test_select_3d_first_two_dim(self):
        arr = np.arange(3 * 4 * 5).reshape(3, 4, 5)
        darr = cuda.to_device(arr)
        # Select first dimension
        for i in range(arr.shape[0]):
            expect = arr[i]
            sliced = darr[i]
            self.assertEqual(expect.shape, sliced.shape)
            self.assertEqual(expect.strides, sliced.strides)
            got = sliced.copy_to_host()
            self.assertTrue(np.all(expect == got))
        # Select second dimension
        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                expect = arr[i, j]
                sliced = darr[i, j]
                self.assertEqual(expect.shape, sliced.shape)
                self.assertEqual(expect.strides, sliced.strides)
                got = sliced.copy_to_host()
                self.assertTrue(np.all(expect == got))

    def test_select_f(self):
        a = np.arange(5 * 6 * 7).reshape(5, 6, 7, order='F')
        da = cuda.to_device(a)

        for i in range(a.shape[0]):
            for j in range(a.shape[1]):
                self.assertTrue(np.array_equal(da[i, j, :].copy_to_host(),
                                               a[i, j, :]))
            for j in range(a.shape[2]):
                self.assertTrue(np.array_equal(da[i, :, j].copy_to_host(),
                                               a[i, :, j]))
        for i in range(a.shape[1]):
            for j in range(a.shape[2]):
                self.assertTrue(np.array_equal(da[:, i, j].copy_to_host(),
                                               a[:, i, j]))

    def test_select_c(self):
        a = np.arange(5 * 6 * 7).reshape(5, 6, 7, order='C')
        da = cuda.to_device(a)

        for i in range(a.shape[0]):
            for j in range(a.shape[1]):
                self.assertTrue(np.array_equal(da[i, j, :].copy_to_host(),
                                               a[i, j, :]))
            for j in range(a.shape[2]):
                self.assertTrue(np.array_equal(da[i, :, j].copy_to_host(),
                                               a[i, :, j]))
        for i in range(a.shape[1]):
            for j in range(a.shape[2]):
                self.assertTrue(np.array_equal(da[:, i, j].copy_to_host(),
                                               a[:, i, j]))

    def test_prefix_select(self):
        arr = np.arange(5 * 7).reshape(5, 7, order='F')

        darr = cuda.to_device(arr)
        self.assertTrue(np.all(darr[:1, 1].copy_to_host() == arr[:1, 1]))

    def test_negative_slicing_1d(self):
        arr = np.arange(10)
        darr = cuda.to_device(arr)
        for i, j in product(range(-10, 10), repeat=2):
            np.testing.assert_array_equal(arr[i:j],
                                          darr[i:j].copy_to_host())

    def test_negative_slicing_2d(self):
        arr = np.arange(12).reshape(3, 4)
        darr = cuda.to_device(arr)
        for x, y, w, s in product(range(-4, 4), repeat=4):
            np.testing.assert_array_equal(arr[x:y, w:s],
                                          darr[x:y, w:s].copy_to_host())

    def test_empty_slice_1d(self):
        arr = np.arange(5)
        darr = cuda.to_device(arr)
        for i in range(darr.shape[0]):
            np.testing.assert_array_equal(darr[i:i].copy_to_host(), arr[i:i])
        # empty slice of empty slice
        self.assertFalse(darr[:0][:0].copy_to_host())
        # out-of-bound slice just produces empty slices
        np.testing.assert_array_equal(darr[:0][:1].copy_to_host(),
                                      arr[:0][:1])
        np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(),
                                      arr[:0][-1:])

    def test_empty_slice_2d(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        np.testing.assert_array_equal(darr[:0].copy_to_host(), arr[:0])
        np.testing.assert_array_equal(darr[3, :0].copy_to_host(), arr[3, :0])
        # empty slice of empty slice
        self.assertFalse(darr[:0][:0].copy_to_host())
        # out-of-bound slice just produces empty slices
        np.testing.assert_array_equal(darr[:0][:1].copy_to_host(), arr[:0][:1])
        np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(),
                                      arr[:0][-1:])


class CudaArraySetting(CUDATestCase):
    """
    Most of the slicing logic is tested in the cases above, so these
    tests focus on the setting logic.
    """

    def test_scalar(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        arr[2, 2] = 500
        darr[2, 2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_rank(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        arr[2] = 500
        darr[2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_broadcast(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        arr[:, 2] = 500
        darr[:, 2] = 500
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_column(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=7, fill_value=400)
        arr[2] = _400
        darr[2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_row(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=5, fill_value=400)
        arr[:, 2] = _400
        darr[:, 2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_subarray(self):
        arr = np.arange(5 * 6 * 7).reshape(5, 6, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=(6, 7), fill_value=400)
        arr[2] = _400
        darr[2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_deep_subarray(self):
        arr = np.arange(5 * 6 * 7 * 8).reshape(5, 6, 7, 8)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=(5, 6, 8), fill_value=400)
        arr[:, :, 2] = _400
        darr[:, :, 2] = _400
        np.testing.assert_array_equal(darr.copy_to_host(), arr)

    def test_array_assign_all(self):
        arr = np.arange(5 * 7).reshape(5, 7)
        darr = cuda.to_device(arr)
        _400 = np.full(shape=(5, 7), fill_value=400)
        arr[:] = _400
        darr[:]
= _400 + np.testing.assert_array_equal(darr.copy_to_host(), arr) + + def test_strides(self): + arr = np.ones(20) + darr = cuda.to_device(arr) + arr[::2] = 500 + darr[::2] = 500 + np.testing.assert_array_equal(darr.copy_to_host(), arr) + + def test_incompatible_highdim(self): + darr = cuda.to_device(np.arange(5 * 7)) + + with self.assertRaises(ValueError) as e: + darr[:] = np.ones(shape=(1, 2, 3)) + + self.assertIn( + member=str(e.exception), + container=[ + "Can't assign 3-D array to 1-D self", # device + "could not broadcast input array from shape (2,3) " + "into shape (35,)", # simulator, NP >= 1.20 + ]) + + def test_incompatible_shape(self): + darr = cuda.to_device(np.arange(5)) + + with self.assertRaises(ValueError) as e: + darr[:] = [1, 3] + + self.assertIn( + member=str(e.exception), + container=[ + "Can't copy sequence with size 2 to array axis 0 with " + "dimension 5", # device + "could not broadcast input array from shape (2,) into " + "shape (5,)", # simulator, NP >= 1.20 + ]) + + @skip_on_cudasim('cudasim does not use streams and operates synchronously') + def test_sync(self): + # There should be a synchronization when no stream is supplied + darr = cuda.to_device(np.arange(5)) + + with patch.object(cuda.cudadrv.driver.Stream, 'synchronize', + return_value=None) as mock_sync: + darr[0] = 10 + + mock_sync.assert_called_once() + + @skip_on_cudasim('cudasim does not use streams and operates synchronously') + def test_no_sync_default_stream(self): + # There should not be a synchronization when the array has a default + # stream, whether it is the default stream, the legacy default stream, + # the per-thread default stream, or another stream. 
+ streams = (cuda.stream(), cuda.default_stream(), + cuda.legacy_default_stream(), + cuda.per_thread_default_stream()) + + for stream in streams: + darr = cuda.to_device(np.arange(5), stream=stream) + + with patch.object(cuda.cudadrv.driver.Stream, 'synchronize', + return_value=None) as mock_sync: + darr[0] = 10 + + mock_sync.assert_not_called() + + @skip_on_cudasim('cudasim does not use streams and operates synchronously') + def test_no_sync_supplied_stream(self): + # There should not be a synchronization when a stream is supplied for + # the setitem call, whether it is the default stream, the legacy default + # stream, the per-thread default stream, or another stream. + streams = (cuda.stream(), cuda.default_stream(), + cuda.legacy_default_stream(), + cuda.per_thread_default_stream()) + + for stream in streams: + darr = cuda.to_device(np.arange(5)) + + with patch.object(cuda.cudadrv.driver.Stream, 'synchronize', + return_value=None) as mock_sync: + darr.setitem(0, 10, stream=stream) + + mock_sync.assert_not_called() + + @unittest.skip('Requires PR #6367') + def test_issue_6505(self): + # On Windows, the writes to ary_v would not be visible prior to the + # assertion, due to the assignment being done with a kernel launch that + # returns asynchronously - there should now be a sync after the kernel + # launch to ensure that the writes are always visible. 
+ ary = cuda.mapped_array(2, dtype=np.int32) + ary[:] = 0 + + ary_v = ary.view('u1') + ary_v[1] = 1 + ary_v[5] = 1 + self.assertEqual(sum(ary), 512) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_auto_context.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_auto_context.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4d59310dd34b36a1d8bd473a8f5e5d7eda5d93 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_auto_context.py @@ -0,0 +1,21 @@ +import numpy as np +from numba import cuda +from numba.cuda.testing import unittest, CUDATestCase + + +class TestCudaAutoContext(CUDATestCase): + def test_auto_context(self): + """A problem was revealed by a customer that the use cuda.to_device + does not create a CUDA context. + This tests the problem + """ + A = np.arange(10, dtype=np.float32) + newA = np.empty_like(A) + dA = cuda.to_device(A) + + dA.copy_to_host(newA) + self.assertTrue(np.allclose(A, newA)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py new file mode 100644 index 0000000000000000000000000000000000000000..e2acd34d7eca1dcc1efe48b38089c50bbeade0e7 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py @@ -0,0 +1,179 @@ +import numpy as np +import ctypes +from numba.cuda.cudadrv.devicearray import (DeviceRecord, from_record_like, + auto_device) +from numba.cuda.testing import unittest, CUDATestCase +from numba.cuda.testing import skip_on_cudasim +from numba.np import numpy_support +from numba import cuda + +N_CHARS = 5 + +recordtype = np.dtype( + [ + ('a', np.float64), + ('b', np.int32), + ('c', np.complex64), + ('d', (np.str_, N_CHARS)) + ], + align=True +) + +recordwitharray = np.dtype( + 
[ + ('g', np.int32), + ('h', np.float32, 2) + ], + align=True +) + +recwithmat = np.dtype([('i', np.int32), + ('j', np.float32, (3, 3))]) + +recwithrecwithmat = np.dtype([('x', np.int32), ('y', recwithmat)]) + + +@skip_on_cudasim('Device Record API unsupported in the simulator') +class TestCudaDeviceRecord(CUDATestCase): + """ + Tests the DeviceRecord class with np.void host types. + """ + def setUp(self): + super().setUp() + self._create_data(np.zeros) + + def _create_data(self, array_ctor): + self.dtype = np.dtype([('a', np.int32), ('b', np.float32)], align=True) + self.hostz = array_ctor(1, self.dtype)[0] + self.hostnz = array_ctor(1, self.dtype)[0] + self.hostnz['a'] = 10 + self.hostnz['b'] = 11.0 + + def _check_device_record(self, reference, rec): + self.assertEqual(rec.shape, tuple()) + self.assertEqual(rec.strides, tuple()) + self.assertEqual(rec.dtype, reference.dtype) + self.assertEqual(rec.alloc_size, reference.dtype.itemsize) + self.assertIsNotNone(rec.gpu_data) + self.assertNotEqual(rec.device_ctypes_pointer, ctypes.c_void_p(0)) + + numba_type = numpy_support.from_dtype(reference.dtype) + self.assertEqual(rec._numba_type_, numba_type) + + def test_device_record_interface(self): + hostrec = self.hostz.copy() + devrec = DeviceRecord(self.dtype) + self._check_device_record(hostrec, devrec) + + def test_device_record_copy(self): + hostrec = self.hostz.copy() + devrec = DeviceRecord(self.dtype) + devrec.copy_to_device(hostrec) + + # Copy back and check values are all zeros + hostrec2 = self.hostnz.copy() + devrec.copy_to_host(hostrec2) + np.testing.assert_equal(self.hostz, hostrec2) + + # Copy non-zero values to GPU and back and check values + hostrec3 = self.hostnz.copy() + devrec.copy_to_device(hostrec3) + + hostrec4 = self.hostz.copy() + devrec.copy_to_host(hostrec4) + np.testing.assert_equal(hostrec4, self.hostnz) + + def test_from_record_like(self): + # Create record from host record + hostrec = self.hostz.copy() + devrec = from_record_like(hostrec) + 
self._check_device_record(hostrec, devrec) + + # Create record from device record and check for distinct data + devrec2 = from_record_like(devrec) + self._check_device_record(devrec, devrec2) + self.assertNotEqual(devrec.gpu_data, devrec2.gpu_data) + + def test_auto_device(self): + # Create record from host record + hostrec = self.hostnz.copy() + devrec, new_gpu_obj = auto_device(hostrec) + self._check_device_record(hostrec, devrec) + self.assertTrue(new_gpu_obj) + + # Copy data back and check it is equal to auto_device arg + hostrec2 = self.hostz.copy() + devrec.copy_to_host(hostrec2) + np.testing.assert_equal(hostrec2, hostrec) + + +class TestCudaDeviceRecordWithRecord(TestCudaDeviceRecord): + """ + Tests the DeviceRecord class with np.record host types + """ + def setUp(self): + CUDATestCase.setUp(self) + self._create_data(np.recarray) + + +@skip_on_cudasim('Structured array attr access not supported in simulator') +class TestRecordDtypeWithStructArrays(CUDATestCase): + ''' + Test operation of device arrays on structured arrays. 
+ ''' + + def _createSampleArrays(self): + self.sample1d = cuda.device_array(3, dtype=recordtype) + self.samplerec1darr = cuda.device_array(1, dtype=recordwitharray)[0] + self.samplerecmat = cuda.device_array(1,dtype=recwithmat)[0] + + def setUp(self): + super().setUp() + self._createSampleArrays() + + ary = self.sample1d + for i in range(ary.size): + x = i + 1 + ary[i]['a'] = x / 2 + ary[i]['b'] = x + ary[i]['c'] = x * 1j + ary[i]['d'] = str(x) * N_CHARS + + def test_structured_array1(self): + ary = self.sample1d + for i in range(self.sample1d.size): + x = i + 1 + self.assertEqual(ary[i]['a'], x / 2) + self.assertEqual(ary[i]['b'], x) + self.assertEqual(ary[i]['c'], x * 1j) + self.assertEqual(ary[i]['d'], str(x) * N_CHARS) + + def test_structured_array2(self): + ary = self.samplerec1darr + ary['g'] = 2 + ary['h'][0] = 3.0 + ary['h'][1] = 4.0 + self.assertEqual(ary['g'], 2) + self.assertEqual(ary['h'][0], 3.0) + self.assertEqual(ary['h'][1], 4.0) + + def test_structured_array3(self): + ary = self.samplerecmat + mat = np.array([[5.0, 10.0, 15.0], + [20.0, 25.0, 30.0], + [35.0, 40.0, 45.0]], + dtype=np.float32).reshape(3,3) + ary['j'][:] = mat + np.testing.assert_equal(ary['j'], mat) + + def test_structured_array4(self): + arr = np.zeros(1, dtype=recwithrecwithmat) + d_arr = cuda.to_device(arr) + d_arr[0]['y']['i'] = 1 + self.assertEqual(d_arr[0]['y']['i'], 1) + d_arr[0]['y']['j'][0, 0] = 2.0 + self.assertEqual(d_arr[0]['y']['j'][0, 0], 2.0) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_driver.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_driver.py new file mode 100644 index 0000000000000000000000000000000000000000..ea9d72fa89cefa739be556061f19e26f680cbba7 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_driver.py @@ -0,0 +1,235 @@ +from ctypes import byref, c_int, c_void_p, sizeof + +from numba.cuda.cudadrv.driver import 
(host_to_device, device_to_host, driver, + launch_kernel) +from numba.cuda.cudadrv import devices, drvapi, driver as _driver +from numba.cuda.testing import unittest, CUDATestCase +from numba.cuda.testing import skip_on_cudasim + + +ptx1 = ''' + .version 1.4 + .target sm_10, map_f64_to_f32 + + .entry _Z10helloworldPi ( + .param .u64 __cudaparm__Z10helloworldPi_A) + { + .reg .u32 %r<3>; + .reg .u64 %rd<6>; + .loc 14 4 0 +$LDWbegin__Z10helloworldPi: + .loc 14 6 0 + cvt.s32.u16 %r1, %tid.x; + ld.param.u64 %rd1, [__cudaparm__Z10helloworldPi_A]; + cvt.u64.u16 %rd2, %tid.x; + mul.lo.u64 %rd3, %rd2, 4; + add.u64 %rd4, %rd1, %rd3; + st.global.s32 [%rd4+0], %r1; + .loc 14 7 0 + exit; +$LDWend__Z10helloworldPi: + } // _Z10helloworldPi +''' + +ptx2 = ''' +.version 3.0 +.target sm_20 +.address_size 64 + + .file 1 "/tmp/tmpxft_000012c7_00000000-9_testcuda.cpp3.i" + .file 2 "testcuda.cu" + +.entry _Z10helloworldPi( + .param .u64 _Z10helloworldPi_param_0 +) +{ + .reg .s32 %r<3>; + .reg .s64 %rl<5>; + + + ld.param.u64 %rl1, [_Z10helloworldPi_param_0]; + cvta.to.global.u64 %rl2, %rl1; + .loc 2 6 1 + mov.u32 %r1, %tid.x; + mul.wide.u32 %rl3, %r1, 4; + add.s64 %rl4, %rl2, %rl3; + st.global.u32 [%rl4], %r1; + .loc 2 7 2 + ret; +} +''' + + +@skip_on_cudasim('CUDA Driver API unsupported in the simulator') +class TestCudaDriver(CUDATestCase): + def setUp(self): + super().setUp() + self.assertTrue(len(devices.gpus) > 0) + self.context = devices.get_context() + device = self.context.device + ccmajor, _ = device.compute_capability + if ccmajor >= 2: + self.ptx = ptx2 + else: + self.ptx = ptx1 + + def tearDown(self): + super().tearDown() + del self.context + + def test_cuda_driver_basic(self): + module = self.context.create_module_ptx(self.ptx) + function = module.get_function('_Z10helloworldPi') + + array = (c_int * 100)() + + memory = self.context.memalloc(sizeof(array)) + host_to_device(memory, array, sizeof(array)) + + ptr = memory.device_ctypes_pointer + stream = 0 + + if 
_driver.USE_NV_BINDING: + ptr = c_void_p(int(ptr)) + stream = _driver.binding.CUstream(stream) + + launch_kernel(function.handle, # Kernel + 1, 1, 1, # gx, gy, gz + 100, 1, 1, # bx, by, bz + 0, # dynamic shared mem + stream, # stream + [ptr]) # arguments + + device_to_host(array, memory, sizeof(array)) + for i, v in enumerate(array): + self.assertEqual(i, v) + + module.unload() + + def test_cuda_driver_stream_operations(self): + module = self.context.create_module_ptx(self.ptx) + function = module.get_function('_Z10helloworldPi') + + array = (c_int * 100)() + + stream = self.context.create_stream() + + with stream.auto_synchronize(): + memory = self.context.memalloc(sizeof(array)) + host_to_device(memory, array, sizeof(array), stream=stream) + + ptr = memory.device_ctypes_pointer + if _driver.USE_NV_BINDING: + ptr = c_void_p(int(ptr)) + + launch_kernel(function.handle, # Kernel + 1, 1, 1, # gx, gy, gz + 100, 1, 1, # bx, by, bz + 0, # dynamic shared mem + stream.handle, # stream + [ptr]) # arguments + + device_to_host(array, memory, sizeof(array), stream=stream) + + for i, v in enumerate(array): + self.assertEqual(i, v) + + def test_cuda_driver_default_stream(self): + # Test properties of the default stream + ds = self.context.get_default_stream() + self.assertIn("Default CUDA stream", repr(ds)) + self.assertEqual(0, int(ds)) + # bool(stream) is the check that is done in memcpy to decide if async + # version should be used. So the default (0) stream should be true-ish + # even though 0 is usually false-ish in Python. 
+ self.assertTrue(ds) + self.assertFalse(ds.external) + + def test_cuda_driver_legacy_default_stream(self): + # Test properties of the legacy default stream + ds = self.context.get_legacy_default_stream() + self.assertIn("Legacy default CUDA stream", repr(ds)) + self.assertEqual(1, int(ds)) + self.assertTrue(ds) + self.assertFalse(ds.external) + + def test_cuda_driver_per_thread_default_stream(self): + # Test properties of the per-thread default stream + ds = self.context.get_per_thread_default_stream() + self.assertIn("Per-thread default CUDA stream", repr(ds)) + self.assertEqual(2, int(ds)) + self.assertTrue(ds) + self.assertFalse(ds.external) + + def test_cuda_driver_stream(self): + # Test properties of non-default streams + s = self.context.create_stream() + self.assertIn("CUDA stream", repr(s)) + self.assertNotIn("Default", repr(s)) + self.assertNotIn("External", repr(s)) + self.assertNotEqual(0, int(s)) + self.assertTrue(s) + self.assertFalse(s.external) + + def test_cuda_driver_external_stream(self): + # Test properties of a stream created from an external stream object. 
+ # We use the driver API directly to create a stream, to emulate an + # external library creating a stream + if _driver.USE_NV_BINDING: + handle = driver.cuStreamCreate(0) + ptr = int(handle) + else: + handle = drvapi.cu_stream() + driver.cuStreamCreate(byref(handle), 0) + ptr = handle.value + s = self.context.create_external_stream(ptr) + + self.assertIn("External CUDA stream", repr(s)) + # Ensure neither "Default" nor "default" + self.assertNotIn("efault", repr(s)) + self.assertEqual(ptr, int(s)) + self.assertTrue(s) + self.assertTrue(s.external) + + def test_cuda_driver_occupancy(self): + module = self.context.create_module_ptx(self.ptx) + function = module.get_function('_Z10helloworldPi') + + value = self.context.get_active_blocks_per_multiprocessor(function, + 128, 128) + self.assertTrue(value > 0) + + def b2d(bs): + return bs + + grid, block = self.context.get_max_potential_block_size(function, b2d, + 128, 128) + self.assertTrue(grid > 0) + self.assertTrue(block > 0) + + +class TestDevice(CUDATestCase): + def test_device_get_uuid(self): + # A device UUID looks like: + # + # GPU-e6489c45-5b68-3b03-bab7-0e7c8e809643 + # + # To test, we construct an RE that matches this form and verify that + # the returned UUID matches. + # + # Device UUIDs may not conform to parts of the UUID specification (RFC + # 4122) pertaining to versions and variants, so we do not extract and + # validate the values of these bits. 
+ + h = '[0-9a-f]{%d}' + h4 = h % 4 + h8 = h % 8 + h12 = h % 12 + uuid_format = f'^GPU-{h8}-{h4}-{h4}-{h4}-{h12}$' + + dev = devices.get_context().device + self.assertRegex(dev.uuid, uuid_format) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_libraries.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_libraries.py new file mode 100644 index 0000000000000000000000000000000000000000..890bf68293565a24d0f36a56a613ea7a126d202e --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_libraries.py @@ -0,0 +1,22 @@ +from numba.cuda.testing import unittest +from numba.cuda.testing import skip_on_cudasim, skip_unless_conda_cudatoolkit +from numba.misc.findlib import find_lib + + +@skip_on_cudasim('Library detection unsupported in the simulator') +@skip_unless_conda_cudatoolkit +class TestLibraryDetection(unittest.TestCase): + def test_detect(self): + """ + This test is solely present to ensure that shipped cudatoolkits have + additional core libraries in locations that Numba scans by default. + PyCulib (and potentially others) rely on Numba's library finding + capacity to find and subsequently load these libraries. 
+ """ + core_libs = ['nvvm'] + for l in core_libs: + self.assertNotEqual(find_lib(l), []) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_memory.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..6402f77730cc841f3d622974caf2db9f7db61a7e --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_memory.py @@ -0,0 +1,193 @@ +import ctypes + +import numpy as np + +from numba.cuda.cudadrv import driver, drvapi, devices +from numba.cuda.testing import unittest, ContextResettingTestCase +from numba.cuda.testing import skip_on_cudasim + + +@skip_on_cudasim('CUDA Memory API unsupported in the simulator') +class TestCudaMemory(ContextResettingTestCase): + def setUp(self): + super().setUp() + self.context = devices.get_context() + + def tearDown(self): + del self.context + super(TestCudaMemory, self).tearDown() + + def _template(self, obj): + self.assertTrue(driver.is_device_memory(obj)) + driver.require_device_memory(obj) + if driver.USE_NV_BINDING: + expected_class = driver.binding.CUdeviceptr + else: + expected_class = drvapi.cu_device_ptr + self.assertTrue(isinstance(obj.device_ctypes_pointer, + expected_class)) + + def test_device_memory(self): + devmem = self.context.memalloc(1024) + self._template(devmem) + + def test_device_view(self): + devmem = self.context.memalloc(1024) + self._template(devmem.view(10)) + + def test_host_alloc(self): + devmem = self.context.memhostalloc(1024, mapped=True) + self._template(devmem) + + def test_pinned_memory(self): + ary = np.arange(10) + devmem = self.context.mempin(ary, ary.ctypes.data, + ary.size * ary.dtype.itemsize, + mapped=True) + self._template(devmem) + + def test_managed_memory(self): + devmem = self.context.memallocmanaged(1024) + self._template(devmem) + + def test_derived_pointer(self): + # Use MemoryPointer.view to 
create derived pointer + + def handle_val(mem): + if driver.USE_NV_BINDING: + return int(mem.handle) + else: + return mem.handle.value + + def check(m, offset): + # create view + v1 = m.view(offset) + self.assertEqual(handle_val(v1.owner), handle_val(m)) + self.assertEqual(m.refct, 2) + self.assertEqual(handle_val(v1) - offset, handle_val(v1.owner)) + # create a view + v2 = v1.view(offset) + self.assertEqual(handle_val(v2.owner), handle_val(m)) + self.assertEqual(handle_val(v2.owner), handle_val(m)) + self.assertEqual(handle_val(v2) - offset * 2, + handle_val(v2.owner)) + self.assertEqual(m.refct, 3) + del v2 + self.assertEqual(m.refct, 2) + del v1 + self.assertEqual(m.refct, 1) + + m = self.context.memalloc(1024) + check(m=m, offset=0) + check(m=m, offset=1) + + def test_user_extension(self): + # User can use MemoryPointer to wrap externally defined pointers. + # This test checks if the finalizer is invokded at correct time + fake_ptr = ctypes.c_void_p(0xdeadbeef) + dtor_invoked = [0] + + def dtor(): + dtor_invoked[0] += 1 + + # Ensure finalizer is called when pointer is deleted + ptr = driver.MemoryPointer(context=self.context, pointer=fake_ptr, + size=40, finalizer=dtor) + self.assertEqual(dtor_invoked[0], 0) + del ptr + self.assertEqual(dtor_invoked[0], 1) + + # Ensure removing derived pointer doesn't call finalizer + ptr = driver.MemoryPointer(context=self.context, pointer=fake_ptr, + size=40, finalizer=dtor) + owned = ptr.own() + del owned + self.assertEqual(dtor_invoked[0], 1) + del ptr + self.assertEqual(dtor_invoked[0], 2) + + +class TestCudaMemoryFunctions(ContextResettingTestCase): + def setUp(self): + super().setUp() + self.context = devices.get_context() + + def tearDown(self): + del self.context + super(TestCudaMemoryFunctions, self).tearDown() + + def test_memcpy(self): + hstary = np.arange(100, dtype=np.uint32) + hstary2 = np.arange(100, dtype=np.uint32) + sz = hstary.size * hstary.dtype.itemsize + devary = self.context.memalloc(sz) + + 
driver.host_to_device(devary, hstary, sz) + driver.device_to_host(hstary2, devary, sz) + + self.assertTrue(np.all(hstary == hstary2)) + + def test_memset(self): + dtype = np.dtype('uint32') + n = 10 + sz = dtype.itemsize * 10 + devary = self.context.memalloc(sz) + driver.device_memset(devary, 0xab, sz) + + hstary = np.empty(n, dtype=dtype) + driver.device_to_host(hstary, devary, sz) + + hstary2 = np.array([0xabababab] * n, dtype=np.dtype('uint32')) + self.assertTrue(np.all(hstary == hstary2)) + + def test_d2d(self): + hst = np.arange(100, dtype=np.uint32) + hst2 = np.empty_like(hst) + sz = hst.size * hst.dtype.itemsize + dev1 = self.context.memalloc(sz) + dev2 = self.context.memalloc(sz) + driver.host_to_device(dev1, hst, sz) + driver.device_to_device(dev2, dev1, sz) + driver.device_to_host(hst2, dev2, sz) + self.assertTrue(np.all(hst == hst2)) + + +@skip_on_cudasim('CUDA Memory API unsupported in the simulator') +class TestMVExtent(ContextResettingTestCase): + def test_c_contiguous_array(self): + ary = np.arange(100) + arysz = ary.dtype.itemsize * ary.size + s, e = driver.host_memory_extents(ary) + self.assertTrue(ary.ctypes.data == s) + self.assertTrue(arysz == driver.host_memory_size(ary)) + + def test_f_contiguous_array(self): + ary = np.asfortranarray(np.arange(100).reshape(2, 50)) + arysz = ary.dtype.itemsize * np.prod(ary.shape) + s, e = driver.host_memory_extents(ary) + self.assertTrue(ary.ctypes.data == s) + self.assertTrue(arysz == driver.host_memory_size(ary)) + + def test_single_element_array(self): + ary = np.asarray(np.uint32(1234)) + arysz = ary.dtype.itemsize + s, e = driver.host_memory_extents(ary) + self.assertTrue(ary.ctypes.data == s) + self.assertTrue(arysz == driver.host_memory_size(ary)) + + def test_ctypes_struct(self): + class mystruct(ctypes.Structure): + _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)] + + data = mystruct(x=123, y=432) + sz = driver.host_memory_size(data) + self.assertTrue(ctypes.sizeof(data) == sz) + + def 
test_ctypes_double(self): + data = ctypes.c_double(1.234) + sz = driver.host_memory_size(data) + self.assertTrue(ctypes.sizeof(data) == sz) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_ndarray.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_ndarray.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9c9195eb4b63eaa6b6764657f2f127be8b9b88 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_cuda_ndarray.py @@ -0,0 +1,547 @@ +import itertools +import numpy as np +from numba.cuda.cudadrv import devicearray +from numba import cuda +from numba.cuda.testing import unittest, CUDATestCase +from numba.cuda.testing import skip_on_cudasim + + +class TestCudaNDArray(CUDATestCase): + def test_device_array_interface(self): + dary = cuda.device_array(shape=100) + devicearray.verify_cuda_ndarray_interface(dary) + + ary = np.empty(100) + dary = cuda.to_device(ary) + devicearray.verify_cuda_ndarray_interface(dary) + + ary = np.asarray(1.234) + dary = cuda.to_device(ary) + self.assertEqual(dary.ndim, 0) + devicearray.verify_cuda_ndarray_interface(dary) + + def test_device_array_from_readonly(self): + ary = np.arange(100, dtype=np.float32) + # Make the array readonly + ary.flags.writeable = False + self.assertFalse(ary.flags.writeable) + # Ensure that we can copy the readonly array + dary = cuda.to_device(ary) + retr = dary.copy_to_host() + np.testing.assert_array_equal(retr, ary) + + def test_devicearray_dtype(self): + dary = cuda.device_array(shape=(100,), dtype="f4") + self.assertEqual(dary.dtype, np.dtype("f4")) + + def test_devicearray_no_copy(self): + array = np.arange(100, dtype=np.float32) + cuda.to_device(array, copy=False) + + def test_devicearray_shape(self): + ary = np.arange(2 * 3 * 4).reshape(2, 3, 4) + dary = cuda.to_device(ary) + self.assertEqual(ary.shape, dary.shape) + self.assertEqual(ary.shape[1:], dary.shape[1:]) + 
+ def test_devicearray(self): + array = np.arange(100, dtype=np.int32) + original = array.copy() + gpumem = cuda.to_device(array) + array[:] = 0 + gpumem.copy_to_host(array) + + np.testing.assert_array_equal(array, original) + + def test_stream_bind(self): + stream = cuda.stream() + with stream.auto_synchronize(): + arr = cuda.device_array( + (3, 3), + dtype=np.float64, + stream=stream) + self.assertEqual(arr.bind(stream).stream, stream) + self.assertEqual(arr.stream, stream) + + def test_len_1d(self): + ary = np.empty((3,)) + dary = cuda.device_array(3) + self.assertEqual(len(ary), len(dary)) + + def test_len_2d(self): + ary = np.empty((3, 5)) + dary = cuda.device_array((3, 5)) + self.assertEqual(len(ary), len(dary)) + + def test_len_3d(self): + ary = np.empty((3, 5, 7)) + dary = cuda.device_array((3, 5, 7)) + self.assertEqual(len(ary), len(dary)) + + def test_devicearray_partition(self): + N = 100 + array = np.arange(N, dtype=np.int32) + original = array.copy() + gpumem = cuda.to_device(array) + left, right = gpumem.split(N // 2) + + array[:] = 0 + + self.assertTrue(np.all(array == 0)) + + right.copy_to_host(array[N // 2:]) + left.copy_to_host(array[:N // 2]) + + self.assertTrue(np.all(array == original)) + + def test_devicearray_replace(self): + N = 100 + array = np.arange(N, dtype=np.int32) + original = array.copy() + gpumem = cuda.to_device(array) + cuda.to_device(array * 2, to=gpumem) + gpumem.copy_to_host(array) + np.testing.assert_array_equal(array, original * 2) + + @skip_on_cudasim('This works in the simulator') + def test_devicearray_transpose_wrongdim(self): + gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4, 1)) + + with self.assertRaises(NotImplementedError) as e: + np.transpose(gpumem) + + self.assertEqual( + "transposing a non-2D DeviceNDArray isn't supported", + str(e.exception)) + + def test_devicearray_transpose_identity(self): + # any-shape identities should work + original = np.array(np.arange(24)).reshape(3, 4, 2) + array = 
np.transpose(cuda.to_device(original), + axes=(0, 1, 2)).copy_to_host() + self.assertTrue(np.all(array == original)) + + def test_devicearray_transpose_duplicatedaxis(self): + gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4)) + + with self.assertRaises(ValueError) as e: + np.transpose(gpumem, axes=(0, 0)) + + self.assertIn( + str(e.exception), + container=[ + 'invalid axes list (0, 0)', # GPU + 'repeated axis in transpose', # sim + ]) + + def test_devicearray_transpose_wrongaxis(self): + gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4)) + + with self.assertRaises(ValueError) as e: + np.transpose(gpumem, axes=(0, 2)) + + self.assertIn( + str(e.exception), + container=[ + 'invalid axes list (0, 2)', # GPU + 'invalid axis for this array', + 'axis 2 is out of bounds for array of dimension 2', # sim + ]) + + def test_devicearray_view_ok(self): + original = np.array(np.arange(12), dtype="i2").reshape(3, 4) + array = cuda.to_device(original) + for dtype in ("i4", "u4", "i8", "f8"): + with self.subTest(dtype=dtype): + np.testing.assert_array_equal( + array.view(dtype).copy_to_host(), + original.view(dtype) + ) + + def test_devicearray_view_ok_not_c_contig(self): + original = np.array(np.arange(32), dtype="i2").reshape(4, 8) + array = cuda.to_device(original)[:, ::2] + original = original[:, ::2] + np.testing.assert_array_equal( + array.view("u2").copy_to_host(), + original.view("u2") + ) + + def test_devicearray_view_bad_not_c_contig(self): + original = np.array(np.arange(32), dtype="i2").reshape(4, 8) + array = cuda.to_device(original)[:, ::2] + with self.assertRaises(ValueError) as e: + array.view("i4") + + msg = str(e.exception) + self.assertIn('To change to a dtype of a different size,', msg) + + contiguous_pre_np123 = 'the array must be C-contiguous' in msg + contiguous_post_np123 = 'the last axis must be contiguous' in msg + self.assertTrue(contiguous_pre_np123 or contiguous_post_np123, + 'Expected message to mention contiguity') + + def 
test_devicearray_view_bad_itemsize(self): + original = np.array(np.arange(12), dtype="i2").reshape(4, 3) + array = cuda.to_device(original) + with self.assertRaises(ValueError) as e: + array.view("i4") + self.assertEqual( + "When changing to a larger dtype," + " its size must be a divisor of the total size in bytes" + " of the last axis of the array.", + str(e.exception)) + + def test_devicearray_transpose_ok(self): + original = np.array(np.arange(12)).reshape(3, 4) + array = np.transpose(cuda.to_device(original)).copy_to_host() + self.assertTrue(np.all(array == original.T)) + + def test_devicearray_transpose_T(self): + original = np.array(np.arange(12)).reshape(3, 4) + array = cuda.to_device(original).T.copy_to_host() + self.assertTrue(np.all(array == original.T)) + + def test_devicearray_contiguous_slice(self): + # memcpys are dumb ranges of bytes, so trying to + # copy to a non-contiguous range shouldn't work! + a = np.arange(25).reshape(5, 5, order='F') + s = np.full(fill_value=5, shape=(5,)) + + d = cuda.to_device(a) + a[2] = s + + # d is in F-order (not C-order), so d[2] is not contiguous + # (40-byte strides). This means we can't memcpy to it! 
+ with self.assertRaises(ValueError) as e: + d[2].copy_to_device(s) + self.assertEqual( + devicearray.errmsg_contiguous_buffer, + str(e.exception)) + + # if d[2].copy_to_device(s), then this would pass: + # self.assertTrue((a == d.copy_to_host()).all()) + + def _test_devicearray_contiguous_host_copy(self, a_c, a_f): + """ + Checks host->device memcpys + """ + self.assertTrue(a_c.flags.c_contiguous) + self.assertTrue(a_f.flags.f_contiguous) + + for original, copy in [ + (a_f, a_f), + (a_f, a_c), + (a_c, a_f), + (a_c, a_c), + ]: + msg = '%s => %s' % ( + 'C' if original.flags.c_contiguous else 'F', + 'C' if copy.flags.c_contiguous else 'F', + ) + + d = cuda.to_device(original) + d.copy_to_device(copy) + self.assertTrue(np.all(d.copy_to_host() == a_c), msg=msg) + self.assertTrue(np.all(d.copy_to_host() == a_f), msg=msg) + + def test_devicearray_contiguous_copy_host_3d(self): + a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5) + a_f = np.array(a_c, order='F') + self._test_devicearray_contiguous_host_copy(a_c, a_f) + + def test_devicearray_contiguous_copy_host_1d(self): + a_c = np.arange(5) + a_f = np.array(a_c, order='F') + self._test_devicearray_contiguous_host_copy(a_c, a_f) + + def test_devicearray_contiguous_copy_device(self): + a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5) + a_f = np.array(a_c, order='F') + self.assertTrue(a_c.flags.c_contiguous) + self.assertTrue(a_f.flags.f_contiguous) + + d = cuda.to_device(a_c) + + with self.assertRaises(ValueError) as e: + d.copy_to_device(cuda.to_device(a_f)) + self.assertEqual( + "incompatible strides: {} vs. {}".format(a_c.strides, a_f.strides), + str(e.exception)) + + d.copy_to_device(cuda.to_device(a_c)) + self.assertTrue(np.all(d.copy_to_host() == a_c)) + + d = cuda.to_device(a_f) + + with self.assertRaises(ValueError) as e: + d.copy_to_device(cuda.to_device(a_c)) + self.assertEqual( + "incompatible strides: {} vs. 
{}".format(a_f.strides, a_c.strides), + str(e.exception)) + + d.copy_to_device(cuda.to_device(a_f)) + self.assertTrue(np.all(d.copy_to_host() == a_f)) + + def test_devicearray_broadcast_host_copy(self): + broadsize = 4 + coreshape = (2, 3) + coresize = np.prod(coreshape) + core_c = np.arange(coresize).reshape(coreshape, order='C') + core_f = np.arange(coresize).reshape(coreshape, order='F') + for dim in range(len(coreshape)): + newindex = (slice(None),) * dim + (np.newaxis,) + broadshape = coreshape[:dim] + (broadsize,) + coreshape[dim:] + broad_c = np.broadcast_to(core_c[newindex], broadshape) + broad_f = np.broadcast_to(core_f[newindex], broadshape) + dbroad_c = cuda.to_device(broad_c) + dbroad_f = cuda.to_device(broad_f) + np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_c) + np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_f) + # Also test copying across different core orderings + dbroad_c.copy_to_device(broad_f) + dbroad_f.copy_to_device(broad_c) + np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_f) + np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_c) + + def test_devicearray_contiguous_host_strided(self): + a_c = np.arange(10) + d = cuda.to_device(a_c) + arr = np.arange(20)[::2] + d.copy_to_device(arr) + np.testing.assert_array_equal(d.copy_to_host(), arr) + + def test_devicearray_contiguous_device_strided(self): + d = cuda.to_device(np.arange(20)) + arr = np.arange(20) + + with self.assertRaises(ValueError) as e: + d.copy_to_device(cuda.to_device(arr)[::2]) + self.assertEqual( + devicearray.errmsg_contiguous_buffer, + str(e.exception)) + + @skip_on_cudasim('DeviceNDArray class not present in simulator') + def test_devicearray_relaxed_strides(self): + # From the reproducer in Issue #6824. + + # Construct a device array that is contiguous even though + # the strides for the first axis (800) are not equal to + # the strides * size (10 * 8 = 80) for the previous axis, + # because the first axis size is 1. 
+ arr = devicearray.DeviceNDArray((1, 10), (800, 8), np.float64) + + # Ensure we still believe the array to be contiguous because + # strides checking is relaxed. + self.assertTrue(arr.flags['C_CONTIGUOUS']) + self.assertTrue(arr.flags['F_CONTIGUOUS']) + + def test_c_f_contiguity_matches_numpy(self): + # From the reproducer in Issue #4943. + + shapes = ((1, 4), (4, 1)) + orders = ('C', 'F') + + for shape, order in itertools.product(shapes, orders): + arr = np.ndarray(shape, order=order) + d_arr = cuda.to_device(arr) + self.assertEqual(arr.flags['C_CONTIGUOUS'], + d_arr.flags['C_CONTIGUOUS']) + self.assertEqual(arr.flags['F_CONTIGUOUS'], + d_arr.flags['F_CONTIGUOUS']) + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_simple_c(self): + # C-order 1D array + a = np.zeros(10, order='C') + d = cuda.to_device(a) + self.assertEqual(d._numba_type_.layout, 'C') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_simple_f(self): + # F-order array that is also C layout. 
+ a = np.zeros(10, order='F') + d = cuda.to_device(a) + self.assertEqual(d._numba_type_.layout, 'C') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_2d_c(self): + # C-order 2D array + a = np.zeros((2, 10), order='C') + d = cuda.to_device(a) + self.assertEqual(d._numba_type_.layout, 'C') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_2d_f(self): + # F-order array that can only be F layout + a = np.zeros((2, 10), order='F') + d = cuda.to_device(a) + self.assertEqual(d._numba_type_.layout, 'F') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_noncontig_slice_c(self): + # Non-contiguous slice of C-order array + a = np.zeros((5, 5), order='C') + d = cuda.to_device(a)[:,2] + self.assertEqual(d._numba_type_.layout, 'A') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_noncontig_slice_f(self): + # Non-contiguous slice of F-order array + a = np.zeros((5, 5), order='F') + d = cuda.to_device(a)[2,:] + self.assertEqual(d._numba_type_.layout, 'A') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_contig_slice_c(self): + # Contiguous slice of C-order array + a = np.zeros((5, 5), order='C') + d = cuda.to_device(a)[2,:] + self.assertEqual(d._numba_type_.layout, 'C') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_contig_slice_f(self): + # Contiguous slice of F-order array - is both C- and F-contiguous, so + # types as 'C' layout + a = np.zeros((5, 5), order='F') + d = cuda.to_device(a)[:,2] + self.assertEqual(d._numba_type_.layout, 'C') + + @skip_on_cudasim('Typing not done in the simulator') + def test_devicearray_typing_order_broadcasted(self): + # Broadcasted array, similar to that used for passing scalars to ufuncs + a = np.broadcast_to(np.array([1]), (10,)) + d = cuda.to_device(a) + 
self.assertEqual(d._numba_type_.layout, 'A') + + def test_bug6697(self): + ary = np.arange(10, dtype=np.int16) + dary = cuda.to_device(ary) + got = np.asarray(dary) + self.assertEqual(got.dtype, dary.dtype) + + @skip_on_cudasim('DeviceNDArray class not present in simulator') + def test_issue_8477(self): + # Ensure that we can copy a zero-length device array to a zero-length + # host array when the strides of the device and host arrays differ - + # this should be possible because the strides are irrelevant when the + # length is zero. For more info see + # https://github.com/numba/numba/issues/8477. + + # Create a device array with shape (0,) and strides (8,) + dev_array = devicearray.DeviceNDArray(shape=(0,), strides=(8,), + dtype=np.int8) + + # Create a host array with shape (0,) and strides (0,) + host_array = np.ndarray(shape=(0,), strides=(0,), dtype=np.int8) + + # Sanity check for this test - ensure our destination has the strides + # we expect, because strides can be ignored in some cases by the + # ndarray constructor - checking here ensures that we haven't failed to + # account for unexpected behaviour across different versions of NumPy + self.assertEqual(host_array.strides, (0,)) + + # Ensure that the copy succeeds in both directions + dev_array.copy_to_host(host_array) + dev_array.copy_to_device(host_array) + + # Ensure that a device-to-device copy also succeeds when the strides + # differ - one way of doing this is to copy the host array across and + # use that for copies in both directions. 
+ dev_array_from_host = cuda.to_device(host_array) + self.assertEqual(dev_array_from_host.shape, (0,)) + self.assertEqual(dev_array_from_host.strides, (0,)) + + dev_array.copy_to_device(dev_array_from_host) + dev_array_from_host.copy_to_device(dev_array) + + +class TestRecarray(CUDATestCase): + def test_recarray(self): + # From issue #4111 + a = np.recarray((16,), dtype=[ + ("value1", np.int64), + ("value2", np.float64), + ]) + a.value1 = np.arange(a.size, dtype=np.int64) + a.value2 = np.arange(a.size, dtype=np.float64) / 100 + + expect1 = a.value1 + expect2 = a.value2 + + def test(x, out1, out2): + i = cuda.grid(1) + if i < x.size: + out1[i] = x.value1[i] + out2[i] = x.value2[i] + + got1 = np.zeros_like(expect1) + got2 = np.zeros_like(expect2) + cuda.jit(test)[1, a.size](a, got1, got2) + + np.testing.assert_array_equal(expect1, got1) + np.testing.assert_array_equal(expect2, got2) + + +class TestCoreContiguous(CUDATestCase): + def _test_against_array_core(self, view): + self.assertEqual( + devicearray.is_contiguous(view), + devicearray.array_core(view).flags['C_CONTIGUOUS'] + ) + + def test_device_array_like_1d(self): + d_a = cuda.device_array(10, order='C') + self._test_against_array_core(d_a) + + def test_device_array_like_2d(self): + d_a = cuda.device_array((10, 12), order='C') + self._test_against_array_core(d_a) + + def test_device_array_like_2d_transpose(self): + d_a = cuda.device_array((10, 12), order='C') + self._test_against_array_core(d_a.T) + + def test_device_array_like_3d(self): + d_a = cuda.device_array((10, 12, 14), order='C') + self._test_against_array_core(d_a) + + def test_device_array_like_1d_f(self): + d_a = cuda.device_array(10, order='F') + self._test_against_array_core(d_a) + + def test_device_array_like_2d_f(self): + d_a = cuda.device_array((10, 12), order='F') + self._test_against_array_core(d_a) + + def test_device_array_like_2d_f_transpose(self): + d_a = cuda.device_array((10, 12), order='F') + self._test_against_array_core(d_a.T) + + def 
test_device_array_like_3d_f(self): + d_a = cuda.device_array((10, 12, 14), order='F') + self._test_against_array_core(d_a) + + def test_1d_view(self): + shape = 10 + view = np.zeros(shape)[::2] + self._test_against_array_core(view) + + def test_1d_view_f(self): + shape = 10 + view = np.zeros(shape, order='F')[::2] + self._test_against_array_core(view) + + def test_2d_view(self): + shape = (10, 12) + view = np.zeros(shape)[::2, ::2] + self._test_against_array_core(view) + + def test_2d_view_f(self): + shape = (10, 12) + view = np.zeros(shape, order='F')[::2, ::2] + self._test_against_array_core(view) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_deallocations.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_deallocations.py new file mode 100644 index 0000000000000000000000000000000000000000..66fbbc372e9a1347dded4cff3e699175b5d9c80d --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_deallocations.py @@ -0,0 +1,249 @@ +from contextlib import contextmanager + +import numpy as np + +from numba import cuda +from numba.cuda.testing import (unittest, skip_on_cudasim, + skip_if_external_memmgr, CUDATestCase) +from numba.tests.support import captured_stderr +from numba.core import config + + +@skip_on_cudasim('not supported on CUDASIM') +@skip_if_external_memmgr('Deallocation specific to Numba memory management') +class TestDeallocation(CUDATestCase): + def test_max_pending_count(self): + # get deallocation manager and flush it + deallocs = cuda.current_context().memory_manager.deallocations + deallocs.clear() + self.assertEqual(len(deallocs), 0) + # deallocate to maximum count + for i in range(config.CUDA_DEALLOCS_COUNT): + cuda.to_device(np.arange(1)) + self.assertEqual(len(deallocs), i + 1) + # one more to trigger .clear() + cuda.to_device(np.arange(1)) + self.assertEqual(len(deallocs), 0) + + def test_max_pending_bytes(self): + # get deallocation manager 
and flush it + ctx = cuda.current_context() + deallocs = ctx.memory_manager.deallocations + deallocs.clear() + self.assertEqual(len(deallocs), 0) + + mi = ctx.get_memory_info() + + max_pending = 10**6 # 1MB + old_ratio = config.CUDA_DEALLOCS_RATIO + try: + # change to a smaller ratio + config.CUDA_DEALLOCS_RATIO = max_pending / mi.total + # due to round off error (floor is used in calculating + # _max_pending_bytes) it can be off by 1. + self.assertAlmostEqual(deallocs._max_pending_bytes, max_pending, + delta=1) + + # allocate half the max size + # this will not trigger deallocation + cuda.to_device(np.ones(max_pending // 2, dtype=np.int8)) + self.assertEqual(len(deallocs), 1) + + # allocate another remaining + # this will not trigger deallocation + cuda.to_device(np.ones(deallocs._max_pending_bytes - + deallocs._size, dtype=np.int8)) + self.assertEqual(len(deallocs), 2) + + # another byte to trigger .clear() + cuda.to_device(np.ones(1, dtype=np.int8)) + self.assertEqual(len(deallocs), 0) + finally: + # restore old ratio + config.CUDA_DEALLOCS_RATIO = old_ratio + + +@skip_on_cudasim("defer_cleanup has no effect in CUDASIM") +@skip_if_external_memmgr('Deallocation specific to Numba memory management') +class TestDeferCleanup(CUDATestCase): + def test_basic(self): + harr = np.arange(5) + darr1 = cuda.to_device(harr) + deallocs = cuda.current_context().memory_manager.deallocations + deallocs.clear() + self.assertEqual(len(deallocs), 0) + with cuda.defer_cleanup(): + darr2 = cuda.to_device(harr) + del darr1 + self.assertEqual(len(deallocs), 1) + del darr2 + self.assertEqual(len(deallocs), 2) + deallocs.clear() + self.assertEqual(len(deallocs), 2) + + deallocs.clear() + self.assertEqual(len(deallocs), 0) + + def test_nested(self): + harr = np.arange(5) + darr1 = cuda.to_device(harr) + deallocs = cuda.current_context().memory_manager.deallocations + deallocs.clear() + self.assertEqual(len(deallocs), 0) + with cuda.defer_cleanup(): + with cuda.defer_cleanup(): + darr2 = 
cuda.to_device(harr) + del darr1 + self.assertEqual(len(deallocs), 1) + del darr2 + self.assertEqual(len(deallocs), 2) + deallocs.clear() + self.assertEqual(len(deallocs), 2) + deallocs.clear() + self.assertEqual(len(deallocs), 2) + + deallocs.clear() + self.assertEqual(len(deallocs), 0) + + def test_exception(self): + harr = np.arange(5) + darr1 = cuda.to_device(harr) + deallocs = cuda.current_context().memory_manager.deallocations + deallocs.clear() + self.assertEqual(len(deallocs), 0) + + class CustomError(Exception): + pass + + with self.assertRaises(CustomError): + with cuda.defer_cleanup(): + darr2 = cuda.to_device(harr) + del darr2 + self.assertEqual(len(deallocs), 1) + deallocs.clear() + self.assertEqual(len(deallocs), 1) + raise CustomError + deallocs.clear() + self.assertEqual(len(deallocs), 0) + del darr1 + self.assertEqual(len(deallocs), 1) + deallocs.clear() + self.assertEqual(len(deallocs), 0) + + +class TestDeferCleanupAvail(CUDATestCase): + def test_context_manager(self): + # just make sure the API is available + with cuda.defer_cleanup(): + pass + + +@skip_on_cudasim('not supported on CUDASIM') +class TestDel(CUDATestCase): + """ + Ensure resources are deleted properly without ignored exception. 
+ """ + @contextmanager + def check_ignored_exception(self, ctx): + with captured_stderr() as cap: + yield + ctx.deallocations.clear() + self.assertFalse(cap.getvalue()) + + def test_stream(self): + ctx = cuda.current_context() + stream = ctx.create_stream() + with self.check_ignored_exception(ctx): + del stream + + def test_event(self): + ctx = cuda.current_context() + event = ctx.create_event() + with self.check_ignored_exception(ctx): + del event + + def test_pinned_memory(self): + ctx = cuda.current_context() + mem = ctx.memhostalloc(32) + with self.check_ignored_exception(ctx): + del mem + + def test_mapped_memory(self): + ctx = cuda.current_context() + mem = ctx.memhostalloc(32, mapped=True) + with self.check_ignored_exception(ctx): + del mem + + def test_device_memory(self): + ctx = cuda.current_context() + mem = ctx.memalloc(32) + with self.check_ignored_exception(ctx): + del mem + + def test_managed_memory(self): + ctx = cuda.current_context() + mem = ctx.memallocmanaged(32) + with self.check_ignored_exception(ctx): + del mem + + def test_pinned_contextmanager(self): + # Check that temporarily pinned memory is unregistered immediately, + # such that it can be re-pinned at any time + class PinnedException(Exception): + pass + + arr = np.zeros(1) + ctx = cuda.current_context() + ctx.deallocations.clear() + with self.check_ignored_exception(ctx): + with cuda.pinned(arr): + pass + with cuda.pinned(arr): + pass + # Should also work inside a `defer_cleanup` block + with cuda.defer_cleanup(): + with cuda.pinned(arr): + pass + with cuda.pinned(arr): + pass + # Should also work when breaking out of the block due to an + # exception + try: + with cuda.pinned(arr): + raise PinnedException + except PinnedException: + with cuda.pinned(arr): + pass + + def test_mapped_contextmanager(self): + # Check that temporarily mapped memory is unregistered immediately, + # such that it can be re-mapped at any time + class MappedException(Exception): + pass + + arr = np.zeros(1) + 
ctx = cuda.current_context() + ctx.deallocations.clear() + with self.check_ignored_exception(ctx): + with cuda.mapped(arr): + pass + with cuda.mapped(arr): + pass + # Should also work inside a `defer_cleanup` block + with cuda.defer_cleanup(): + with cuda.mapped(arr): + pass + with cuda.mapped(arr): + pass + # Should also work when breaking out of the block due to an + # exception + try: + with cuda.mapped(arr): + raise MappedException + except MappedException: + with cuda.mapped(arr): + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_detect.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_detect.py new file mode 100644 index 0000000000000000000000000000000000000000..528e11bf848893026a9b7885c2a959190d455222 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_detect.py @@ -0,0 +1,81 @@ +import os +import sys +import subprocess +import threading +from numba import cuda +from numba.cuda.testing import (unittest, CUDATestCase, skip_on_cudasim, + skip_under_cuda_memcheck) +from numba.tests.support import captured_stdout + + +class TestCudaDetect(CUDATestCase): + def test_cuda_detect(self): + # exercise the code path + with captured_stdout() as out: + cuda.detect() + output = out.getvalue() + self.assertIn('Found', output) + self.assertIn('CUDA devices', output) + + +@skip_under_cuda_memcheck('Hangs cuda-memcheck') +class TestCUDAFindLibs(CUDATestCase): + + def run_cmd(self, cmdline, env): + popen = subprocess.Popen(cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + + # finish in 5 minutes or kill it + timeout = threading.Timer(5 * 60., popen.kill) + try: + timeout.start() + out, err = popen.communicate() + # the process should exit with an error + return out.decode(), err.decode() + finally: + timeout.cancel() + return None, None + + def run_test_in_separate_process(self, envvar, envvar_value): + env_copy = os.environ.copy() + 
env_copy[envvar] = str(envvar_value) + code = """if 1: + from numba import cuda + @cuda.jit('(int64,)') + def kernel(x): + pass + kernel(1,) + """ + cmdline = [sys.executable, "-c", code] + return self.run_cmd(cmdline, env_copy) + + @skip_on_cudasim('Simulator does not hit device library search code path') + @unittest.skipIf(not sys.platform.startswith('linux'), "linux only") + def test_cuda_find_lib_errors(self): + """ + This tests that the find_libs works as expected in the case of an + environment variable being used to set the path. + """ + # one of these is likely to exist on linux, it's also unlikely that + # someone has extracted the contents of libdevice into here! + locs = ['lib', 'lib64'] + + looking_for = None + for l in locs: + looking_for = os.path.join(os.path.sep, l) + if os.path.exists(looking_for): + break + + # This is the testing part, the test will only run if there's a valid + # path in which to look + if looking_for is not None: + out, err = self.run_test_in_separate_process("NUMBA_CUDA_DRIVER", + looking_for) + self.assertTrue(out is not None) + self.assertTrue(err is not None) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_emm_plugins.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_emm_plugins.py new file mode 100644 index 0000000000000000000000000000000000000000..209355ed69935920c4dbbe1fdc7ad84b3f9c1a11 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_emm_plugins.py @@ -0,0 +1,192 @@ +import ctypes +import numpy as np +import weakref + +from numba import cuda +from numba.core import config +from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim +from numba.tests.support import linux_only + +if not config.ENABLE_CUDASIM: + class DeviceOnlyEMMPlugin(cuda.HostOnlyCUDAMemoryManager): + """ + Dummy EMM Plugin implementation for testing. 
It memorises which plugin + API methods have been called so that the tests can check that Numba + called into the plugin as expected. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # For tracking our dummy allocations + self.allocations = {} + self.count = 0 + + # For tracking which methods have been called + self.initialized = False + self.memalloc_called = False + self.reset_called = False + self.get_memory_info_called = False + self.get_ipc_handle_called = False + + def memalloc(self, size): + # We maintain a list of allocations and keep track of them, so that + # we can test that the finalizers of objects returned by memalloc + # get called. + + # Numba should have initialized the memory manager when preparing + # the context for use, prior to any memalloc call. + if not self.initialized: + raise RuntimeError("memalloc called before initialize") + self.memalloc_called = True + + # Create an allocation and record it + self.count += 1 + alloc_count = self.count + self.allocations[alloc_count] = size + + # The finalizer deletes the record from our internal dict of + # allocations. + finalizer_allocs = self.allocations + + def finalizer(): + del finalizer_allocs[alloc_count] + + # We use an AutoFreePointer so that the finalizer will be run when + # the reference count drops to zero. + ctx = weakref.proxy(self.context) + ptr = ctypes.c_void_p(alloc_count) + return cuda.cudadrv.driver.AutoFreePointer(ctx, ptr, size, + finalizer=finalizer) + + def initialize(self): + # No special initialization needed. + self.initialized = True + + def reset(self): + # We remove all allocations on reset, just as a real EMM Plugin + # would do. 
Note that our finalizers in memalloc don't check + # whether the allocations are still alive, so running them after + # reset will detect any allocations that are floating around at + # exit time; however, the atexit finalizer for weakref will only + # print a traceback, not terminate the interpreter abnormally. + self.reset_called = True + + def get_memory_info(self): + # Return some dummy memory information + self.get_memory_info_called = True + return cuda.MemoryInfo(free=32, total=64) + + def get_ipc_handle(self, memory): + # The dummy IPC handle is only a string, so it is important that + # the tests don't try to do too much with it (e.g. open / close + # it). + self.get_ipc_handle_called = True + return "Dummy IPC handle for alloc %s" % memory.device_pointer.value + + @property + def interface_version(self): + # The expected version for an EMM Plugin. + return 1 + + class BadVersionEMMPlugin(DeviceOnlyEMMPlugin): + """A plugin that claims to implement a different interface version""" + + @property + def interface_version(self): + return 2 + + +@skip_on_cudasim('EMM Plugins not supported on CUDA simulator') +class TestDeviceOnlyEMMPlugin(CUDATestCase): + """ + Tests that the API of an EMM Plugin that implements device allocations + only is used correctly by Numba. + """ + + def setUp(self): + super().setUp() + # Always start afresh with a new context and memory manager + cuda.close() + cuda.set_memory_manager(DeviceOnlyEMMPlugin) + + def tearDown(self): + super().tearDown() + # Unset the memory manager for subsequent tests + cuda.close() + cuda.cudadrv.driver._memory_manager = None + + def test_memalloc(self): + mgr = cuda.current_context().memory_manager + + # Allocate an array and check that memalloc was called with the correct + # size. 
+ arr_1 = np.arange(10) + d_arr_1 = cuda.device_array_like(arr_1) + self.assertTrue(mgr.memalloc_called) + self.assertEqual(mgr.count, 1) + self.assertEqual(mgr.allocations[1], arr_1.nbytes) + + # Allocate again, with a different size, and check that it is also + # correct. + arr_2 = np.arange(5) + d_arr_2 = cuda.device_array_like(arr_2) + self.assertEqual(mgr.count, 2) + self.assertEqual(mgr.allocations[2], arr_2.nbytes) + + # Remove the first array, and check that our finalizer was called for + # the first array only. + del d_arr_1 + self.assertNotIn(1, mgr.allocations) + self.assertIn(2, mgr.allocations) + + # Remove the second array and check that its finalizer was also + # called. + del d_arr_2 + self.assertNotIn(2, mgr.allocations) + + def test_initialized_in_context(self): + # If we have a CUDA context, it should already have initialized its + # memory manager. + self.assertTrue(cuda.current_context().memory_manager.initialized) + + def test_reset(self): + ctx = cuda.current_context() + ctx.reset() + self.assertTrue(ctx.memory_manager.reset_called) + + def test_get_memory_info(self): + ctx = cuda.current_context() + meminfo = ctx.get_memory_info() + self.assertTrue(ctx.memory_manager.get_memory_info_called) + self.assertEqual(meminfo.free, 32) + self.assertEqual(meminfo.total, 64) + + @linux_only + def test_get_ipc_handle(self): + # We don't attempt to close the IPC handle in this test because Numba + # will be expecting a real IpcHandle object to have been returned from + # get_ipc_handle, and it would cause problems to do so. 
+ arr = np.arange(2) + d_arr = cuda.device_array_like(arr) + ipch = d_arr.get_ipc_handle() + ctx = cuda.current_context() + self.assertTrue(ctx.memory_manager.get_ipc_handle_called) + self.assertIn("Dummy IPC handle for alloc 1", ipch._ipc_handle) + + +@skip_on_cudasim('EMM Plugins not supported on CUDA simulator') +class TestBadEMMPluginVersion(CUDATestCase): + """ + Ensure that Numba rejects EMM Plugins with incompatible version + numbers. + """ + + def test_bad_plugin_version(self): + with self.assertRaises(RuntimeError) as raises: + cuda.set_memory_manager(BadVersionEMMPlugin) + self.assertIn('version 1 required', str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_events.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_events.py new file mode 100644 index 0000000000000000000000000000000000000000..b611a4a75fddfec427cd61f5d4db515066690548 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_events.py @@ -0,0 +1,38 @@ +import numpy as np +from numba import cuda +from numba.cuda.testing import unittest, CUDATestCase + + +class TestCudaEvent(CUDATestCase): + def test_event_elapsed(self): + N = 32 + dary = cuda.device_array(N, dtype=np.double) + evtstart = cuda.event() + evtend = cuda.event() + + evtstart.record() + cuda.to_device(np.arange(N, dtype=np.double), to=dary) + evtend.record() + evtend.wait() + evtend.synchronize() + # Exercise the code path + evtstart.elapsed_time(evtend) + + def test_event_elapsed_stream(self): + N = 32 + stream = cuda.stream() + dary = cuda.device_array(N, dtype=np.double) + evtstart = cuda.event() + evtend = cuda.event() + + evtstart.record(stream=stream) + cuda.to_device(np.arange(N, dtype=np.double), to=dary, stream=stream) + evtend.record(stream=stream) + evtend.wait(stream=stream) + evtend.synchronize() + # Exercise the code path + evtstart.elapsed_time(evtend) + + +if __name__ == '__main__': + 
unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_host_alloc.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_host_alloc.py new file mode 100644 index 0000000000000000000000000000000000000000..62c4ecafe6c04a85ff52c022f1417bfe5fef9c68 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_host_alloc.py @@ -0,0 +1,65 @@ +import numpy as np +from numba.cuda.cudadrv import driver +from numba import cuda +from numba.cuda.testing import unittest, ContextResettingTestCase + + +class TestHostAlloc(ContextResettingTestCase): + def test_host_alloc_driver(self): + n = 32 + mem = cuda.current_context().memhostalloc(n, mapped=True) + + dtype = np.dtype(np.uint8) + ary = np.ndarray(shape=n // dtype.itemsize, dtype=dtype, + buffer=mem) + + magic = 0xab + driver.device_memset(mem, magic, n) + + self.assertTrue(np.all(ary == magic)) + + ary.fill(n) + + recv = np.empty_like(ary) + + driver.device_to_host(recv, mem, ary.size) + + self.assertTrue(np.all(ary == recv)) + self.assertTrue(np.all(recv == n)) + + def test_host_alloc_pinned(self): + ary = cuda.pinned_array(10, dtype=np.uint32) + ary.fill(123) + self.assertTrue(all(ary == 123)) + devary = cuda.to_device(ary) + driver.device_memset(devary, 0, driver.device_memory_size(devary)) + self.assertTrue(all(ary == 123)) + devary.copy_to_host(ary) + self.assertTrue(all(ary == 0)) + + def test_host_alloc_mapped(self): + ary = cuda.mapped_array(10, dtype=np.uint32) + ary.fill(123) + self.assertTrue(all(ary == 123)) + driver.device_memset(ary, 0, driver.device_memory_size(ary)) + self.assertTrue(all(ary == 0)) + self.assertTrue(sum(ary != 0) == 0) + + def test_host_operators(self): + for ary in [cuda.mapped_array(10, dtype=np.uint32), + cuda.pinned_array(10, dtype=np.uint32)]: + ary[:] = range(10) + self.assertTrue(sum(ary + 1) == 55) + self.assertTrue(sum((ary + 1) * 2 - 1) == 100) + self.assertTrue(sum(ary < 5) == 5) + self.assertTrue(sum(ary <= 5) == 6) + 
self.assertTrue(sum(ary > 6) == 3) + self.assertTrue(sum(ary >= 6) == 4) + self.assertTrue(sum(ary ** 2) == 285) + self.assertTrue(sum(ary // 2) == 20) + self.assertTrue(sum(ary / 2.0) == 22.5) + self.assertTrue(sum(ary % 2) == 5) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_init.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_init.py new file mode 100644 index 0000000000000000000000000000000000000000..600687fd52aa515b5adca075f1510726d914d9b4 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_init.py @@ -0,0 +1,139 @@ +import multiprocessing as mp +import os + +from numba import cuda +from numba.cuda.cudadrv.driver import CudaAPIError, driver +from numba.cuda.cudadrv.error import CudaSupportError +from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase + + +# A mock of cuInit that always raises a CudaAPIError +def cuInit_raising(arg): + raise CudaAPIError(999, 'CUDA_ERROR_UNKNOWN') + + +# Test code to run in a child that patches driver.cuInit to a variant that +# always raises. We can't use mock.patch.object here because driver.cuInit is +# not assigned until we attempt to initialize - mock.patch.object cannot locate +# the non-existent original method, and so fails. Instead we patch +# driver.cuInit with our raising version prior to any attempt to initialize. +def cuInit_raising_test(result_queue): + driver.cuInit = cuInit_raising + + success = False + msg = None + + try: + # A CUDA operation that forces initialization of the device + cuda.device_array(1) + except CudaSupportError as e: + success = True + msg = e.msg + + result_queue.put((success, msg)) + + +# Similar to cuInit_raising_test above, but for testing that the string +# returned by cuda_error() is as expected. 
+def initialization_error_test(result_queue): + driver.cuInit = cuInit_raising + + success = False + msg = None + + try: + # A CUDA operation that forces initialization of the device + cuda.device_array(1) + except CudaSupportError: + success = True + + msg = cuda.cuda_error() + result_queue.put((success, msg)) + + +# For testing the path where Driver.__init__() catches a CudaSupportError +def cuda_disabled_test(result_queue): + success = False + msg = None + + try: + # A CUDA operation that forces initialization of the device + cuda.device_array(1) + except CudaSupportError as e: + success = True + msg = e.msg + + result_queue.put((success, msg)) + + +# Similar to cuda_disabled_test, but checks cuda.cuda_error() instead of the +# exception raised on initialization +def cuda_disabled_error_test(result_queue): + success = False + msg = None + + try: + # A CUDA operation that forces initialization of the device + cuda.device_array(1) + except CudaSupportError: + success = True + + msg = cuda.cuda_error() + result_queue.put((success, msg)) + + +@skip_on_cudasim('CUDA Simulator does not initialize driver') +class TestInit(CUDATestCase): + def _test_init_failure(self, target, expected): + # Run the initialization failure test in a separate subprocess + ctx = mp.get_context('spawn') + result_queue = ctx.Queue() + proc = ctx.Process(target=target, args=(result_queue,)) + proc.start() + proc.join(30) # should complete within 30s + success, msg = result_queue.get() + + # Ensure the child process raised an exception during initialization + # before checking the message + if not success: + self.fail('CudaSupportError not raised') + + self.assertIn(expected, msg) + + def test_init_failure_raising(self): + expected = 'Error at driver init: CUDA_ERROR_UNKNOWN (999)' + self._test_init_failure(cuInit_raising_test, expected) + + def test_init_failure_error(self): + expected = 'CUDA_ERROR_UNKNOWN (999)' + self._test_init_failure(initialization_error_test, expected) + + def 
_test_cuda_disabled(self, target): + # Uses _test_init_failure to launch the test in a separate subprocess + # with CUDA disabled. + cuda_disabled = os.environ.get('NUMBA_DISABLE_CUDA') + os.environ['NUMBA_DISABLE_CUDA'] = "1" + try: + expected = 'CUDA is disabled due to setting NUMBA_DISABLE_CUDA=1' + self._test_init_failure(cuda_disabled_test, expected) + finally: + if cuda_disabled is not None: + os.environ['NUMBA_DISABLE_CUDA'] = cuda_disabled + else: + os.environ.pop('NUMBA_DISABLE_CUDA') + + def test_cuda_disabled_raising(self): + self._test_cuda_disabled(cuda_disabled_test) + + def test_cuda_disabled_error(self): + self._test_cuda_disabled(cuda_disabled_error_test) + + def test_init_success(self): + # Here we assume that initialization is successful (because many bad + # things will happen with the test suite if it is not) and check that + # there is no error recorded. + self.assertIsNone(cuda.cuda_error()) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_inline_ptx.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_inline_ptx.py new file mode 100644 index 0000000000000000000000000000000000000000..40a6fa599e2bc7f7813a9d2c3f2ce6d89397bb39 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_inline_ptx.py @@ -0,0 +1,37 @@ +from llvmlite import ir + +from numba.cuda.cudadrv import nvvm +from numba.cuda.testing import unittest, ContextResettingTestCase +from numba.cuda.testing import skip_on_cudasim + + +@skip_on_cudasim('Inline PTX cannot be used in the simulator') +class TestCudaInlineAsm(ContextResettingTestCase): + def test_inline_rsqrt(self): + mod = ir.Module(__name__) + mod.triple = 'nvptx64-nvidia-cuda' + nvvm.add_ir_version(mod) + fnty = ir.FunctionType(ir.VoidType(), [ir.PointerType(ir.FloatType())]) + fn = ir.Function(mod, fnty, 'cu_rsqrt') + bldr = ir.IRBuilder(fn.append_basic_block('entry')) + + rsqrt_approx_fnty = 
ir.FunctionType(ir.FloatType(), [ir.FloatType()]) + inlineasm = ir.InlineAsm(rsqrt_approx_fnty, + 'rsqrt.approx.f32 $0, $1;', + '=f,f', side_effect=True) + val = bldr.load(fn.args[0]) + res = bldr.call(inlineasm, [val]) + + bldr.store(res, fn.args[0]) + bldr.ret_void() + + # generate ptx + mod.data_layout = nvvm.NVVM().data_layout + nvvm.set_cuda_kernel(fn) + nvvmir = str(mod) + ptx = nvvm.compile_ir(nvvmir) + self.assertTrue('rsqrt.approx.f32' in str(ptx)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_is_fp16.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_is_fp16.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc73fa155f2ee73dd39ab363df56782656f8798 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_is_fp16.py @@ -0,0 +1,12 @@ +from numba import cuda +from numba.cuda.testing import CUDATestCase, skip_on_cudasim, skip_unless_cc_53 + + +class TestIsFP16Supported(CUDATestCase): + def test_is_fp16_supported(self): + self.assertTrue(cuda.is_float16_supported()) + + @skip_on_cudasim + @skip_unless_cc_53 + def test_device_supports_float16(self): + self.assertTrue(cuda.get_current_device().supports_float16) diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_linker.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_linker.py new file mode 100644 index 0000000000000000000000000000000000000000..22e2ee8375c8c742e0636b8aa4760674e3c8f01b --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_linker.py @@ -0,0 +1,317 @@ +import numpy as np +import warnings +from numba.cuda.testing import unittest +from numba.cuda.testing import (skip_on_cudasim, skip_if_cuda_includes_missing) +from numba.cuda.testing import CUDATestCase, test_data_dir +from numba.cuda.cudadrv.driver import (CudaAPIError, Linker, + LinkerError) +from numba.cuda.cudadrv.error import NvrtcError +from numba.cuda 
import require_context +from numba.tests.support import ignore_internal_warnings +from numba import cuda, void, float64, int64, int32, typeof, float32 + + +CONST1D = np.arange(10, dtype=np.float64) + + +def simple_const_mem(A): + C = cuda.const.array_like(CONST1D) + i = cuda.grid(1) + + A[i] = C[i] + 1.0 + + +def func_with_lots_of_registers(x, a, b, c, d, e, f): + a1 = 1.0 + a2 = 1.0 + a3 = 1.0 + a4 = 1.0 + a5 = 1.0 + b1 = 1.0 + b2 = 1.0 + b3 = 1.0 + b4 = 1.0 + b5 = 1.0 + c1 = 1.0 + c2 = 1.0 + c3 = 1.0 + c4 = 1.0 + c5 = 1.0 + d1 = 10 + d2 = 10 + d3 = 10 + d4 = 10 + d5 = 10 + for i in range(a): + a1 += b + a2 += c + a3 += d + a4 += e + a5 += f + b1 *= b + b2 *= c + b3 *= d + b4 *= e + b5 *= f + c1 /= b + c2 /= c + c3 /= d + c4 /= e + c5 /= f + d1 <<= b + d2 <<= c + d3 <<= d + d4 <<= e + d5 <<= f + x[cuda.grid(1)] = a1 + a2 + a3 + a4 + a5 + x[cuda.grid(1)] += b1 + b2 + b3 + b4 + b5 + x[cuda.grid(1)] += c1 + c2 + c3 + c4 + c5 + x[cuda.grid(1)] += d1 + d2 + d3 + d4 + d5 + + +def simple_smem(ary, dty): + sm = cuda.shared.array(100, dty) + i = cuda.grid(1) + if i == 0: + for j in range(100): + sm[j] = j + cuda.syncthreads() + ary[i] = sm[i] + + +def coop_smem2d(ary): + i, j = cuda.grid(2) + sm = cuda.shared.array((10, 20), float32) + sm[i, j] = (i + 1) / (j + 1) + cuda.syncthreads() + ary[i, j] = sm[i, j] + + +def simple_maxthreads(ary): + i = cuda.grid(1) + ary[i] = i + + +LMEM_SIZE = 1000 + + +def simple_lmem(A, B, dty): + C = cuda.local.array(LMEM_SIZE, dty) + for i in range(C.shape[0]): + C[i] = A[i] + for i in range(C.shape[0]): + B[i] = C[i] + + +@skip_on_cudasim('Linking unsupported in the simulator') +class TestLinker(CUDATestCase): + _NUMBA_NVIDIA_BINDING_0_ENV = {'NUMBA_CUDA_USE_NVIDIA_BINDING': '0'} + + @require_context + def test_linker_basic(self): + '''Simply go through the constructor and destructor + ''' + linker = Linker.new(cc=(5, 3)) + del linker + + def _test_linking(self, eager): + global bar # must be a global; other it is recognized as a freevar + 
bar = cuda.declare_device('bar', 'int32(int32)') + + link = str(test_data_dir / 'jitlink.ptx') + + if eager: + args = ['void(int32[:], int32[:])'] + else: + args = [] + + @cuda.jit(*args, link=[link]) + def foo(x, y): + i = cuda.grid(1) + x[i] += bar(y[i]) + + A = np.array([123], dtype=np.int32) + B = np.array([321], dtype=np.int32) + + foo[1, 1](A, B) + + self.assertTrue(A[0] == 123 + 2 * 321) + + def test_linking_lazy_compile(self): + self._test_linking(eager=False) + + def test_linking_eager_compile(self): + self._test_linking(eager=True) + + def test_linking_cu(self): + bar = cuda.declare_device('bar', 'int32(int32)') + + link = str(test_data_dir / 'jitlink.cu') + + @cuda.jit(link=[link]) + def kernel(r, x): + i = cuda.grid(1) + + if i < len(r): + r[i] = bar(x[i]) + + x = np.arange(10, dtype=np.int32) + r = np.zeros_like(x) + + kernel[1, 32](r, x) + + # Matches the operation of bar() in jitlink.cu + expected = x * 2 + np.testing.assert_array_equal(r, expected) + + def test_linking_cu_log_warning(self): + bar = cuda.declare_device('bar', 'int32(int32)') + + link = str(test_data_dir / 'warn.cu') + + with warnings.catch_warnings(record=True) as w: + ignore_internal_warnings() + + @cuda.jit('void(int32)', link=[link]) + def kernel(x): + bar(x) + + self.assertEqual(len(w), 1, 'Expected warnings from NVRTC') + # Check the warning refers to the log messages + self.assertIn('NVRTC log messages', str(w[0].message)) + # Check the message pertaining to the unused variable is provided + self.assertIn('declared but never referenced', str(w[0].message)) + + def test_linking_cu_error(self): + bar = cuda.declare_device('bar', 'int32(int32)') + + link = str(test_data_dir / 'error.cu') + + with self.assertRaises(NvrtcError) as e: + @cuda.jit('void(int32)', link=[link]) + def kernel(x): + bar(x) + + msg = e.exception.args[0] + # Check the error message refers to the NVRTC compile + self.assertIn('NVRTC Compilation failure', msg) + # Check the expected error in the CUDA source is 
reported + self.assertIn('identifier "SYNTAX" is undefined', msg) + # Check the filename is reported correctly + self.assertIn('in the compilation of "error.cu"', msg) + + def test_linking_unknown_filetype_error(self): + expected_err = "Don't know how to link file with extension .cuh" + with self.assertRaisesRegex(RuntimeError, expected_err): + @cuda.jit('void()', link=['header.cuh']) + def kernel(): + pass + + def test_linking_file_with_no_extension_error(self): + expected_err = "Don't know how to link file with no extension" + with self.assertRaisesRegex(RuntimeError, expected_err): + @cuda.jit('void()', link=['data']) + def kernel(): + pass + + @skip_if_cuda_includes_missing + def test_linking_cu_cuda_include(self): + link = str(test_data_dir / 'cuda_include.cu') + + # An exception will be raised when linking this kernel due to the + # compile failure if CUDA includes cannot be found by Nvrtc. + @cuda.jit('void()', link=[link]) + def kernel(): + pass + + def test_try_to_link_nonexistent(self): + with self.assertRaises(LinkerError) as e: + @cuda.jit('void(int32[::1])', link=['nonexistent.a']) + def f(x): + x[0] = 0 + self.assertIn('nonexistent.a not found', e.exception.args) + + def test_set_registers_no_max(self): + """Ensure that the jitted kernel used in the test_set_registers_* tests + uses more than 57 registers - this ensures that test_set_registers_* + are really checking that they reduced the number of registers used from + something greater than the maximum.""" + compiled = cuda.jit(func_with_lots_of_registers) + compiled = compiled.specialize(np.empty(32), *range(6)) + self.assertGreater(compiled.get_regs_per_thread(), 57) + + def test_set_registers_57(self): + compiled = cuda.jit(max_registers=57)(func_with_lots_of_registers) + compiled = compiled.specialize(np.empty(32), *range(6)) + self.assertLessEqual(compiled.get_regs_per_thread(), 57) + + def test_set_registers_38(self): + compiled = cuda.jit(max_registers=38)(func_with_lots_of_registers) + 
compiled = compiled.specialize(np.empty(32), *range(6)) + self.assertLessEqual(compiled.get_regs_per_thread(), 38) + + def test_set_registers_eager(self): + sig = void(float64[::1], int64, int64, int64, int64, int64, int64) + compiled = cuda.jit(sig, max_registers=38)(func_with_lots_of_registers) + self.assertLessEqual(compiled.get_regs_per_thread(), 38) + + def test_get_const_mem_size(self): + sig = void(float64[::1]) + compiled = cuda.jit(sig)(simple_const_mem) + const_mem_size = compiled.get_const_mem_size() + self.assertGreaterEqual(const_mem_size, CONST1D.nbytes) + + def test_get_no_shared_memory(self): + compiled = cuda.jit(func_with_lots_of_registers) + compiled = compiled.specialize(np.empty(32), *range(6)) + shared_mem_size = compiled.get_shared_mem_per_block() + self.assertEqual(shared_mem_size, 0) + + def test_get_shared_mem_per_block(self): + sig = void(int32[::1], typeof(np.int32)) + compiled = cuda.jit(sig)(simple_smem) + shared_mem_size = compiled.get_shared_mem_per_block() + self.assertEqual(shared_mem_size, 400) + + def test_get_shared_mem_per_specialized(self): + compiled = cuda.jit(simple_smem) + compiled_specialized = compiled.specialize( + np.zeros(100, dtype=np.int32), np.float64) + shared_mem_size = compiled_specialized.get_shared_mem_per_block() + self.assertEqual(shared_mem_size, 800) + + def test_get_max_threads_per_block(self): + compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d) + max_threads = compiled.get_max_threads_per_block() + self.assertGreater(max_threads, 0) + + def test_max_threads_exceeded(self): + compiled = cuda.jit("void(int32[::1])")(simple_maxthreads) + max_threads = compiled.get_max_threads_per_block() + nelem = max_threads + 1 + ary = np.empty(nelem, dtype=np.int32) + try: + compiled[1, nelem](ary) + except CudaAPIError as e: + self.assertIn("cuLaunchKernel", e.msg) + + def test_get_local_mem_per_thread(self): + sig = void(int32[::1], int32[::1], typeof(np.int32)) + compiled = cuda.jit(sig)(simple_lmem) + 
local_mem_size = compiled.get_local_mem_per_thread() + calc_size = np.dtype(np.int32).itemsize * LMEM_SIZE + self.assertGreaterEqual(local_mem_size, calc_size) + + def test_get_local_mem_per_specialized(self): + compiled = cuda.jit(simple_lmem) + compiled_specialized = compiled.specialize( + np.zeros(LMEM_SIZE, dtype=np.int32), + np.zeros(LMEM_SIZE, dtype=np.int32), + np.float64) + local_mem_size = compiled_specialized.get_local_mem_per_thread() + calc_size = np.dtype(np.float64).itemsize * LMEM_SIZE + self.assertGreaterEqual(local_mem_size, calc_size) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_managed_alloc.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_managed_alloc.py new file mode 100644 index 0000000000000000000000000000000000000000..e9cc37ca84ad31274b226fc8841cca2093703194 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_managed_alloc.py @@ -0,0 +1,127 @@ +import numpy as np +from ctypes import byref, c_size_t +from numba.cuda.cudadrv.driver import device_memset, driver, USE_NV_BINDING +from numba import cuda +from numba.cuda.testing import unittest, ContextResettingTestCase +from numba.cuda.testing import skip_on_cudasim, skip_on_arm +from numba.tests.support import linux_only + + +@skip_on_cudasim('CUDA Driver API unsupported in the simulator') +@linux_only +@skip_on_arm('Managed Alloc support is experimental/untested on ARM') +class TestManagedAlloc(ContextResettingTestCase): + + def get_total_gpu_memory(self): + # We use a driver function to directly get the total GPU memory because + # an EMM plugin may report something different (or not implement + # get_memory_info at all). 
+ if USE_NV_BINDING: + free, total = driver.cuMemGetInfo() + return total + else: + free = c_size_t() + total = c_size_t() + driver.cuMemGetInfo(byref(free), byref(total)) + return total.value + + def skip_if_cc_major_lt(self, min_required, reason): + """ + Skip the current test if the compute capability of the device is + less than `min_required`. + """ + ctx = cuda.current_context() + cc_major = ctx.device.compute_capability[0] + if cc_major < min_required: + self.skipTest(reason) + + # CUDA Unified Memory comes in two flavors. For GPUs in the Kepler and + # Maxwell generations, managed memory allocations work as opaque, + # contiguous segments that can either be on the device or the host. For + # GPUs in the Pascal or later generations, managed memory operates on a + # per-page basis, so we can have arrays larger than GPU memory, where only + # part of them is resident on the device at one time. To ensure that this + # test works correctly on all supported GPUs, we'll select the size of our + # memory such that we only oversubscribe the GPU memory if we're on a + # Pascal or newer GPU (compute capability at least 6.0). + + def test_managed_alloc_driver_undersubscribe(self): + msg = "Managed memory unsupported prior to CC 3.0" + self.skip_if_cc_major_lt(3, msg) + self._test_managed_alloc_driver(0.5) + + # This test is skipped by default because it is easy to hang the machine + # for a very long time or get OOM killed if the GPU memory size is >50% of + # the system memory size. Even if the system does have more than 2x the RAM + # of the GPU, this test runs for a very long time (in comparison to the + # rest of the tests in the suite). + # + # However, it is left in here for manual testing as required. 
+ + @unittest.skip + def test_managed_alloc_driver_oversubscribe(self): + msg = "Oversubscription of managed memory unsupported prior to CC 6.0" + self.skip_if_cc_major_lt(6, msg) + self._test_managed_alloc_driver(2.0) + + def test_managed_alloc_driver_host_attach(self): + msg = "Host attached managed memory is not accessible prior to CC 6.0" + self.skip_if_cc_major_lt(6, msg) + # Only test with a small array (0.01 * memory size) to keep the test + # quick. + self._test_managed_alloc_driver(0.01, attach_global=False) + + def _test_managed_alloc_driver(self, memory_factor, attach_global=True): + # Verify that we can allocate and operate on managed + # memory through the CUDA driver interface. + + total_mem_size = self.get_total_gpu_memory() + n_bytes = int(memory_factor * total_mem_size) + + ctx = cuda.current_context() + mem = ctx.memallocmanaged(n_bytes, attach_global=attach_global) + + dtype = np.dtype(np.uint8) + n_elems = n_bytes // dtype.itemsize + ary = np.ndarray(shape=n_elems, dtype=dtype, buffer=mem) + + magic = 0xab + device_memset(mem, magic, n_bytes) + ctx.synchronize() + + # Note that this assertion operates on the CPU, so this + # test effectively drives both the CPU and the GPU on + # managed memory. + + self.assertTrue(np.all(ary == magic)) + + def _test_managed_array(self, attach_global=True): + # Check the managed_array interface on both host and device. 
+ + ary = cuda.managed_array(100, dtype=np.double) + ary.fill(123.456) + self.assertTrue(all(ary == 123.456)) + + @cuda.jit('void(double[:])') + def kernel(x): + i = cuda.grid(1) + if i < x.shape[0]: + x[i] = 1.0 + + kernel[10, 10](ary) + cuda.current_context().synchronize() + + self.assertTrue(all(ary == 1.0)) + + def test_managed_array_attach_global(self): + self._test_managed_array() + + def test_managed_array_attach_host(self): + self._test_managed_array() + msg = "Host attached managed memory is not accessible prior to CC 6.0" + self.skip_if_cc_major_lt(6, msg) + self._test_managed_array(attach_global=False) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_mvc.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_mvc.py new file mode 100644 index 0000000000000000000000000000000000000000..c25bc5ae2d1f46b3f873e921e92c7e53ede33fa6 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_mvc.py @@ -0,0 +1,54 @@ +import multiprocessing as mp +import traceback +from numba.cuda.testing import unittest, CUDATestCase +from numba.cuda.testing import (skip_on_cudasim, skip_under_cuda_memcheck, + skip_if_mvc_libraries_unavailable) +from numba.tests.support import linux_only + + +def child_test(): + from numba import config, cuda + + # Change the MVC config after importing numba.cuda + config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY = 1 + + @cuda.jit + def f(): + pass + + f[1, 1]() + + +def child_test_wrapper(result_queue): + try: + output = child_test() + success = True + # Catch anything raised so it can be propagated + except: # noqa: E722 + output = traceback.format_exc() + success = False + + result_queue.put((success, output)) + + +@linux_only +@skip_under_cuda_memcheck('May hang CUDA memcheck') +@skip_on_cudasim('Simulator does not require or implement MVC') +@skip_if_mvc_libraries_unavailable +class TestMinorVersionCompatibility(CUDATestCase): + def 
test_mvc(self): + # Run test with Minor Version Compatibility enabled in a child process + ctx = mp.get_context('spawn') + result_queue = ctx.Queue() + proc = ctx.Process(target=child_test_wrapper, args=(result_queue,)) + proc.start() + proc.join() + success, output = result_queue.get() + + # Ensure the child process ran to completion before checking its output + if not success: + self.fail(output) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_nvvm_driver.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_nvvm_driver.py new file mode 100644 index 0000000000000000000000000000000000000000..6e560764c50566579f83c84c6f23d9043b1bab37 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_nvvm_driver.py @@ -0,0 +1,199 @@ +import warnings + +from llvmlite import ir +from numba.cuda.cudadrv import nvvm, runtime +from numba.cuda.testing import unittest +from numba.cuda.cudadrv.nvvm import LibDevice, NvvmError, NVVM +from numba.cuda.testing import skip_on_cudasim + + +@skip_on_cudasim('NVVM Driver unsupported in the simulator') +class TestNvvmDriver(unittest.TestCase): + def get_nvvmir(self): + versions = NVVM().get_ir_version() + data_layout = NVVM().data_layout + return nvvmir_generic.format(data_layout=data_layout, v=versions) + + def test_nvvm_compile_simple(self): + nvvmir = self.get_nvvmir() + ptx = nvvm.compile_ir(nvvmir).decode('utf8') + self.assertTrue('simple' in ptx) + self.assertTrue('ave' in ptx) + + def test_nvvm_compile_nullary_option(self): + # Tests compilation with an option that doesn't take an argument + # ("-gen-lto") - all other NVVM options are of the form + # "-=" + + # -gen-lto is not available prior to CUDA 11.5 + if runtime.get_version() < (11, 5): + self.skipTest("-gen-lto unavailable in this toolkit version") + + nvvmir = self.get_nvvmir() + ltoir = nvvm.compile_ir(nvvmir, opt=3, gen_lto=None, arch="compute_52") + + # Verify we 
correctly passed the option by checking if we got LTOIR + # from NVVM (by looking for the expected magic number for LTOIR) + self.assertEqual(ltoir[:4], b'\xed\x43\x4e\x7f') + + def test_nvvm_bad_option(self): + # Ensure that unsupported / non-existent options are reported as such + # to the user / caller + msg = "-made-up-option=2 is an unsupported option" + with self.assertRaisesRegex(NvvmError, msg): + nvvm.compile_ir("", made_up_option=2) + + def test_nvvm_from_llvm(self): + m = ir.Module("test_nvvm_from_llvm") + m.triple = 'nvptx64-nvidia-cuda' + nvvm.add_ir_version(m) + fty = ir.FunctionType(ir.VoidType(), [ir.IntType(32)]) + kernel = ir.Function(m, fty, name='mycudakernel') + bldr = ir.IRBuilder(kernel.append_basic_block('entry')) + bldr.ret_void() + nvvm.set_cuda_kernel(kernel) + + m.data_layout = NVVM().data_layout + ptx = nvvm.compile_ir(str(m)).decode('utf8') + self.assertTrue('mycudakernel' in ptx) + self.assertTrue('.address_size 64' in ptx) + + def test_used_list(self): + # Construct a module + m = ir.Module("test_used_list") + m.triple = 'nvptx64-nvidia-cuda' + m.data_layout = NVVM().data_layout + nvvm.add_ir_version(m) + + # Add a function and mark it as a kernel + fty = ir.FunctionType(ir.VoidType(), [ir.IntType(32)]) + kernel = ir.Function(m, fty, name='mycudakernel') + bldr = ir.IRBuilder(kernel.append_basic_block('entry')) + bldr.ret_void() + nvvm.set_cuda_kernel(kernel) + + # Verify that the used list was correctly constructed + used_lines = [line for line in str(m).splitlines() + if 'llvm.used' in line] + msg = 'Expected exactly one @"llvm.used" array' + self.assertEqual(len(used_lines), 1, msg) + + used_line = used_lines[0] + # Kernel should be referenced in the used list + self.assertIn("mycudakernel", used_line) + # Check linkage of the used list + self.assertIn("appending global", used_line) + # Ensure used list is in the metadata section + self.assertIn('section "llvm.metadata"', used_line) + + def test_nvvm_ir_verify_fail(self): + m = 
ir.Module("test_bad_ir") + m.triple = "unknown-unknown-unknown" + m.data_layout = NVVM().data_layout + nvvm.add_ir_version(m) + with self.assertRaisesRegex(NvvmError, 'Invalid target triple'): + nvvm.compile_ir(str(m)) + + def _test_nvvm_support(self, arch): + compute_xx = 'compute_{0}{1}'.format(*arch) + nvvmir = self.get_nvvmir() + ptx = nvvm.compile_ir(nvvmir, arch=compute_xx, ftz=1, prec_sqrt=0, + prec_div=0).decode('utf8') + self.assertIn(".target sm_{0}{1}".format(*arch), ptx) + self.assertIn('simple', ptx) + self.assertIn('ave', ptx) + + def test_nvvm_support(self): + """Test supported CC by NVVM + """ + for arch in nvvm.get_supported_ccs(): + self._test_nvvm_support(arch=arch) + + def test_nvvm_warning(self): + m = ir.Module("test_nvvm_warning") + m.triple = 'nvptx64-nvidia-cuda' + m.data_layout = NVVM().data_layout + nvvm.add_ir_version(m) + + fty = ir.FunctionType(ir.VoidType(), []) + kernel = ir.Function(m, fty, name='inlinekernel') + builder = ir.IRBuilder(kernel.append_basic_block('entry')) + builder.ret_void() + nvvm.set_cuda_kernel(kernel) + + # Add the noinline attribute to trigger NVVM to generate a warning + kernel.attributes.add('noinline') + + with warnings.catch_warnings(record=True) as w: + nvvm.compile_ir(str(m)) + + self.assertEqual(len(w), 1) + self.assertIn('overriding noinline attribute', str(w[0])) + + +@skip_on_cudasim('NVVM Driver unsupported in the simulator') +class TestArchOption(unittest.TestCase): + def test_get_arch_option(self): + # Test returning the nearest lowest arch. + self.assertEqual(nvvm.get_arch_option(5, 3), 'compute_53') + self.assertEqual(nvvm.get_arch_option(7, 5), 'compute_75') + self.assertEqual(nvvm.get_arch_option(7, 7), 'compute_75') + # Test known arch. 
+ supported_cc = nvvm.get_supported_ccs() + for arch in supported_cc: + self.assertEqual(nvvm.get_arch_option(*arch), 'compute_%d%d' % arch) + self.assertEqual(nvvm.get_arch_option(1000, 0), + 'compute_%d%d' % supported_cc[-1]) + + +@skip_on_cudasim('NVVM Driver unsupported in the simulator') +class TestLibDevice(unittest.TestCase): + def test_libdevice_load(self): + # Test that constructing LibDevice gives a bitcode file + libdevice = LibDevice() + self.assertEqual(libdevice.bc[:4], b'BC\xc0\xde') + + +nvvmir_generic = '''\ +target triple="nvptx64-nvidia-cuda" +target datalayout = "{data_layout}" + +define i32 @ave(i32 %a, i32 %b) {{ +entry: +%add = add nsw i32 %a, %b +%div = sdiv i32 %add, 2 +ret i32 %div +}} + +define void @simple(i32* %data) {{ +entry: +%0 = call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() +%1 = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() +%mul = mul i32 %0, %1 +%2 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() +%add = add i32 %mul, %2 +%call = call i32 @ave(i32 %add, i32 %add) +%idxprom = sext i32 %add to i64 +%arrayidx = getelementptr inbounds i32, i32* %data, i64 %idxprom +store i32 %call, i32* %arrayidx, align 4 +ret void +}} + +declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() nounwind readnone + +declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x() nounwind readnone + +declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() nounwind readnone + +!nvvmir.version = !{{!1}} +!1 = !{{i32 {v[0]}, i32 {v[1]}, i32 {v[2]}, i32 {v[3]}}} + +!nvvm.annotations = !{{!2}} +!2 = !{{void (i32*)* @simple, !"kernel", i32 1}} + +@"llvm.used" = appending global [1 x i8*] [i8* bitcast (void (i32*)* @simple to i8*)], section "llvm.metadata" +''' # noqa: E501 + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_pinned.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_pinned.py new file mode 100644 index 0000000000000000000000000000000000000000..ef727c5a89cbcd84ef813a7227b0654ab3070f68 --- /dev/null +++ 
b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_pinned.py @@ -0,0 +1,37 @@ +import numpy as np +import platform + +from numba import cuda +from numba.cuda.testing import unittest, ContextResettingTestCase + + +class TestPinned(ContextResettingTestCase): + + def _run_copies(self, A): + A0 = np.copy(A) + + stream = cuda.stream() + ptr = cuda.to_device(A, copy=False, stream=stream) + ptr.copy_to_device(A, stream=stream) + ptr.copy_to_host(A, stream=stream) + stream.synchronize() + + self.assertTrue(np.allclose(A, A0)) + + def test_pinned(self): + machine = platform.machine() + if machine.startswith('arm') or machine.startswith('aarch64'): + count = 262144 # 2MB + else: + count = 2097152 # 16MB + A = np.arange(count) + with cuda.pinned(A): + self._run_copies(A) + + def test_unpinned(self): + A = np.arange(2 * 1024 * 1024) # 16 MB + self._run_copies(A) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_profiler.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..1660d4d42fc1de142d4762a0ed7859278f99f2fb --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_profiler.py @@ -0,0 +1,20 @@ +import unittest +from numba.cuda.testing import ContextResettingTestCase +from numba import cuda +from numba.cuda.testing import skip_on_cudasim + + +@skip_on_cudasim('CUDA Profiler unsupported in the simulator') +class TestProfiler(ContextResettingTestCase): + def test_profiling(self): + with cuda.profiling(): + a = cuda.device_array(10) + del a + + with cuda.profiling(): + a = cuda.device_array(100) + del a + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_ptds.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_ptds.py new file mode 100644 index 
0000000000000000000000000000000000000000..b03fd3647aeac35c0375c5947b072aaefa56bf1c --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_ptds.py @@ -0,0 +1,149 @@ +import multiprocessing as mp +import logging +import traceback +from numba.cuda.testing import unittest, CUDATestCase +from numba.cuda.testing import (skip_on_cudasim, skip_with_cuda_python, + skip_under_cuda_memcheck) +from numba.tests.support import linux_only + + +def child_test(): + from numba import cuda, int32, void + from numba.core import config + import io + import numpy as np + import threading + + # Enable PTDS before we make any CUDA driver calls. Enabling it first + # ensures that PTDS APIs are used because the CUDA driver looks up API + # functions on first use and memoizes them. + config.CUDA_PER_THREAD_DEFAULT_STREAM = 1 + + # Set up log capture for the Driver API so we can see what API calls were + # used. + logbuf = io.StringIO() + handler = logging.StreamHandler(logbuf) + cudadrv_logger = logging.getLogger('numba.cuda.cudadrv.driver') + cudadrv_logger.addHandler(handler) + cudadrv_logger.setLevel(logging.DEBUG) + + # Set up data for our test, and copy over to the device + N = 2 ** 16 + N_THREADS = 10 + N_ADDITIONS = 4096 + + # Seed the RNG for repeatability + np.random.seed(1) + x = np.random.randint(low=0, high=1000, size=N, dtype=np.int32) + r = np.zeros_like(x) + + # One input and output array for each thread + xs = [cuda.to_device(x) for _ in range(N_THREADS)] + rs = [cuda.to_device(r) for _ in range(N_THREADS)] + + # Compute the grid size and get the [per-thread] default stream + n_threads = 256 + n_blocks = N // n_threads + stream = cuda.default_stream() + + # A simple multiplication-by-addition kernel. What it does exactly is not + # too important; only that we have a kernel that does something. 
+ @cuda.jit(void(int32[::1], int32[::1])) + def f(r, x): + i = cuda.grid(1) + + if i > len(r): + return + + # Accumulate x into r + for j in range(N_ADDITIONS): + r[i] += x[i] + + # This function will be used to launch the kernel from each thread on its + # own unique data. + def kernel_thread(n): + f[n_blocks, n_threads, stream](rs[n], xs[n]) + + # Create threads + threads = [threading.Thread(target=kernel_thread, args=(i,)) + for i in range(N_THREADS)] + + # Start all threads + for thread in threads: + thread.start() + + # Wait for all threads to finish, to ensure that we don't synchronize with + # the device until all kernels are scheduled. + for thread in threads: + thread.join() + + # Synchronize with the device + cuda.synchronize() + + # Check output is as expected + expected = x * N_ADDITIONS + for i in range(N_THREADS): + np.testing.assert_equal(rs[i].copy_to_host(), expected) + + # Return the driver log output to the calling process for checking + handler.flush() + return logbuf.getvalue() + + +def child_test_wrapper(result_queue): + try: + output = child_test() + success = True + # Catch anything raised so it can be propagated + except: # noqa: E722 + output = traceback.format_exc() + success = False + + result_queue.put((success, output)) + + +# Run on Linux only until the reason for test hangs on Windows (Issue #8635, +# https://github.com/numba/numba/issues/8635) is diagnosed +@linux_only +@skip_under_cuda_memcheck('Hangs cuda-memcheck') +@skip_on_cudasim('Streams not supported on the simulator') +class TestPTDS(CUDATestCase): + @skip_with_cuda_python('Function names unchanged for PTDS with NV Binding') + def test_ptds(self): + # Run a test with PTDS enabled in a child process + ctx = mp.get_context('spawn') + result_queue = ctx.Queue() + proc = ctx.Process(target=child_test_wrapper, args=(result_queue,)) + proc.start() + proc.join() + success, output = result_queue.get() + + # Ensure the child process ran to completion before checking its output + if 
not success: + self.fail(output) + + # Functions with a per-thread default stream variant that we expect to + # see in the output + ptds_functions = ('cuMemcpyHtoD_v2_ptds', 'cuLaunchKernel_ptsz', + 'cuMemcpyDtoH_v2_ptds') + + for fn in ptds_functions: + with self.subTest(fn=fn, expected=True): + self.assertIn(fn, output) + + # Non-PTDS versions of the functions that we should not see in the + # output: + legacy_functions = ('cuMemcpyHtoD_v2', 'cuLaunchKernel', + 'cuMemcpyDtoH_v2') + + for fn in legacy_functions: + with self.subTest(fn=fn, expected=False): + # Ensure we only spot these function names appearing without a + # _ptds or _ptsz suffix by checking including the end of the + # line in the log + fn_at_end = f'{fn}\n' + self.assertNotIn(fn_at_end, output) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_reset_device.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_reset_device.py new file mode 100644 index 0000000000000000000000000000000000000000..f2e0b6d108dda4cdf58ec340ede7a429b556cefc --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_reset_device.py @@ -0,0 +1,36 @@ +import threading +from numba import cuda +from numba.cuda.cudadrv.driver import driver +from numba.cuda.testing import unittest, ContextResettingTestCase +from queue import Queue + + +class TestResetDevice(ContextResettingTestCase): + def test_reset_device(self): + + def newthread(exception_queue): + try: + devices = range(driver.get_device_count()) + for _ in range(2): + for d in devices: + cuda.select_device(d) + cuda.close() + except Exception as e: + exception_queue.put(e) + + # Do test on a separate thread so that we don't affect + # the current context in the main thread. 
+ + exception_queue = Queue() + t = threading.Thread(target=newthread, args=(exception_queue,)) + t.start() + t.join() + + exceptions = [] + while not exception_queue.empty(): + exceptions.append(exception_queue.get()) + self.assertEqual(exceptions, []) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_runtime.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..51e0722eca4eb2f3868ebdff47e1b85ac224efcc --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_runtime.py @@ -0,0 +1,85 @@ +import multiprocessing +import os +from numba.core import config +from numba.cuda.cudadrv.runtime import runtime +from numba.cuda.testing import unittest, SerialMixin, skip_on_cudasim +from unittest.mock import patch + + +def set_visible_devices_and_check(q): + try: + from numba import cuda + import os + + os.environ['CUDA_VISIBLE_DEVICES'] = '0' + q.put(len(cuda.gpus.lst)) + except: # noqa: E722 + # Sentinel value for error executing test code + q.put(-1) + + +if config.ENABLE_CUDASIM: + SUPPORTED_VERSIONS = (-1, -1), +else: + SUPPORTED_VERSIONS = ((11, 0), (11, 1), (11, 2), (11, 3), (11, 4), (11, 5), + (11, 6), (11, 7)) + + +class TestRuntime(unittest.TestCase): + def test_is_supported_version_true(self): + for v in SUPPORTED_VERSIONS: + with patch.object(runtime, 'get_version', return_value=v): + self.assertTrue(runtime.is_supported_version()) + + @skip_on_cudasim('The simulator always simulates a supported runtime') + def test_is_supported_version_false(self): + # Check with an old unsupported version and some potential future + # versions + for v in ((10, 2), (11, 8), (12, 0)): + with patch.object(runtime, 'get_version', return_value=v): + self.assertFalse(runtime.is_supported_version()) + + def test_supported_versions(self): + self.assertEqual(SUPPORTED_VERSIONS, runtime.supported_versions) + + 
+class TestVisibleDevices(unittest.TestCase, SerialMixin): + def test_visible_devices_set_after_import(self): + # See Issue #6149. This test checks that we can set + # CUDA_VISIBLE_DEVICES after importing Numba and have the value + # reflected in the available list of GPUs. Prior to the fix for this + # issue, Numba made a call to runtime.get_version() on import that + # initialized the driver and froze the list of available devices before + # CUDA_VISIBLE_DEVICES could be set by the user. + + # Avoid importing cuda at the top level so that + # set_visible_devices_and_check gets to import it first in its process + from numba import cuda + + if len(cuda.gpus.lst) in (0, 1): + self.skipTest('This test requires multiple GPUs') + + if os.environ.get('CUDA_VISIBLE_DEVICES'): + msg = 'Cannot test when CUDA_VISIBLE_DEVICES already set' + self.skipTest(msg) + + ctx = multiprocessing.get_context('spawn') + q = ctx.Queue() + p = ctx.Process(target=set_visible_devices_and_check, args=(q,)) + p.start() + try: + visible_gpu_count = q.get() + finally: + p.join() + + # Make an obvious distinction between an error running the test code + # and an incorrect number of GPUs in the list + msg = 'Error running set_visible_devices_and_check' + self.assertNotEqual(visible_gpu_count, -1, msg=msg) + + # The actual check that we see only one GPU + self.assertEqual(visible_gpu_count, 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_select_device.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_select_device.py new file mode 100644 index 0000000000000000000000000000000000000000..aca78d94bff59d41966b24ceca026284692c8a51 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_select_device.py @@ -0,0 +1,41 @@ +# +# Test does not work on some cards. 
+# +import threading +from queue import Queue + +import numpy as np +from numba import cuda +from numba.cuda.testing import unittest, ContextResettingTestCase + + +def newthread(exception_queue): + try: + cuda.select_device(0) + stream = cuda.stream() + A = np.arange(100) + dA = cuda.to_device(A, stream=stream) + stream.synchronize() + del dA + del stream + cuda.close() + except Exception as e: + exception_queue.put(e) + + +class TestSelectDevice(ContextResettingTestCase): + def test_select_device(self): + exception_queue = Queue() + for i in range(10): + t = threading.Thread(target=newthread, args=(exception_queue,)) + t.start() + t.join() + + exceptions = [] + while not exception_queue.empty(): + exceptions.append(exception_queue.get()) + self.assertEqual(exceptions, []) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_streams.py b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_streams.py new file mode 100644 index 0000000000000000000000000000000000000000..c4fbec19f7a7c64bd09b820a854af07f180ca1b3 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudadrv/test_streams.py @@ -0,0 +1,122 @@ +import asyncio +import functools +import threading +import numpy as np +from numba import cuda +from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim + + +def with_asyncio_loop(f): + @functools.wraps(f) + def runner(*args, **kwds): + loop = asyncio.new_event_loop() + loop.set_debug(True) + try: + return loop.run_until_complete(f(*args, **kwds)) + finally: + loop.close() + return runner + + +@skip_on_cudasim('CUDA Driver API unsupported in the simulator') +class TestCudaStream(CUDATestCase): + def test_add_callback(self): + def callback(stream, status, event): + event.set() + + stream = cuda.stream() + callback_event = threading.Event() + stream.add_callback(callback, callback_event) + self.assertTrue(callback_event.wait(1.0)) + + def 
test_add_callback_with_default_arg(self): + callback_event = threading.Event() + + def callback(stream, status, arg): + self.assertIsNone(arg) + callback_event.set() + + stream = cuda.stream() + stream.add_callback(callback) + self.assertTrue(callback_event.wait(1.0)) + + @with_asyncio_loop + async def test_async_done(self): + stream = cuda.stream() + await stream.async_done() + + @with_asyncio_loop + async def test_parallel_tasks(self): + async def async_cuda_fn(value_in: float) -> float: + stream = cuda.stream() + h_src, h_dst = cuda.pinned_array(8), cuda.pinned_array(8) + h_src[:] = value_in + d_ary = cuda.to_device(h_src, stream=stream) + d_ary.copy_to_host(h_dst, stream=stream) + done_result = await stream.async_done() + self.assertEqual(done_result, stream) + return h_dst.mean() + + values_in = [1, 2, 3, 4] + tasks = [asyncio.create_task(async_cuda_fn(v)) for v in values_in] + values_out = await asyncio.gather(*tasks) + self.assertTrue(np.allclose(values_in, values_out)) + + @with_asyncio_loop + async def test_multiple_async_done(self): + stream = cuda.stream() + done_aws = [stream.async_done() for _ in range(4)] + done = await asyncio.gather(*done_aws) + for d in done: + self.assertEqual(d, stream) + + @with_asyncio_loop + async def test_multiple_async_done_multiple_streams(self): + streams = [cuda.stream() for _ in range(4)] + done_aws = [stream.async_done() for stream in streams] + done = await asyncio.gather(*done_aws) + + # Ensure we got the four original streams in done + self.assertSetEqual(set(done), set(streams)) + + @with_asyncio_loop + async def test_cancelled_future(self): + stream = cuda.stream() + done1, done2 = stream.async_done(), stream.async_done() + done1.cancel() + await done2 + self.assertTrue(done1.cancelled()) + self.assertTrue(done2.done()) + + +@skip_on_cudasim('CUDA Driver API unsupported in the simulator') +class TestFailingStream(CUDATestCase): + # This test can only be run in isolation because it corrupts the CUDA + # context, 
which cannot be recovered from within the same process. It is + # left here so that it can be run manually for debugging / testing purposes + # - or may be re-enabled if in future there is infrastructure added for + # running tests in a separate process (a subprocess cannot be used because + # CUDA will have been initialized before the fork, so it cannot be used in + # the child process). + @unittest.skip + @with_asyncio_loop + async def test_failed_stream(self): + ctx = cuda.current_context() + module = ctx.create_module_ptx(""" + .version 6.5 + .target sm_30 + .address_size 64 + .visible .entry failing_kernel() { trap; } + """) + failing_kernel = module.get_function("failing_kernel") + + stream = cuda.stream() + failing_kernel.configure((1,), (1,), stream=stream).__call__() + done = stream.async_done() + with self.assertRaises(Exception): + await done + self.assertIsNotNone(done.exception()) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_array_methods.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_array_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..7f129b5df03121032c8f7252926eace3eab5c5d8 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_array_methods.py @@ -0,0 +1,35 @@ +import numpy as np +from numba import cuda +from numba.cuda.testing import CUDATestCase +import unittest + + +def reinterpret_array_type(byte_arr, start, stop, output): + # Tested with just one thread + val = byte_arr[start:stop].view(np.int32)[0] + output[0] = val + + +class TestCudaArrayMethods(CUDATestCase): + def test_reinterpret_array_type(self): + """ + Reinterpret byte array as int32 in the GPU. 
+ """ + pyfunc = reinterpret_array_type + kernel = cuda.jit(pyfunc) + + byte_arr = np.arange(256, dtype=np.uint8) + itemsize = np.dtype(np.int32).itemsize + for start in range(0, 256, itemsize): + stop = start + itemsize + expect = byte_arr[start:stop].view(np.int32)[0] + + output = np.zeros(1, dtype=np.int32) + kernel[1, 1](byte_arr, start, stop, output) + + got = output[0] + self.assertEqual(expect, got) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_blackscholes.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_blackscholes.py new file mode 100644 index 0000000000000000000000000000000000000000..1375162d9e1015009ea2a1c7ce098340231074f3 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_blackscholes.py @@ -0,0 +1,120 @@ +import numpy as np +import math +from numba import cuda, double, void +from numba.cuda.testing import unittest, CUDATestCase + + +RISKFREE = 0.02 +VOLATILITY = 0.30 + +A1 = 0.31938153 +A2 = -0.356563782 +A3 = 1.781477937 +A4 = -1.821255978 +A5 = 1.330274429 +RSQRT2PI = 0.39894228040143267793994605993438 + + +def cnd(d): + K = 1.0 / (1.0 + 0.2316419 * np.abs(d)) + ret_val = (RSQRT2PI * np.exp(-0.5 * d * d) * + (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))))) + return np.where(d > 0, 1.0 - ret_val, ret_val) + + +def black_scholes(callResult, putResult, stockPrice, optionStrike, optionYears, + Riskfree, Volatility): + S = stockPrice + X = optionStrike + T = optionYears + R = Riskfree + V = Volatility + sqrtT = np.sqrt(T) + d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT) + d2 = d1 - V * sqrtT + cndd1 = cnd(d1) + cndd2 = cnd(d2) + + expRT = np.exp(- R * T) + callResult[:] = (S * cndd1 - X * expRT * cndd2) + putResult[:] = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1)) + + +def randfloat(rand_var, low, high): + return (1.0 - rand_var) * low + rand_var * high + + +class TestBlackScholes(CUDATestCase): + def 
test_blackscholes(self): + OPT_N = 400 + iterations = 2 + + stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0) + optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0) + optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0) + + callResultNumpy = np.zeros(OPT_N) + putResultNumpy = -np.ones(OPT_N) + + callResultNumba = np.zeros(OPT_N) + putResultNumba = -np.ones(OPT_N) + + # numpy + for i in range(iterations): + black_scholes(callResultNumpy, putResultNumpy, stockPrice, + optionStrike, optionYears, RISKFREE, VOLATILITY) + + @cuda.jit(double(double), device=True, inline=True) + def cnd_cuda(d): + K = 1.0 / (1.0 + 0.2316419 * math.fabs(d)) + ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) * + (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))))) + if d > 0: + ret_val = 1.0 - ret_val + return ret_val + + @cuda.jit(void(double[:], double[:], double[:], double[:], double[:], + double, double)) + def black_scholes_cuda(callResult, putResult, S, X, T, R, V): + i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x + if i >= S.shape[0]: + return + sqrtT = math.sqrt(T[i]) + d1 = ((math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) + / (V * sqrtT)) + d2 = d1 - V * sqrtT + cndd1 = cnd_cuda(d1) + cndd2 = cnd_cuda(d2) + + expRT = math.exp((-1. 
* R) * T[i]) + callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2) + putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1)) + + # numba + blockdim = 512, 1 + griddim = int(math.ceil(float(OPT_N) / blockdim[0])), 1 + stream = cuda.stream() + d_callResult = cuda.to_device(callResultNumba, stream) + d_putResult = cuda.to_device(putResultNumba, stream) + d_stockPrice = cuda.to_device(stockPrice, stream) + d_optionStrike = cuda.to_device(optionStrike, stream) + d_optionYears = cuda.to_device(optionYears, stream) + + for i in range(iterations): + black_scholes_cuda[griddim, blockdim, stream]( + d_callResult, d_putResult, d_stockPrice, d_optionStrike, + d_optionYears, RISKFREE, VOLATILITY) + d_callResult.copy_to_host(callResultNumba, stream) + d_putResult.copy_to_host(putResultNumba, stream) + stream.synchronize() + + delta = np.abs(callResultNumpy - callResultNumba) + L1norm = delta.sum() / np.abs(callResultNumpy).sum() + + max_abs_err = delta.max() + self.assertTrue(L1norm < 1e-13) + self.assertTrue(max_abs_err < 1e-13) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_const_string.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_const_string.py new file mode 100644 index 0000000000000000000000000000000000000000..173319cb223c11bd0cb1866926d50b63dee9a36e --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_const_string.py @@ -0,0 +1,129 @@ +import re +import numpy as np +from numba import cuda +from numba.cuda.testing import unittest, skip_on_cudasim, CUDATestCase +from llvmlite import ir + + +@skip_on_cudasim("This is testing CUDA backend code generation") +class TestConstStringCodegen(unittest.TestCase): + def test_const_string(self): + # These imports are incompatible with CUDASIM + from numba.cuda.descriptor import cuda_target + from numba.cuda.cudadrv.nvvm import compile_ir + + targetctx = cuda_target.target_context + mod = 
targetctx.create_module("") + textstring = 'A Little Brown Fox' + gv0 = targetctx.insert_const_string(mod, textstring) + # Insert the same const string a second time - the first should be + # reused. + targetctx.insert_const_string(mod, textstring) + + res = re.findall(r"@\"__conststring__.*internal.*constant.*\[" + r"19\s+x\s+i8\]", str(mod)) + # Ensure that the const string was only inserted once + self.assertEqual(len(res), 1) + + fnty = ir.FunctionType(ir.IntType(8).as_pointer(), []) + + # Using insert_const_string + fn = ir.Function(mod, fnty, "test_insert_const_string") + builder = ir.IRBuilder(fn.append_basic_block()) + res = builder.addrspacecast(gv0, ir.PointerType(ir.IntType(8)), + 'generic') + builder.ret(res) + + matches = re.findall(r"@\"__conststring__.*internal.*constant.*\[" + r"19\s+x\s+i8\]", str(mod)) + self.assertEqual(len(matches), 1) + + # Using insert_string_const_addrspace + fn = ir.Function(mod, fnty, "test_insert_string_const_addrspace") + builder = ir.IRBuilder(fn.append_basic_block()) + res = targetctx.insert_string_const_addrspace(builder, textstring) + builder.ret(res) + + matches = re.findall(r"@\"__conststring__.*internal.*constant.*\[" + r"19\s+x\s+i8\]", str(mod)) + self.assertEqual(len(matches), 1) + + ptx = compile_ir(str(mod)).decode('ascii') + matches = list(re.findall(r"\.const.*__conststring__", ptx)) + + self.assertEqual(len(matches), 1) + + +# Inspired by the reproducer from Issue #7041. 
+class TestConstString(CUDATestCase): + def test_assign_const_unicode_string(self): + @cuda.jit + def str_assign(arr): + i = cuda.grid(1) + if i < len(arr): + arr[i] = "XYZ" + + n_strings = 8 + arr = np.zeros(n_strings + 1, dtype=" mb: + unittest.skip("GPU cannot support enough cooperative grid blocks") + + c_sequential_rows[griddim, blockdim](A) + + reference = np.tile(np.arange(shape[0]), (shape[1], 1)).T + np.testing.assert_equal(A, reference) + + @skip_unless_cc_60 + def test_max_cooperative_grid_blocks(self): + # The maximum number of blocks will vary based on the device so we + # can't test for an expected value, but we can check that the function + # doesn't error, and that varying the number of dimensions of the block + # whilst keeping the total number of threads constant doesn't change + # the maximum to validate some of the logic. + sig = (int32[:,::1],) + c_sequential_rows = cuda.jit(sig)(sequential_rows) + overload = c_sequential_rows.overloads[sig] + blocks1d = overload.max_cooperative_grid_blocks(256) + blocks2d = overload.max_cooperative_grid_blocks((16, 16)) + blocks3d = overload.max_cooperative_grid_blocks((16, 4, 4)) + self.assertEqual(blocks1d, blocks2d) + self.assertEqual(blocks1d, blocks3d) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_errors.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..c20fb8dccdf844ead9e4b46a3651b54a40b9efde --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_errors.py @@ -0,0 +1,79 @@ +from numba import cuda +from numba.core.errors import TypingError +from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim + + +def noop(x): + pass + + +class TestJitErrors(CUDATestCase): + """ + Test compile-time errors with @jit. 
+ """ + + def test_too_many_dims(self): + kernfunc = cuda.jit(noop) + + with self.assertRaises(ValueError) as raises: + kernfunc[(1, 2, 3, 4), (5, 6)] + self.assertIn("griddim must be a sequence of 1, 2 or 3 integers, " + "got [1, 2, 3, 4]", + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + kernfunc[(1, 2,), (3, 4, 5, 6)] + self.assertIn("blockdim must be a sequence of 1, 2 or 3 integers, " + "got [3, 4, 5, 6]", + str(raises.exception)) + + def test_non_integral_dims(self): + kernfunc = cuda.jit(noop) + + with self.assertRaises(TypeError) as raises: + kernfunc[2.0, 3] + self.assertIn("griddim must be a sequence of integers, got [2.0]", + str(raises.exception)) + + with self.assertRaises(TypeError) as raises: + kernfunc[2, 3.0] + self.assertIn("blockdim must be a sequence of integers, got [3.0]", + str(raises.exception)) + + def _test_unconfigured(self, kernfunc): + with self.assertRaises(ValueError) as raises: + kernfunc(0) + self.assertIn("launch configuration was not specified", + str(raises.exception)) + + def test_unconfigured_typed_cudakernel(self): + kernfunc = cuda.jit("void(int32)")(noop) + self._test_unconfigured(kernfunc) + + def test_unconfigured_untyped_cudakernel(self): + kernfunc = cuda.jit(noop) + self._test_unconfigured(kernfunc) + + @skip_on_cudasim('TypingError does not occur on simulator') + def test_typing_error(self): + # see #5860, this is present to catch changes to error reporting + # accidentally breaking the CUDA target + + @cuda.jit(device=True) + def dev_func(x): + # floor is deliberately not imported for the purpose of this test. 
+ return floor(x) # noqa: F821 + + @cuda.jit + def kernel_func(): + dev_func(1.5) + + with self.assertRaises(TypingError) as raises: + kernel_func[1, 1]() + excstr = str(raises.exception) + self.assertIn("resolving callee type: type(CUDADispatcher", excstr) + self.assertIn("NameError: name 'floor' is not defined", excstr) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_idiv.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_idiv.py new file mode 100644 index 0000000000000000000000000000000000000000..44b770f422deb7adefa7192982be9925e1ed291a --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_idiv.py @@ -0,0 +1,37 @@ +import numpy as np +from numba import cuda, float32, float64, int32, void +from numba.cuda.testing import unittest, CUDATestCase + + +class TestCudaIDiv(CUDATestCase): + def test_inplace_div(self): + + @cuda.jit(void(float32[:, :], int32, int32)) + def div(grid, l_x, l_y): + for x in range(l_x): + for y in range(l_y): + grid[x, y] /= 2.0 + + x = np.ones((2, 2), dtype=np.float32) + grid = cuda.to_device(x) + div[1, 1](grid, 2, 2) + y = grid.copy_to_host() + self.assertTrue(np.all(y == 0.5)) + + def test_inplace_div_double(self): + + @cuda.jit(void(float64[:, :], int32, int32)) + def div_double(grid, l_x, l_y): + for x in range(l_x): + for y in range(l_y): + grid[x, y] /= 2.0 + + x = np.ones((2, 2), dtype=np.float64) + grid = cuda.to_device(x) + div_double[1, 1](grid, 2, 2) + y = grid.copy_to_host() + self.assertTrue(np.all(y == 0.5)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_lang.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_lang.py new file mode 100644 index 0000000000000000000000000000000000000000..0241c1e408ce2715dffebbf27d06d7a3083da136 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_lang.py @@ -0,0 +1,64 @@ +""" 
+Test basic language features + +""" + +import numpy as np +from numba import cuda, float64 +from numba.cuda.testing import unittest, CUDATestCase + + +class TestLang(CUDATestCase): + def test_enumerate(self): + tup = (1., 2.5, 3.) + + @cuda.jit("void(float64[:])") + def foo(a): + for i, v in enumerate(tup): + a[i] = v + + a = np.zeros(len(tup)) + foo[1, 1](a) + self.assertTrue(np.all(a == tup)) + + def test_zip(self): + t1 = (1, 2, 3) + t2 = (4.5, 5.6, 6.7) + + @cuda.jit("void(float64[:])") + def foo(a): + c = 0 + for i, j in zip(t1, t2): + c += i + j + a[0] = c + + a = np.zeros(1) + foo[1, 1](a) + b = np.array(t1) + c = np.array(t2) + self.assertTrue(np.all(a == (b + c).sum())) + + def test_issue_872(self): + ''' + Ensure that typing and lowering of CUDA kernel API primitives works in + more than one block. Was originally to ensure that macro expansion works + for more than one block (issue #872), but macro expansion has been + replaced by a "proper" implementation of all kernel API functions. 
+ ''' + + @cuda.jit("void(float64[:,:])") + def cuda_kernel_api_in_multiple_blocks(ary): + for i in range(2): + tx = cuda.threadIdx.x + for j in range(3): + ty = cuda.threadIdx.y + sm = cuda.shared.array((2, 3), float64) + sm[tx, ty] = 1.0 + ary[tx, ty] = sm[tx, ty] + + a = np.zeros((2, 3)) + cuda_kernel_api_in_multiple_blocks[1, (2, 3)](a) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_matmul.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..51f1181a3a8ad46c38708eb215dfa26570354ab1 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_matmul.py @@ -0,0 +1,74 @@ +import numpy as np + +from numba import cuda, float32, void +from numba.cuda.testing import unittest, CUDATestCase +from numba.core import config + +# Ensure the test takes a reasonable amount of time in the simulator +if config.ENABLE_CUDASIM: + bpg, tpb = 2, 8 +else: + bpg, tpb = 50, 32 + +n = bpg * tpb +SM_SIZE = (tpb, tpb) + + +class TestCudaMatMul(CUDATestCase): + + def test_func(self): + + @cuda.jit(void(float32[:, ::1], float32[:, ::1], float32[:, ::1])) + def cu_square_matrix_mul(A, B, C): + sA = cuda.shared.array(shape=SM_SIZE, dtype=float32) + sB = cuda.shared.array(shape=(tpb, tpb), dtype=float32) + + tx = cuda.threadIdx.x + ty = cuda.threadIdx.y + bx = cuda.blockIdx.x + by = cuda.blockIdx.y + bw = cuda.blockDim.x + bh = cuda.blockDim.y + + x = tx + bx * bw + y = ty + by * bh + + acc = float32(0) # forces all the math to be f32 + for i in range(bpg): + if x < n and y < n: + sA[ty, tx] = A[y, tx + i * tpb] + sB[ty, tx] = B[ty + i * tpb, x] + + cuda.syncthreads() + + if x < n and y < n: + for j in range(tpb): + acc += sA[ty, j] * sB[j, tx] + + cuda.syncthreads() + + if x < n and y < n: + C[y, x] = acc + + np.random.seed(42) + A = np.array(np.random.random((n, n)), dtype=np.float32) + B = 
np.array(np.random.random((n, n)), dtype=np.float32) + C = np.empty_like(A) + + stream = cuda.stream() + with stream.auto_synchronize(): + dA = cuda.to_device(A, stream) + dB = cuda.to_device(B, stream) + dC = cuda.to_device(C, stream) + cu_square_matrix_mul[(bpg, bpg), (tpb, tpb), stream](dA, dB, dC) + dC.copy_to_host(C, stream) + + # Host compute + Cans = np.dot(A, B) + + # Check result + np.testing.assert_allclose(C, Cans, rtol=1e-5) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_minmax.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_minmax.py new file mode 100644 index 0000000000000000000000000000000000000000..aee97fd63e0c7a2dfb7b4bf2280d86ea39d6e260 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_minmax.py @@ -0,0 +1,113 @@ +import numpy as np + +from numba import cuda, float64 +from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim + + +def builtin_max(A, B, C): + i = cuda.grid(1) + + if i >= len(C): + return + + C[i] = float64(max(A[i], B[i])) + + +def builtin_min(A, B, C): + i = cuda.grid(1) + + if i >= len(C): + return + + C[i] = float64(min(A[i], B[i])) + + +@skip_on_cudasim('Tests PTX emission') +class TestCudaMinMax(CUDATestCase): + def _run( + self, + kernel, + numpy_equivalent, + ptx_instruction, + dtype_left, + dtype_right, + n=5): + kernel = cuda.jit(kernel) + + c = np.zeros(n, dtype=np.float64) + a = np.arange(n, dtype=dtype_left) + .5 + b = np.full(n, fill_value=2, dtype=dtype_right) + + kernel[1, c.shape](a, b, c) + np.testing.assert_allclose(c, numpy_equivalent(a, b)) + + ptx = next(p for p in kernel.inspect_asm().values()) + self.assertIn(ptx_instruction, ptx) + + def test_max_f8f8(self): + self._run( + builtin_max, + np.maximum, + 'max.f64', + np.float64, + np.float64) + + def test_max_f4f8(self): + self._run( + builtin_max, + np.maximum, + 'max.f64', + np.float32, + np.float64) + + def test_max_f8f4(self): + 
self._run( + builtin_max, + np.maximum, + 'max.f64', + np.float64, + np.float32) + + def test_max_f4f4(self): + self._run( + builtin_max, + np.maximum, + 'max.f32', + np.float32, + np.float32) + + def test_min_f8f8(self): + self._run( + builtin_min, + np.minimum, + 'min.f64', + np.float64, + np.float64) + + def test_min_f4f8(self): + self._run( + builtin_min, + np.minimum, + 'min.f64', + np.float32, + np.float64) + + def test_min_f8f4(self): + self._run( + builtin_min, + np.minimum, + 'min.f64', + np.float64, + np.float32) + + def test_min_f4f4(self): + self._run( + builtin_min, + np.minimum, + 'min.f32', + np.float32, + np.float32) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sync.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..d4d9326f0357e1c299d4bd9c5781e5e2a22b7002 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sync.py @@ -0,0 +1,271 @@ +import numpy as np +from numba import cuda, int32, float32 +from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase +from numba.core.config import ENABLE_CUDASIM + + +def useless_syncthreads(ary): + i = cuda.grid(1) + cuda.syncthreads() + ary[i] = i + + +def useless_syncwarp(ary): + i = cuda.grid(1) + cuda.syncwarp() + ary[i] = i + + +def useless_syncwarp_with_mask(ary): + i = cuda.grid(1) + cuda.syncwarp(0xFFFF) + ary[i] = i + + +def coop_syncwarp(res): + sm = cuda.shared.array(32, int32) + i = cuda.grid(1) + + sm[i] = i + cuda.syncwarp() + + if i < 16: + sm[i] = sm[i] + sm[i + 16] + cuda.syncwarp(0xFFFF) + + if i < 8: + sm[i] = sm[i] + sm[i + 8] + cuda.syncwarp(0xFF) + + if i < 4: + sm[i] = sm[i] + sm[i + 4] + cuda.syncwarp(0xF) + + if i < 2: + sm[i] = sm[i] + sm[i + 2] + cuda.syncwarp(0x3) + + if i == 0: + res[0] = sm[0] + sm[1] + + +def simple_smem(ary): + N = 100 + sm = cuda.shared.array(N, int32) + i = 
cuda.grid(1) + if i == 0: + for j in range(N): + sm[j] = j + cuda.syncthreads() + ary[i] = sm[i] + + +def coop_smem2d(ary): + i, j = cuda.grid(2) + sm = cuda.shared.array((10, 20), float32) + sm[i, j] = (i + 1) / (j + 1) + cuda.syncthreads() + ary[i, j] = sm[i, j] + + +def dyn_shared_memory(ary): + i = cuda.grid(1) + sm = cuda.shared.array(0, float32) + sm[i] = i * 2 + cuda.syncthreads() + ary[i] = sm[i] + + +def use_threadfence(ary): + ary[0] += 123 + cuda.threadfence() + ary[0] += 321 + + +def use_threadfence_block(ary): + ary[0] += 123 + cuda.threadfence_block() + ary[0] += 321 + + +def use_threadfence_system(ary): + ary[0] += 123 + cuda.threadfence_system() + ary[0] += 321 + + +def use_syncthreads_count(ary_in, ary_out): + i = cuda.grid(1) + ary_out[i] = cuda.syncthreads_count(ary_in[i]) + + +def use_syncthreads_and(ary_in, ary_out): + i = cuda.grid(1) + ary_out[i] = cuda.syncthreads_and(ary_in[i]) + + +def use_syncthreads_or(ary_in, ary_out): + i = cuda.grid(1) + ary_out[i] = cuda.syncthreads_or(ary_in[i]) + + +def _safe_cc_check(cc): + if ENABLE_CUDASIM: + return True + else: + return cuda.get_current_device().compute_capability >= cc + + +class TestCudaSync(CUDATestCase): + def _test_useless(self, kernel): + compiled = cuda.jit("void(int32[::1])")(kernel) + nelem = 10 + ary = np.empty(nelem, dtype=np.int32) + exp = np.arange(nelem, dtype=np.int32) + compiled[1, nelem](ary) + np.testing.assert_equal(ary, exp) + + def test_useless_syncthreads(self): + self._test_useless(useless_syncthreads) + + @skip_on_cudasim("syncwarp not implemented on cudasim") + def test_useless_syncwarp(self): + self._test_useless(useless_syncwarp) + + @skip_on_cudasim("syncwarp not implemented on cudasim") + @unittest.skipUnless(_safe_cc_check((7, 0)), + "Partial masks require CC 7.0 or greater") + def test_useless_syncwarp_with_mask(self): + self._test_useless(useless_syncwarp_with_mask) + + @skip_on_cudasim("syncwarp not implemented on cudasim") + 
@unittest.skipUnless(_safe_cc_check((7, 0)), + "Partial masks require CC 7.0 or greater") + def test_coop_syncwarp(self): + # coop_syncwarp computes the sum of all integers from 0 to 31 (496) + # using a single warp + expected = 496 + nthreads = 32 + nblocks = 1 + + compiled = cuda.jit("void(int32[::1])")(coop_syncwarp) + res = np.zeros(1, dtype=np.int32) + compiled[nblocks, nthreads](res) + np.testing.assert_equal(expected, res[0]) + + def test_simple_smem(self): + compiled = cuda.jit("void(int32[::1])")(simple_smem) + nelem = 100 + ary = np.empty(nelem, dtype=np.int32) + compiled[1, nelem](ary) + self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32))) + + def test_coop_smem2d(self): + compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d) + shape = 10, 20 + ary = np.empty(shape, dtype=np.float32) + compiled[1, shape](ary) + exp = np.empty_like(ary) + for i in range(ary.shape[0]): + for j in range(ary.shape[1]): + exp[i, j] = (i + 1) / (j + 1) + self.assertTrue(np.allclose(ary, exp)) + + def test_dyn_shared_memory(self): + compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory) + shape = 50 + ary = np.empty(shape, dtype=np.float32) + compiled[1, shape, 0, ary.size * 4](ary) + self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32))) + + def test_threadfence_codegen(self): + # Does not test runtime behavior, just the code generation. + sig = (int32[:],) + compiled = cuda.jit(sig)(use_threadfence) + ary = np.zeros(10, dtype=np.int32) + compiled[1, 1](ary) + self.assertEqual(123 + 321, ary[0]) + if not ENABLE_CUDASIM: + self.assertIn("membar.gl;", compiled.inspect_asm(sig)) + + def test_threadfence_block_codegen(self): + # Does not test runtime behavior, just the code generation. 
+ sig = (int32[:],) + compiled = cuda.jit(sig)(use_threadfence_block) + ary = np.zeros(10, dtype=np.int32) + compiled[1, 1](ary) + self.assertEqual(123 + 321, ary[0]) + if not ENABLE_CUDASIM: + self.assertIn("membar.cta;", compiled.inspect_asm(sig)) + + def test_threadfence_system_codegen(self): + # Does not test runtime behavior, just the code generation. + sig = (int32[:],) + compiled = cuda.jit(sig)(use_threadfence_system) + ary = np.zeros(10, dtype=np.int32) + compiled[1, 1](ary) + self.assertEqual(123 + 321, ary[0]) + if not ENABLE_CUDASIM: + self.assertIn("membar.sys;", compiled.inspect_asm(sig)) + + def _test_syncthreads_count(self, in_dtype): + compiled = cuda.jit(use_syncthreads_count) + ary_in = np.ones(72, dtype=in_dtype) + ary_out = np.zeros(72, dtype=np.int32) + ary_in[31] = 0 + ary_in[42] = 0 + compiled[1, 72](ary_in, ary_out) + self.assertTrue(np.all(ary_out == 70)) + + def test_syncthreads_count(self): + self._test_syncthreads_count(np.int32) + + def test_syncthreads_count_upcast(self): + self._test_syncthreads_count(np.int16) + + def test_syncthreads_count_downcast(self): + self._test_syncthreads_count(np.int64) + + def _test_syncthreads_and(self, in_dtype): + compiled = cuda.jit(use_syncthreads_and) + nelem = 100 + ary_in = np.ones(nelem, dtype=in_dtype) + ary_out = np.zeros(nelem, dtype=np.int32) + compiled[1, nelem](ary_in, ary_out) + self.assertTrue(np.all(ary_out == 1)) + ary_in[31] = 0 + compiled[1, nelem](ary_in, ary_out) + self.assertTrue(np.all(ary_out == 0)) + + def test_syncthreads_and(self): + self._test_syncthreads_and(np.int32) + + def test_syncthreads_and_upcast(self): + self._test_syncthreads_and(np.int16) + + def test_syncthreads_and_downcast(self): + self._test_syncthreads_and(np.int64) + + def _test_syncthreads_or(self, in_dtype): + compiled = cuda.jit(use_syncthreads_or) + nelem = 100 + ary_in = np.zeros(nelem, dtype=in_dtype) + ary_out = np.zeros(nelem, dtype=np.int32) + compiled[1, nelem](ary_in, ary_out) + 
self.assertTrue(np.all(ary_out == 0)) + ary_in[31] = 1 + compiled[1, nelem](ary_in, ary_out) + self.assertTrue(np.all(ary_out == 1)) + + def test_syncthreads_or(self): + self._test_syncthreads_or(np.int32) + + def test_syncthreads_or_upcast(self): + self._test_syncthreads_or(np.int16) + + def test_syncthreads_or_downcast(self): + self._test_syncthreads_or(np.int64) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_ufuncs.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..7a98abde74fe68c1788d6ee9876777dc12b06330 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_ufuncs.py @@ -0,0 +1,277 @@ +import functools +import numpy as np +import unittest + +from numba import config, cuda, types +from numba.tests.support import TestCase +from numba.tests.test_ufuncs import BasicUFuncTest + + +def _make_ufunc_usecase(ufunc): + ldict = {} + arg_str = ','.join(['a{0}'.format(i) for i in range(ufunc.nargs)]) + func_str = f'def fn({arg_str}):\n np.{ufunc.__name__}({arg_str})' + exec(func_str, globals(), ldict) + fn = ldict['fn'] + fn.__name__ = '{0}_usecase'.format(ufunc.__name__) + return fn + + +# This test would also be a CUDATestCase, but to avoid a confusing and +# potentially dangerous inheritance diamond with setUp methods that modify +# global state, we implement the necessary parts of CUDATestCase within this +# class instead. These are: +# +# - Disable parallel testing with _numba_parallel_test_. +# - Disabling CUDA performance warnings for the duration of tests. +class TestUFuncs(BasicUFuncTest, TestCase): + _numba_parallel_test_ = False + + def setUp(self): + BasicUFuncTest.setUp(self) + + # The basic ufunc test does not set up complex inputs, so we'll add + # some here for testing with CUDA. 
+ self.inputs.extend([ + (np.complex64(-0.5 - 0.5j), types.complex64), + (np.complex64(0.0), types.complex64), + (np.complex64(0.5 + 0.5j), types.complex64), + + (np.complex128(-0.5 - 0.5j), types.complex128), + (np.complex128(0.0), types.complex128), + (np.complex128(0.5 + 0.5j), types.complex128), + + (np.array([-0.5 - 0.5j, 0.0, 0.5 + 0.5j], dtype='c8'), + types.Array(types.complex64, 1, 'C')), + (np.array([-0.5 - 0.5j, 0.0, 0.5 + 0.5j], dtype='c16'), + types.Array(types.complex128, 1, 'C')), + ]) + + # Test with multiple dimensions + self.inputs.extend([ + # Basic 2D and 3D arrays + (np.linspace(0, 1).reshape((5, -1)), + types.Array(types.float64, 2, 'C')), + (np.linspace(0, 1).reshape((2, 5, -1)), + types.Array(types.float64, 3, 'C')), + # Complex data (i.e. interleaved) + (np.linspace(0, 1 + 1j).reshape(5, -1), + types.Array(types.complex128, 2, 'C')), + # F-ordered + (np.asfortranarray(np.linspace(0, 1).reshape((5, -1))), + types.Array(types.float64, 2, 'F')), + ]) + + # Add tests for other integer types + self.inputs.extend([ + (np.uint8(0), types.uint8), + (np.uint8(1), types.uint8), + (np.int8(-1), types.int8), + (np.int8(0), types.int8), + + (np.uint16(0), types.uint16), + (np.uint16(1), types.uint16), + (np.int16(-1), types.int16), + (np.int16(0), types.int16), + + (np.ulonglong(0), types.ulonglong), + (np.ulonglong(1), types.ulonglong), + (np.longlong(-1), types.longlong), + (np.longlong(0), types.longlong), + + (np.array([0,1], dtype=np.ulonglong), + types.Array(types.ulonglong, 1, 'C')), + (np.array([0,1], dtype=np.longlong), + types.Array(types.longlong, 1, 'C')), + ]) + + self._low_occupancy_warnings = config.CUDA_LOW_OCCUPANCY_WARNINGS + self._warn_on_implicit_copy = config.CUDA_WARN_ON_IMPLICIT_COPY + + # Disable warnings about low gpu utilization in the test suite + config.CUDA_LOW_OCCUPANCY_WARNINGS = 0 + # Disable warnings about host arrays in the test suite + config.CUDA_WARN_ON_IMPLICIT_COPY = 0 + + def tearDown(self): + # Restore original 
warning settings + config.CUDA_LOW_OCCUPANCY_WARNINGS = self._low_occupancy_warnings + config.CUDA_WARN_ON_IMPLICIT_COPY = self._warn_on_implicit_copy + + def _make_ufunc_usecase(self, ufunc): + return _make_ufunc_usecase(ufunc) + + @functools.lru_cache(maxsize=None) + def _compile(self, pyfunc, args): + # We return an already-configured kernel so that basic_ufunc_test can + # call it just like it does for a CPU function + return cuda.jit(args)(pyfunc)[1, 1] + + def basic_int_ufunc_test(self, name=None): + skip_inputs = [ + types.float32, + types.float64, + types.Array(types.float32, 1, 'C'), + types.Array(types.float32, 2, 'C'), + types.Array(types.float64, 1, 'C'), + types.Array(types.float64, 2, 'C'), + types.Array(types.float64, 3, 'C'), + types.Array(types.float64, 2, 'F'), + types.complex64, + types.complex128, + types.Array(types.complex64, 1, 'C'), + types.Array(types.complex64, 2, 'C'), + types.Array(types.complex128, 1, 'C'), + types.Array(types.complex128, 2, 'C'), + ] + self.basic_ufunc_test(name, skip_inputs=skip_inputs) + + ############################################################################ + # Trigonometric Functions + + def test_sin_ufunc(self): + self.basic_ufunc_test(np.sin, kinds='cf') + + def test_cos_ufunc(self): + self.basic_ufunc_test(np.cos, kinds='cf') + + def test_tan_ufunc(self): + self.basic_ufunc_test(np.tan, kinds='cf') + + def test_arcsin_ufunc(self): + self.basic_ufunc_test(np.arcsin, kinds='cf') + + def test_arccos_ufunc(self): + self.basic_ufunc_test(np.arccos, kinds='cf') + + def test_arctan_ufunc(self): + self.basic_ufunc_test(np.arctan, kinds='cf') + + def test_arctan2_ufunc(self): + self.basic_ufunc_test(np.arctan2, kinds='f') + + def test_hypot_ufunc(self): + self.basic_ufunc_test(np.hypot, kinds='f') + + def test_sinh_ufunc(self): + self.basic_ufunc_test(np.sinh, kinds='cf') + + def test_cosh_ufunc(self): + self.basic_ufunc_test(np.cosh, kinds='cf') + + def test_tanh_ufunc(self): + self.basic_ufunc_test(np.tanh, 
kinds='cf') + + def test_arcsinh_ufunc(self): + self.basic_ufunc_test(np.arcsinh, kinds='cf') + + def test_arccosh_ufunc(self): + self.basic_ufunc_test(np.arccosh, kinds='cf') + + def test_arctanh_ufunc(self): + # arctanh is only valid is only finite in the range ]-1, 1[ + # This means that for any of the integer types it will produce + # conversion from infinity/-infinity to integer. That's undefined + # behavior in C, so the results may vary from implementation to + # implementation. This means that the result from the compiler + # used to compile NumPy may differ from the result generated by + # llvm. Skipping the integer types in this test avoids failed + # tests because of this. + to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32, + types.Array(types.int32, 1, 'C'), types.int32, + types.Array(types.uint64, 1, 'C'), types.uint64, + types.Array(types.int64, 1, 'C'), types.int64] + + self.basic_ufunc_test(np.arctanh, skip_inputs=to_skip, kinds='cf') + + def test_deg2rad_ufunc(self): + self.basic_ufunc_test(np.deg2rad, kinds='f') + + def test_rad2deg_ufunc(self): + self.basic_ufunc_test(np.rad2deg, kinds='f') + + def test_degrees_ufunc(self): + self.basic_ufunc_test(np.degrees, kinds='f') + + def test_radians_ufunc(self): + self.basic_ufunc_test(np.radians, kinds='f') + + ############################################################################ + # Comparison functions + def test_greater_ufunc(self): + self.signed_unsigned_cmp_test(np.greater) + + def test_greater_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.greater_equal) + + def test_less_ufunc(self): + self.signed_unsigned_cmp_test(np.less) + + def test_less_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.less_equal) + + def test_not_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.not_equal) + + def test_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.equal) + + def test_logical_and_ufunc(self): + self.basic_ufunc_test(np.logical_and) + + def test_logical_or_ufunc(self): + 
self.basic_ufunc_test(np.logical_or) + + def test_logical_xor_ufunc(self): + self.basic_ufunc_test(np.logical_xor) + + def test_logical_not_ufunc(self): + self.basic_ufunc_test(np.logical_not) + + def test_maximum_ufunc(self): + self.basic_ufunc_test(np.maximum) + + def test_minimum_ufunc(self): + self.basic_ufunc_test(np.minimum) + + def test_fmax_ufunc(self): + self.basic_ufunc_test(np.fmax) + + def test_fmin_ufunc(self): + self.basic_ufunc_test(np.fmin) + + def test_bitwise_and_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_and) + + def test_bitwise_or_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_or) + + def test_bitwise_xor_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_xor) + + def test_invert_ufunc(self): + self.basic_int_ufunc_test(np.invert) + + def test_bitwise_not_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_not) + + # Note: there is no entry for np.left_shift and np.right_shift + # because their implementations in NumPy have undefined behavior + # when the second argument is a negative. See the comment in + # numba/tests/test_ufuncs.py for more details. 
+ + ############################################################################ + # Mathematical Functions + + def test_log_ufunc(self): + self.basic_ufunc_test(np.log, kinds='cf') + + def test_log2_ufunc(self): + self.basic_ufunc_test(np.log2, kinds='cf') + + def test_log10_ufunc(self): + self.basic_ufunc_test(np.log10, kinds='cf') + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_vectorize_device.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_vectorize_device.py new file mode 100644 index 0000000000000000000000000000000000000000..e33598d8b7bccc3ced0ef7f9eb27fe93a6a84dce --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_vectorize_device.py @@ -0,0 +1,36 @@ +from numba import vectorize +from numba import cuda, float32 +import numpy as np +from numba.cuda.testing import skip_on_cudasim, CUDATestCase +import unittest + + +@skip_on_cudasim('ufunc API unsupported in the simulator') +class TestCudaVectorizeDeviceCall(CUDATestCase): + def test_cuda_vectorize_device_call(self): + + @cuda.jit(float32(float32, float32, float32), device=True) + def cu_device_fn(x, y, z): + return x ** y / z + + def cu_ufunc(x, y, z): + return cu_device_fn(x, y, z) + + ufunc = vectorize([float32(float32, float32, float32)], target='cuda')( + cu_ufunc) + + N = 100 + + X = np.array(np.random.sample(N), dtype=np.float32) + Y = np.array(np.random.sample(N), dtype=np.float32) + Z = np.array(np.random.sample(N), dtype=np.float32) + 0.1 + + out = ufunc(X, Y, Z) + + gold = (X ** Y) / Z + + self.assertTrue(np.allclose(out, gold)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudasim/__init__.py b/lib/python3.10/site-packages/numba/cuda/tests/cudasim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0465337eb70062fc004a0973c45d0be07803812a --- /dev/null +++ 
b/lib/python3.10/site-packages/numba/cuda/tests/cudasim/__init__.py @@ -0,0 +1,6 @@ +from numba.testing import load_testsuite +import os + + +def load_tests(loader, tests, pattern): + return load_testsuite(loader, os.path.dirname(__file__)) diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudasim/support.py b/lib/python3.10/site-packages/numba/cuda/tests/cudasim/support.py new file mode 100644 index 0000000000000000000000000000000000000000..4fca39cadd70bb9201ebef9716f52ca8aa22e7fd --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudasim/support.py @@ -0,0 +1,6 @@ +from numba import cuda + + +@cuda.jit(device=True) +def cuda_module_in_device_function(): + return cuda.threadIdx.x diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudasim/test_cudasim_issues.py b/lib/python3.10/site-packages/numba/cuda/tests/cudasim/test_cudasim_issues.py new file mode 100644 index 0000000000000000000000000000000000000000..0f544821ab8066351a04a82b1c5c84bba5389c59 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudasim/test_cudasim_issues.py @@ -0,0 +1,102 @@ +import threading + +import numpy as np + +from numba import cuda +from numba.cuda.testing import CUDATestCase, skip_unless_cudasim +import numba.cuda.simulator as simulator +import unittest + + +class TestCudaSimIssues(CUDATestCase): + def test_record_access(self): + backyard_type = [('statue', np.float64), + ('newspaper', np.float64, (6,))] + + goose_type = [('garden', np.float64, (12,)), + ('town', np.float64, (42,)), + ('backyard', backyard_type)] + + goose_np_type = np.dtype(goose_type, align=True) + + @cuda.jit + def simple_kernel(f): + f.garden[0] = 45.0 + f.backyard.newspaper[3] = 2.0 + f.backyard.newspaper[3] = f.backyard.newspaper[3] + 3.0 + + item = np.recarray(1, dtype=goose_np_type) + simple_kernel[1, 1](item[0]) + np.testing.assert_equal(item[0]['garden'][0], 45) + np.testing.assert_equal(item[0]['backyard']['newspaper'][3], 5) + + def test_recarray_setting(self): 
+ recordwith2darray = np.dtype([('i', np.int32), + ('j', np.float32, (3, 2))]) + rec = np.recarray(2, dtype=recordwith2darray) + rec[0]['i'] = 45 + + @cuda.jit + def simple_kernel(f): + f[1] = f[0] + simple_kernel[1, 1](rec) + np.testing.assert_equal(rec[0]['i'], rec[1]['i']) + + def test_cuda_module_in_device_function(self): + """ + Discovered in https://github.com/numba/numba/issues/1837. + When the `cuda` module is referenced in a device function, + it does not have the kernel API (e.g. cuda.threadIdx, cuda.shared) + """ + from numba.cuda.tests.cudasim import support + + inner = support.cuda_module_in_device_function + + @cuda.jit + def outer(out): + tid = inner() + if tid < out.size: + out[tid] = tid + + arr = np.zeros(10, dtype=np.int32) + outer[1, 11](arr) + expected = np.arange(arr.size, dtype=np.int32) + np.testing.assert_equal(expected, arr) + + @skip_unless_cudasim('Only works on CUDASIM') + def test_deadlock_on_exception(self): + def assert_no_blockthreads(): + blockthreads = [] + for t in threading.enumerate(): + if not isinstance(t, simulator.kernel.BlockThread): + continue + + # join blockthreads with a short timeout to allow aborted + # threads to exit + t.join(1) + if t.is_alive(): + self.fail("Blocked kernel thread: %s" % t) + + self.assertListEqual(blockthreads, []) + + @simulator.jit + def assign_with_sync(x, y): + i = cuda.grid(1) + y[i] = x[i] + + cuda.syncthreads() + cuda.syncthreads() + + x = np.arange(3) + y = np.empty(3) + assign_with_sync[1, 3](x, y) + np.testing.assert_array_equal(x, y) + assert_no_blockthreads() + + with self.assertRaises(IndexError): + assign_with_sync[1, 6](x, y) + assert_no_blockthreads() + + +if __name__ == '__main__': + unittest.main()