Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- deepseek/lib/python3.10/site-packages/numba/np/__pycache__/extensions.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/__pycache__/npyfuncs.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/__pycache__/numpy_support.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/arraymath.py +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/extensions.py +10 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/__init__.py +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/cmathimpl.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/mathimpl.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/numbers.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/mathimpl.py +452 -0
- deepseek/lib/python3.10/site-packages/numba/np/math/numbers.py +1390 -0
- deepseek/lib/python3.10/site-packages/numba/np/npyimpl.py +873 -0
- deepseek/lib/python3.10/site-packages/numba/np/polynomial/__init__.py +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_core.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/polynomial/polynomial_functions.py +375 -0
- deepseek/lib/python3.10/site-packages/numba/np/random/__init__.py +0 -0
- deepseek/lib/python3.10/site-packages/numba/np/random/generator_core.py +120 -0
- deepseek/lib/python3.10/site-packages/numba/np/random/random_methods.py +365 -0
- deepseek/lib/python3.10/site-packages/numba/np/ufunc/__init__.py +32 -0
- deepseek/lib/python3.10/site-packages/numba/np/ufunc/decorators.py +208 -0
- deepseek/lib/python3.10/site-packages/numba/np/ufunc/gufunc.py +279 -0
- deepseek/lib/python3.10/site-packages/numba/np/ufunc/ufuncbuilder.py +434 -0
- deepseek/lib/python3.10/site-packages/numba/np/ufunc/wrappers.py +743 -0
- deepseekvl2/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/INSTALLER +1 -0
- deepseekvl2/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/METADATA +75 -0
- deepseekvl2/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/WHEEL +4 -0
- deepseekvl2/lib/python3.10/site-packages/lit-18.1.8.dist-info/top_level.txt +1 -0
- deepseekvl2/lib/python3.10/site-packages/triton/__init__.py +52 -0
- deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/compiler.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/utils.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/compiler.py +1854 -0
- deepseekvl2/lib/python3.10/site-packages/triton/impl/__init__.py +18 -0
- deepseekvl2/lib/python3.10/site-packages/triton/impl/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/impl/__pycache__/base.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/impl/base.py +36 -0
- deepseekvl2/lib/python3.10/site-packages/triton/language/__pycache__/extern.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/language/random.py +177 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/__init__.py +14 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc +0 -0
- deepseekvl2/lib/python3.10/site-packages/triton/ops/blocksparse/__init__.py +7 -0
.gitattributes
CHANGED
|
@@ -671,3 +671,4 @@ deepseekvl2/lib/python3.10/site-packages/pillow.libs/liblzma-a5872208.so.5.6.3 f
|
|
| 671 |
deepseekvl2/lib/python3.10/site-packages/pillow.libs/libbrotlicommon-5b2eba61.so.1.1.0 filter=lfs diff=lfs merge=lfs -text
|
| 672 |
deepseekvl2/lib/python3.10/site-packages/pillow.libs/libopenjp2-ca16f087.so.2.5.3 filter=lfs diff=lfs merge=lfs -text
|
| 673 |
deepseek/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 671 |
deepseekvl2/lib/python3.10/site-packages/pillow.libs/libbrotlicommon-5b2eba61.so.1.1.0 filter=lfs diff=lfs merge=lfs -text
|
| 672 |
deepseekvl2/lib/python3.10/site-packages/pillow.libs/libopenjp2-ca16f087.so.2.5.3 filter=lfs diff=lfs merge=lfs -text
|
| 673 |
deepseek/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 674 |
+
deepseekvl2/lib/python3.10/site-packages/triton/third_party/cuda/lib/libdevice.10.bc filter=lfs diff=lfs merge=lfs -text
|
deepseek/lib/python3.10/site-packages/numba/np/__pycache__/extensions.cpython-310.pyc
ADDED
|
Binary file (274 Bytes). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime.cpython-310.pyc
ADDED
|
Binary file (21.1 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/__pycache__/npyfuncs.cpython-310.pyc
ADDED
|
Binary file (37.1 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/__pycache__/numpy_support.cpython-310.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/arraymath.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/extensions.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NumPy extensions.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from numba.np.arraymath import cross2d
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [
|
| 9 |
+
'cross2d'
|
| 10 |
+
]
|
deepseek/lib/python3.10/site-packages/numba/np/math/__init__.py
ADDED
|
File without changes
|
deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (167 Bytes). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/cmathimpl.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/mathimpl.cpython-310.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/math/__pycache__/numbers.cpython-310.pyc
ADDED
|
Binary file (28.4 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/math/mathimpl.py
ADDED
|
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Provide math calls that uses intrinsics or libc math functions.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import operator
|
| 7 |
+
import sys
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
import llvmlite.ir
|
| 11 |
+
from llvmlite.ir import Constant
|
| 12 |
+
|
| 13 |
+
from numba.core.imputils import impl_ret_untracked
|
| 14 |
+
from numba.core import types, config, cgutils
|
| 15 |
+
from numba.core.extending import overload
|
| 16 |
+
from numba.core.typing import signature
|
| 17 |
+
from numba.cpython.unsafe.numbers import trailing_zeros
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# registry = Registry('mathimpl')
|
| 21 |
+
# lower = registry.lower
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Helpers, shared with cmathimpl.
|
| 25 |
+
_NP_FLT_FINFO = np.finfo(np.dtype('float32'))
|
| 26 |
+
FLT_MAX = _NP_FLT_FINFO.max
|
| 27 |
+
FLT_MIN = _NP_FLT_FINFO.tiny
|
| 28 |
+
|
| 29 |
+
_NP_DBL_FINFO = np.finfo(np.dtype('float64'))
|
| 30 |
+
DBL_MAX = _NP_DBL_FINFO.max
|
| 31 |
+
DBL_MIN = _NP_DBL_FINFO.tiny
|
| 32 |
+
|
| 33 |
+
FLOAT_ABS_MASK = 0x7fffffff
|
| 34 |
+
FLOAT_SIGN_MASK = 0x80000000
|
| 35 |
+
DOUBLE_ABS_MASK = 0x7fffffffffffffff
|
| 36 |
+
DOUBLE_SIGN_MASK = 0x8000000000000000
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def is_nan(builder, val):
|
| 40 |
+
"""
|
| 41 |
+
Return a condition testing whether *val* is a NaN.
|
| 42 |
+
"""
|
| 43 |
+
return builder.fcmp_unordered('uno', val, val)
|
| 44 |
+
|
| 45 |
+
def is_inf(builder, val):
|
| 46 |
+
"""
|
| 47 |
+
Return a condition testing whether *val* is an infinite.
|
| 48 |
+
"""
|
| 49 |
+
pos_inf = Constant(val.type, float("+inf"))
|
| 50 |
+
neg_inf = Constant(val.type, float("-inf"))
|
| 51 |
+
isposinf = builder.fcmp_ordered('==', val, pos_inf)
|
| 52 |
+
isneginf = builder.fcmp_ordered('==', val, neg_inf)
|
| 53 |
+
return builder.or_(isposinf, isneginf)
|
| 54 |
+
|
| 55 |
+
def is_finite(builder, val):
|
| 56 |
+
"""
|
| 57 |
+
Return a condition testing whether *val* is a finite.
|
| 58 |
+
"""
|
| 59 |
+
# is_finite(x) <=> x - x != NaN
|
| 60 |
+
val_minus_val = builder.fsub(val, val)
|
| 61 |
+
return builder.fcmp_ordered('ord', val_minus_val, val_minus_val)
|
| 62 |
+
|
| 63 |
+
def f64_as_int64(builder, val):
|
| 64 |
+
"""
|
| 65 |
+
Bitcast a double into a 64-bit integer.
|
| 66 |
+
"""
|
| 67 |
+
assert val.type == llvmlite.ir.DoubleType()
|
| 68 |
+
return builder.bitcast(val, llvmlite.ir.IntType(64))
|
| 69 |
+
|
| 70 |
+
def int64_as_f64(builder, val):
|
| 71 |
+
"""
|
| 72 |
+
Bitcast a 64-bit integer into a double.
|
| 73 |
+
"""
|
| 74 |
+
assert val.type == llvmlite.ir.IntType(64)
|
| 75 |
+
return builder.bitcast(val, llvmlite.ir.DoubleType())
|
| 76 |
+
|
| 77 |
+
def f32_as_int32(builder, val):
|
| 78 |
+
"""
|
| 79 |
+
Bitcast a float into a 32-bit integer.
|
| 80 |
+
"""
|
| 81 |
+
assert val.type == llvmlite.ir.FloatType()
|
| 82 |
+
return builder.bitcast(val, llvmlite.ir.IntType(32))
|
| 83 |
+
|
| 84 |
+
def int32_as_f32(builder, val):
|
| 85 |
+
"""
|
| 86 |
+
Bitcast a 32-bit integer into a float.
|
| 87 |
+
"""
|
| 88 |
+
assert val.type == llvmlite.ir.IntType(32)
|
| 89 |
+
return builder.bitcast(val, llvmlite.ir.FloatType())
|
| 90 |
+
|
| 91 |
+
def negate_real(builder, val):
|
| 92 |
+
"""
|
| 93 |
+
Negate real number *val*, with proper handling of zeros.
|
| 94 |
+
"""
|
| 95 |
+
# The negative zero forces LLVM to handle signed zeros properly.
|
| 96 |
+
return builder.fsub(Constant(val.type, -0.0), val)
|
| 97 |
+
|
| 98 |
+
def call_fp_intrinsic(builder, name, args):
|
| 99 |
+
"""
|
| 100 |
+
Call a LLVM intrinsic floating-point operation.
|
| 101 |
+
"""
|
| 102 |
+
mod = builder.module
|
| 103 |
+
intr = mod.declare_intrinsic(name, [a.type for a in args])
|
| 104 |
+
return builder.call(intr, args)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _unary_int_input_wrapper_impl(wrapped_impl):
|
| 108 |
+
"""
|
| 109 |
+
Return an implementation factory to convert the single integral input
|
| 110 |
+
argument to a float64, then defer to the *wrapped_impl*.
|
| 111 |
+
"""
|
| 112 |
+
def implementer(context, builder, sig, args):
|
| 113 |
+
val, = args
|
| 114 |
+
input_type = sig.args[0]
|
| 115 |
+
fpval = context.cast(builder, val, input_type, types.float64)
|
| 116 |
+
inner_sig = signature(types.float64, types.float64)
|
| 117 |
+
res = wrapped_impl(context, builder, inner_sig, (fpval,))
|
| 118 |
+
return context.cast(builder, res, types.float64, sig.return_type)
|
| 119 |
+
|
| 120 |
+
return implementer
|
| 121 |
+
|
| 122 |
+
def unary_math_int_impl(fn, float_impl):
|
| 123 |
+
impl = _unary_int_input_wrapper_impl(float_impl)
|
| 124 |
+
# lower(fn, types.Integer)(impl)
|
| 125 |
+
|
| 126 |
+
def unary_math_intr(fn, intrcode):
|
| 127 |
+
"""
|
| 128 |
+
Implement the math function *fn* using the LLVM intrinsic *intrcode*.
|
| 129 |
+
"""
|
| 130 |
+
# @lower(fn, types.Float)
|
| 131 |
+
def float_impl(context, builder, sig, args):
|
| 132 |
+
res = call_fp_intrinsic(builder, intrcode, args)
|
| 133 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 134 |
+
|
| 135 |
+
unary_math_int_impl(fn, float_impl)
|
| 136 |
+
return float_impl
|
| 137 |
+
|
| 138 |
+
def unary_math_extern(fn, f32extern, f64extern, int_restype=False):
|
| 139 |
+
"""
|
| 140 |
+
Register implementations of Python function *fn* using the
|
| 141 |
+
external function named *f32extern* and *f64extern* (for float32
|
| 142 |
+
and float64 inputs, respectively).
|
| 143 |
+
If *int_restype* is true, then the function's return value should be
|
| 144 |
+
integral, otherwise floating-point.
|
| 145 |
+
"""
|
| 146 |
+
f_restype = types.int64 if int_restype else None
|
| 147 |
+
|
| 148 |
+
def float_impl(context, builder, sig, args):
|
| 149 |
+
"""
|
| 150 |
+
Implement *fn* for a types.Float input.
|
| 151 |
+
"""
|
| 152 |
+
[val] = args
|
| 153 |
+
mod = builder.module
|
| 154 |
+
input_type = sig.args[0]
|
| 155 |
+
lty = context.get_value_type(input_type)
|
| 156 |
+
func_name = {
|
| 157 |
+
types.float32: f32extern,
|
| 158 |
+
types.float64: f64extern,
|
| 159 |
+
}[input_type]
|
| 160 |
+
fnty = llvmlite.ir.FunctionType(lty, [lty])
|
| 161 |
+
fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name)
|
| 162 |
+
res = builder.call(fn, (val,))
|
| 163 |
+
res = context.cast(builder, res, input_type, sig.return_type)
|
| 164 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 165 |
+
|
| 166 |
+
# lower(fn, types.Float)(float_impl)
|
| 167 |
+
|
| 168 |
+
# Implement wrapper for integer inputs
|
| 169 |
+
unary_math_int_impl(fn, float_impl)
|
| 170 |
+
|
| 171 |
+
return float_impl
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
unary_math_intr(math.fabs, 'llvm.fabs')
|
| 175 |
+
exp_impl = unary_math_intr(math.exp, 'llvm.exp')
|
| 176 |
+
log_impl = unary_math_intr(math.log, 'llvm.log')
|
| 177 |
+
log10_impl = unary_math_intr(math.log10, 'llvm.log10')
|
| 178 |
+
sin_impl = unary_math_intr(math.sin, 'llvm.sin')
|
| 179 |
+
cos_impl = unary_math_intr(math.cos, 'llvm.cos')
|
| 180 |
+
|
| 181 |
+
log1p_impl = unary_math_extern(math.log1p, "log1pf", "log1p")
|
| 182 |
+
expm1_impl = unary_math_extern(math.expm1, "expm1f", "expm1")
|
| 183 |
+
erf_impl = unary_math_extern(math.erf, "erff", "erf")
|
| 184 |
+
erfc_impl = unary_math_extern(math.erfc, "erfcf", "erfc")
|
| 185 |
+
|
| 186 |
+
tan_impl = unary_math_extern(math.tan, "tanf", "tan")
|
| 187 |
+
asin_impl = unary_math_extern(math.asin, "asinf", "asin")
|
| 188 |
+
acos_impl = unary_math_extern(math.acos, "acosf", "acos")
|
| 189 |
+
atan_impl = unary_math_extern(math.atan, "atanf", "atan")
|
| 190 |
+
|
| 191 |
+
asinh_impl = unary_math_extern(math.asinh, "asinhf", "asinh")
|
| 192 |
+
acosh_impl = unary_math_extern(math.acosh, "acoshf", "acosh")
|
| 193 |
+
atanh_impl = unary_math_extern(math.atanh, "atanhf", "atanh")
|
| 194 |
+
sinh_impl = unary_math_extern(math.sinh, "sinhf", "sinh")
|
| 195 |
+
cosh_impl = unary_math_extern(math.cosh, "coshf", "cosh")
|
| 196 |
+
tanh_impl = unary_math_extern(math.tanh, "tanhf", "tanh")
|
| 197 |
+
|
| 198 |
+
log2_impl = unary_math_extern(math.log2, "log2f", "log2")
|
| 199 |
+
ceil_impl = unary_math_extern(math.ceil, "ceilf", "ceil", True)
|
| 200 |
+
floor_impl = unary_math_extern(math.floor, "floorf", "floor", True)
|
| 201 |
+
|
| 202 |
+
gamma_impl = unary_math_extern(math.gamma, "numba_gammaf", "numba_gamma") # work-around
|
| 203 |
+
sqrt_impl = unary_math_extern(math.sqrt, "sqrtf", "sqrt")
|
| 204 |
+
trunc_impl = unary_math_extern(math.trunc, "truncf", "trunc", True)
|
| 205 |
+
lgamma_impl = unary_math_extern(math.lgamma, "lgammaf", "lgamma")
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
# @lower(math.isnan, types.Float)
|
| 209 |
+
def isnan_float_impl(context, builder, sig, args):
|
| 210 |
+
[val] = args
|
| 211 |
+
res = is_nan(builder, val)
|
| 212 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 213 |
+
|
| 214 |
+
# @lower(math.isnan, types.Integer)
|
| 215 |
+
def isnan_int_impl(context, builder, sig, args):
|
| 216 |
+
res = cgutils.false_bit
|
| 217 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
# @lower(math.isinf, types.Float)
|
| 221 |
+
def isinf_float_impl(context, builder, sig, args):
|
| 222 |
+
[val] = args
|
| 223 |
+
res = is_inf(builder, val)
|
| 224 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 225 |
+
|
| 226 |
+
# @lower(math.isinf, types.Integer)
|
| 227 |
+
def isinf_int_impl(context, builder, sig, args):
|
| 228 |
+
res = cgutils.false_bit
|
| 229 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
# @lower(math.isfinite, types.Float)
|
| 233 |
+
def isfinite_float_impl(context, builder, sig, args):
|
| 234 |
+
[val] = args
|
| 235 |
+
res = is_finite(builder, val)
|
| 236 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# @lower(math.isfinite, types.Integer)
|
| 240 |
+
def isfinite_int_impl(context, builder, sig, args):
|
| 241 |
+
res = cgutils.true_bit
|
| 242 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
# @lower(math.copysign, types.Float, types.Float)
|
| 246 |
+
def copysign_float_impl(context, builder, sig, args):
|
| 247 |
+
lty = args[0].type
|
| 248 |
+
mod = builder.module
|
| 249 |
+
fn = cgutils.get_or_insert_function(mod, llvmlite.ir.FunctionType(lty, (lty, lty)),
|
| 250 |
+
'llvm.copysign.%s' % lty.intrinsic_name)
|
| 251 |
+
res = builder.call(fn, args)
|
| 252 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
# -----------------------------------------------------------------------------
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# @lower(math.frexp, types.Float)
|
| 259 |
+
def frexp_impl(context, builder, sig, args):
|
| 260 |
+
val, = args
|
| 261 |
+
fltty = context.get_data_type(sig.args[0])
|
| 262 |
+
intty = context.get_data_type(sig.return_type[1])
|
| 263 |
+
expptr = cgutils.alloca_once(builder, intty, name='exp')
|
| 264 |
+
fnty = llvmlite.ir.FunctionType(fltty, (fltty, llvmlite.ir.PointerType(intty)))
|
| 265 |
+
fname = {
|
| 266 |
+
"float": "numba_frexpf",
|
| 267 |
+
"double": "numba_frexp",
|
| 268 |
+
}[str(fltty)]
|
| 269 |
+
fn = cgutils.get_or_insert_function(builder.module, fnty, fname)
|
| 270 |
+
res = builder.call(fn, (val, expptr))
|
| 271 |
+
res = cgutils.make_anonymous_struct(builder, (res, builder.load(expptr)))
|
| 272 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# @lower(math.ldexp, types.Float, types.intc)
|
| 276 |
+
def ldexp_impl(context, builder, sig, args):
|
| 277 |
+
val, exp = args
|
| 278 |
+
fltty, intty = map(context.get_data_type, sig.args)
|
| 279 |
+
fnty = llvmlite.ir.FunctionType(fltty, (fltty, intty))
|
| 280 |
+
fname = {
|
| 281 |
+
"float": "numba_ldexpf",
|
| 282 |
+
"double": "numba_ldexp",
|
| 283 |
+
}[str(fltty)]
|
| 284 |
+
fn = cgutils.insert_pure_function(builder.module, fnty, name=fname)
|
| 285 |
+
res = builder.call(fn, (val, exp))
|
| 286 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# -----------------------------------------------------------------------------
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# @lower(math.atan2, types.int64, types.int64)
|
| 293 |
+
def atan2_s64_impl(context, builder, sig, args):
|
| 294 |
+
[y, x] = args
|
| 295 |
+
y = builder.sitofp(y, llvmlite.ir.DoubleType())
|
| 296 |
+
x = builder.sitofp(x, llvmlite.ir.DoubleType())
|
| 297 |
+
fsig = signature(types.float64, types.float64, types.float64)
|
| 298 |
+
return atan2_float_impl(context, builder, fsig, (y, x))
|
| 299 |
+
|
| 300 |
+
# @lower(math.atan2, types.uint64, types.uint64)
|
| 301 |
+
def atan2_u64_impl(context, builder, sig, args):
|
| 302 |
+
[y, x] = args
|
| 303 |
+
y = builder.uitofp(y, llvmlite.ir.DoubleType())
|
| 304 |
+
x = builder.uitofp(x, llvmlite.ir.DoubleType())
|
| 305 |
+
fsig = signature(types.float64, types.float64, types.float64)
|
| 306 |
+
return atan2_float_impl(context, builder, fsig, (y, x))
|
| 307 |
+
|
| 308 |
+
# @lower(math.atan2, types.Float, types.Float)
|
| 309 |
+
def atan2_float_impl(context, builder, sig, args):
|
| 310 |
+
assert len(args) == 2
|
| 311 |
+
mod = builder.module
|
| 312 |
+
ty = sig.args[0]
|
| 313 |
+
lty = context.get_value_type(ty)
|
| 314 |
+
func_name = {
|
| 315 |
+
types.float32: "atan2f",
|
| 316 |
+
types.float64: "atan2"
|
| 317 |
+
}[ty]
|
| 318 |
+
fnty = llvmlite.ir.FunctionType(lty, (lty, lty))
|
| 319 |
+
fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name)
|
| 320 |
+
res = builder.call(fn, args)
|
| 321 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
# -----------------------------------------------------------------------------
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
# @lower(math.hypot, types.int64, types.int64)
|
| 328 |
+
def hypot_s64_impl(context, builder, sig, args):
|
| 329 |
+
[x, y] = args
|
| 330 |
+
y = builder.sitofp(y, llvmlite.ir.DoubleType())
|
| 331 |
+
x = builder.sitofp(x, llvmlite.ir.DoubleType())
|
| 332 |
+
fsig = signature(types.float64, types.float64, types.float64)
|
| 333 |
+
res = hypot_float_impl(context, builder, fsig, (x, y))
|
| 334 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
# @lower(math.hypot, types.uint64, types.uint64)
|
| 338 |
+
def hypot_u64_impl(context, builder, sig, args):
|
| 339 |
+
[x, y] = args
|
| 340 |
+
y = builder.sitofp(y, llvmlite.ir.DoubleType())
|
| 341 |
+
x = builder.sitofp(x, llvmlite.ir.DoubleType())
|
| 342 |
+
fsig = signature(types.float64, types.float64, types.float64)
|
| 343 |
+
res = hypot_float_impl(context, builder, fsig, (x, y))
|
| 344 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
# @lower(math.hypot, types.Float, types.Float)
|
| 348 |
+
def hypot_float_impl(context, builder, sig, args):
|
| 349 |
+
xty, yty = sig.args
|
| 350 |
+
assert xty == yty == sig.return_type
|
| 351 |
+
x, y = args
|
| 352 |
+
|
| 353 |
+
# Windows has alternate names for hypot/hypotf, see
|
| 354 |
+
# https://msdn.microsoft.com/fr-fr/library/a9yb3dbt%28v=vs.80%29.aspx
|
| 355 |
+
fname = {
|
| 356 |
+
types.float32: "_hypotf" if sys.platform == 'win32' else "hypotf",
|
| 357 |
+
types.float64: "_hypot" if sys.platform == 'win32' else "hypot",
|
| 358 |
+
}[xty]
|
| 359 |
+
plat_hypot = types.ExternalFunction(fname, sig)
|
| 360 |
+
|
| 361 |
+
if sys.platform == 'win32' and config.MACHINE_BITS == 32:
|
| 362 |
+
inf = xty(float('inf'))
|
| 363 |
+
|
| 364 |
+
def hypot_impl(x, y):
|
| 365 |
+
if math.isinf(x) or math.isinf(y):
|
| 366 |
+
return inf
|
| 367 |
+
return plat_hypot(x, y)
|
| 368 |
+
else:
|
| 369 |
+
def hypot_impl(x, y):
|
| 370 |
+
return plat_hypot(x, y)
|
| 371 |
+
|
| 372 |
+
res = context.compile_internal(builder, hypot_impl, sig, args)
|
| 373 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
# -----------------------------------------------------------------------------
|
| 377 |
+
|
| 378 |
+
# @lower(math.radians, types.Float)
|
| 379 |
+
def radians_float_impl(context, builder, sig, args):
|
| 380 |
+
[x] = args
|
| 381 |
+
coef = context.get_constant(sig.return_type, math.pi / 180)
|
| 382 |
+
res = builder.fmul(x, coef)
|
| 383 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 384 |
+
|
| 385 |
+
unary_math_int_impl(math.radians, radians_float_impl)
|
| 386 |
+
|
| 387 |
+
# -----------------------------------------------------------------------------
|
| 388 |
+
|
| 389 |
+
# @lower(math.degrees, types.Float)
|
| 390 |
+
def degrees_float_impl(context, builder, sig, args):
|
| 391 |
+
[x] = args
|
| 392 |
+
coef = context.get_constant(sig.return_type, 180 / math.pi)
|
| 393 |
+
res = builder.fmul(x, coef)
|
| 394 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 395 |
+
|
| 396 |
+
unary_math_int_impl(math.degrees, degrees_float_impl)
|
| 397 |
+
|
| 398 |
+
# -----------------------------------------------------------------------------
|
| 399 |
+
|
| 400 |
+
# @lower(math.pow, types.Float, types.Float)
|
| 401 |
+
# @lower(math.pow, types.Float, types.Integer)
|
| 402 |
+
def pow_impl(context, builder, sig, args):
|
| 403 |
+
impl = context.get_function(operator.pow, sig)
|
| 404 |
+
return impl(builder, args)
|
| 405 |
+
|
| 406 |
+
# -----------------------------------------------------------------------------
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def _unsigned(T):
|
| 410 |
+
"""Convert integer to unsigned integer of equivalent width."""
|
| 411 |
+
pass
|
| 412 |
+
|
| 413 |
+
@overload(_unsigned)
|
| 414 |
+
def _unsigned_impl(T):
|
| 415 |
+
if T in types.unsigned_domain:
|
| 416 |
+
return lambda T: T
|
| 417 |
+
elif T in types.signed_domain:
|
| 418 |
+
newT = getattr(types, 'uint{}'.format(T.bitwidth))
|
| 419 |
+
return lambda T: newT(T)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
def gcd_impl(context, builder, sig, args):
|
| 423 |
+
xty, yty = sig.args
|
| 424 |
+
assert xty == yty == sig.return_type
|
| 425 |
+
x, y = args
|
| 426 |
+
|
| 427 |
+
def gcd(a, b):
|
| 428 |
+
"""
|
| 429 |
+
Stein's algorithm, heavily cribbed from Julia implementation.
|
| 430 |
+
"""
|
| 431 |
+
T = type(a)
|
| 432 |
+
if a == 0: return abs(b)
|
| 433 |
+
if b == 0: return abs(a)
|
| 434 |
+
za = trailing_zeros(a)
|
| 435 |
+
zb = trailing_zeros(b)
|
| 436 |
+
k = min(za, zb)
|
| 437 |
+
# Uses np.*_shift instead of operators due to return types
|
| 438 |
+
u = _unsigned(abs(np.right_shift(a, za)))
|
| 439 |
+
v = _unsigned(abs(np.right_shift(b, zb)))
|
| 440 |
+
while u != v:
|
| 441 |
+
if u > v:
|
| 442 |
+
u, v = v, u
|
| 443 |
+
v -= u
|
| 444 |
+
v = np.right_shift(v, trailing_zeros(v))
|
| 445 |
+
r = np.left_shift(T(u), k)
|
| 446 |
+
return r
|
| 447 |
+
|
| 448 |
+
res = context.compile_internal(builder, gcd, sig, args)
|
| 449 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
# lower(math.gcd, types.Integer, types.Integer)(gcd_impl)
|
deepseek/lib/python3.10/site-packages/numba/np/math/numbers.py
ADDED
|
@@ -0,0 +1,1390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import numbers
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from llvmlite import ir
|
| 7 |
+
from llvmlite.ir import Constant
|
| 8 |
+
|
| 9 |
+
from numba.core.imputils import impl_ret_untracked
|
| 10 |
+
from numba.core import typing, types, errors, cgutils
|
| 11 |
+
from numba.cpython.unsafe.numbers import viewer
|
| 12 |
+
|
| 13 |
+
def _int_arith_flags(rettype):
|
| 14 |
+
"""
|
| 15 |
+
Return the modifier flags for integer arithmetic.
|
| 16 |
+
"""
|
| 17 |
+
if rettype.signed:
|
| 18 |
+
# Ignore the effects of signed overflow. This is important for
|
| 19 |
+
# optimization of some indexing operations. For example
|
| 20 |
+
# array[i+1] could see `i+1` trigger a signed overflow and
|
| 21 |
+
# give a negative number. With Python's indexing, a negative
|
| 22 |
+
# index is treated differently: its resolution has a runtime cost.
|
| 23 |
+
# Telling LLVM to ignore signed overflows allows it to optimize
|
| 24 |
+
# away the check for a negative `i+1` if it knows `i` is positive.
|
| 25 |
+
return ['nsw']
|
| 26 |
+
else:
|
| 27 |
+
return []
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def int_add_impl(context, builder, sig, args):
    """Lower integer ``+``: cast both operands to the return type, then add."""
    restype = sig.return_type
    lhs = context.cast(builder, args[0], sig.args[0], restype)
    rhs = context.cast(builder, args[1], sig.args[1], restype)
    total = builder.add(lhs, rhs, flags=_int_arith_flags(restype))
    return impl_ret_untracked(context, builder, restype, total)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def int_sub_impl(context, builder, sig, args):
    """Lower integer ``-``: cast both operands to the return type, then subtract."""
    restype = sig.return_type
    lhs = context.cast(builder, args[0], sig.args[0], restype)
    rhs = context.cast(builder, args[1], sig.args[1], restype)
    diff = builder.sub(lhs, rhs, flags=_int_arith_flags(restype))
    return impl_ret_untracked(context, builder, restype, diff)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def int_mul_impl(context, builder, sig, args):
    """Lower integer ``*``: cast both operands to the return type, then multiply."""
    restype = sig.return_type
    lhs = context.cast(builder, args[0], sig.args[0], restype)
    rhs = context.cast(builder, args[1], sig.args[1], restype)
    product = builder.mul(lhs, rhs, flags=_int_arith_flags(restype))
    return impl_ret_untracked(context, builder, restype, product)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def int_divmod_signed(context, builder, ty, x, y):
    """
    Signed integer divmod with Python (floor) semantics.

    Reference Objects/intobject.c
    xdivy = x / y;
    xmody = (long)(x - (unsigned long)xdivy * y);
    /* If the signs of x and y differ, and the remainder is non-0,
     * C89 doesn't define whether xdivy is now the floor or the
     * ceiling of the infinitely precise quotient. We want the floor,
     * and we have it iff the remainder's sign matches y's.
     */
    if (xmody && ((y ^ xmody) < 0) /* i.e. and signs differ */) {
        xmody += y;
        --xdivy;
        assert(xmody && ((y ^ xmody) >= 0));
    }
    *p_xdivy = xdivy;
    *p_xmody = xmody;
    """
    assert x.type == y.type

    ZERO = y.type(0)
    ONE = y.type(1)

    # NOTE: On x86 at least, dividing the lowest representable integer
    # (e.g. 0x80000000 for int32) by -1 causes a SIFGPE (division overflow),
    # causing the process to crash.
    # We return 0, 0 instead (more or less like Numpy).

    # Stack slots for the results, pre-filled with the overflow answer (0, 0)
    resdiv = cgutils.alloca_once_value(builder, ZERO)
    resmod = cgutils.alloca_once_value(builder, ZERO)

    # Overflow happens only for minval // -1
    is_overflow = builder.and_(
        builder.icmp_signed('==', x, x.type(ty.minval)),
        builder.icmp_signed('==', y, y.type(-1)))

    with builder.if_then(builder.not_(is_overflow), likely=True):
        # Note LLVM will optimize this to a single divmod instruction,
        # if available on the target CPU (e.g. x86).
        xdivy = builder.sdiv(x, y)
        xmody = builder.srem(x, y)

        # Signs of y and remainder differ iff their xor is negative
        y_xor_xmody_ltz = builder.icmp_signed('<', builder.xor(y, xmody), ZERO)
        xmody_istrue = builder.icmp_signed('!=', xmody, ZERO)
        cond = builder.and_(xmody_istrue, y_xor_xmody_ltz)

        with builder.if_else(cond) as (if_different_signs, if_same_signs):
            with if_same_signs:
                # Truncated C result already equals the floored result
                builder.store(xdivy, resdiv)
                builder.store(xmody, resmod)

            with if_different_signs:
                # Adjust truncated quotient/remainder to floor semantics
                builder.store(builder.sub(xdivy, ONE), resdiv)
                builder.store(builder.add(xmody, y), resmod)

    return builder.load(resdiv), builder.load(resmod)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def int_divmod(context, builder, ty, x, y):
    """
    Integer divmod(x, y). The caller must ensure that y != 0.
    """
    if not ty.signed:
        # Unsigned division is already "floor" division
        return builder.udiv(x, y), builder.urem(x, y)
    return int_divmod_signed(context, builder, ty, x, y)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def _int_divmod_impl(context, builder, sig, args, zerodiv_message):
    """
    Shared lowering helper for integer divmod / floordiv / mod.

    Returns (quot, rem) as pointers to stack slots; callers load the
    part(s) they need.  On a zero divisor, either raises through the
    error model or (best-effort mode) stores zero into both slots.
    """
    va, vb = args
    ta, tb = sig.args

    ty = sig.return_type
    if isinstance(ty, types.UniTuple):
        # divmod() returns a homogeneous 2-tuple; compute in its dtype
        ty = ty.dtype
    a = context.cast(builder, va, ta, ty)
    b = context.cast(builder, vb, tb, ty)
    quot = cgutils.alloca_once(builder, a.type, name="quot")
    rem = cgutils.alloca_once(builder, a.type, name="rem")

    with builder.if_else(cgutils.is_scalar_zero(builder, b), likely=False
                         ) as (if_zero, if_non_zero):
        with if_zero:
            if not context.error_model.fp_zero_division(
                    builder, (zerodiv_message,)):
                # No exception raised => return 0
                # XXX We should also set the FPU exception status, but
                # there's no easy way to do that from LLVM.
                # (b is zero here, so this stores 0 into both slots.)
                builder.store(b, quot)
                builder.store(b, rem)
        with if_non_zero:
            q, r = int_divmod(context, builder, ty, a, b)
            builder.store(q, quot)
            builder.store(r, rem)

    return quot, rem
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# @lower_builtin(divmod, types.Integer, types.Integer)
|
| 155 |
+
# @lower_builtin(divmod, types.Integer, types.Integer)
def int_divmod_impl(context, builder, sig, args):
    """Lower ``divmod(int, int)``: pack quotient and remainder into a 2-tuple."""
    pquot, prem = _int_divmod_impl(context, builder, sig, args,
                                   "integer divmod by zero")
    parts = (builder.load(pquot), builder.load(prem))
    return cgutils.pack_array(builder, parts)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# @lower_builtin(operator.floordiv, types.Integer, types.Integer)
|
| 164 |
+
# @lower_builtin(operator.ifloordiv, types.Integer, types.Integer)
|
| 165 |
+
# @lower_builtin(operator.floordiv, types.Integer, types.Integer)
# @lower_builtin(operator.ifloordiv, types.Integer, types.Integer)
def int_floordiv_impl(context, builder, sig, args):
    """Lower integer ``//``: keep only the quotient of the shared divmod helper."""
    pquot, _prem = _int_divmod_impl(context, builder, sig, args,
                                    "integer division by zero")
    return builder.load(pquot)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# @lower_builtin(operator.truediv, types.Integer, types.Integer)
|
| 172 |
+
# @lower_builtin(operator.itruediv, types.Integer, types.Integer)
|
| 173 |
+
# @lower_builtin(operator.truediv, types.Integer, types.Integer)
# @lower_builtin(operator.itruediv, types.Integer, types.Integer)
def int_truediv_impl(context, builder, sig, args):
    """Lower integer true division: cast to the float return type, then fdiv."""
    restype = sig.return_type
    num = context.cast(builder, args[0], sig.args[0], restype)
    den = context.cast(builder, args[1], sig.args[1], restype)
    with cgutils.if_zero(builder, den):
        # Signal (or raise) per the active error model before dividing
        context.error_model.fp_zero_division(builder, ("division by zero",))
    quotient = builder.fdiv(num, den)
    return impl_ret_untracked(context, builder, restype, quotient)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
# @lower_builtin(operator.mod, types.Integer, types.Integer)
|
| 185 |
+
# @lower_builtin(operator.imod, types.Integer, types.Integer)
|
| 186 |
+
# @lower_builtin(operator.mod, types.Integer, types.Integer)
# @lower_builtin(operator.imod, types.Integer, types.Integer)
def int_rem_impl(context, builder, sig, args):
    """Lower integer ``%``: keep only the remainder of the shared divmod helper."""
    _pquot, prem = _int_divmod_impl(context, builder, sig, args,
                                    "integer modulo by zero")
    return builder.load(prem)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _get_power_zerodiv_return(context, return_type):
    """
    Return the sentinel value for ``0 ** <negative int>`` when the error
    model does not raise, or False when a ZeroDivisionError should be used.
    """
    non_raising = not context.error_model.raise_on_fp_zero_division
    if isinstance(return_type, types.Integer) and non_raising:
        # If not raising, return 0x8000... when computing 0 ** <negative number>
        return -1 << (return_type.bitwidth - 1)
    return False
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def int_power_impl(context, builder, sig, args):
    """
    a ^ b, where a is an integer or real, and b an integer

    Lowered by compiling a pure-Python square-and-multiply helper with
    `context.compile_internal`.
    """
    is_integer = isinstance(sig.args[0], types.Integer)
    tp = sig.return_type
    zerodiv_return = _get_power_zerodiv_return(context, tp)

    def int_power(a, b):
        # Ensure computations are done with a large enough width
        r = tp(1)
        a = tp(a)
        if b < 0:
            invert = True
            exp = -b
            if exp < 0:
                # -b overflowed (b was the minimal signed value)
                raise OverflowError
            if is_integer:
                if a == 0:
                    if zerodiv_return:
                        return zerodiv_return
                    else:
                        raise ZeroDivisionError("0 cannot be raised to a negative power")
                if a != 1 and a != -1:
                    # |a| >= 2 with negative exponent: 1/a**exp truncates to 0
                    return 0
        else:
            invert = False
            exp = b
        if exp > 0x10000:
            # Optimization cutoff: fallback on the generic algorithm
            return math.pow(a, float(b))
        # Square-and-multiply exponentiation
        while exp != 0:
            if exp & 1:
                r *= a
            exp >>= 1
            a *= a

        return 1.0 / r if invert else r

    res = context.compile_internal(builder, int_power, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# @lower_builtin(operator.pow, types.Integer, types.IntegerLiteral)
|
| 245 |
+
# @lower_builtin(operator.ipow, types.Integer, types.IntegerLiteral)
|
| 246 |
+
# @lower_builtin(operator.pow, types.Float, types.IntegerLiteral)
|
| 247 |
+
# @lower_builtin(operator.ipow, types.Float, types.IntegerLiteral)
|
| 248 |
+
# @lower_builtin(operator.pow, types.Integer, types.IntegerLiteral)
# @lower_builtin(operator.ipow, types.Integer, types.IntegerLiteral)
# @lower_builtin(operator.pow, types.Float, types.IntegerLiteral)
# @lower_builtin(operator.ipow, types.Float, types.IntegerLiteral)
def static_power_impl(context, builder, sig, args):
    """
    a ^ b, where a is an integer or real, and b a constant integer

    The exponentiation loop is fully unrolled at compile time since the
    exponent is a literal.  Raises NotImplementedError to punt back to the
    generic implementation when the exponent is not integral or too large.
    """
    exp = sig.args[1].value
    if not isinstance(exp, numbers.Integral):
        raise NotImplementedError
    if abs(exp) > 0x10000:
        # Optimization cutoff: fallback on the generic algorithm above
        raise NotImplementedError
    invert = exp < 0
    exp = abs(exp)

    tp = sig.return_type
    is_integer = isinstance(tp, types.Integer)
    zerodiv_return = _get_power_zerodiv_return(context, tp)

    val = context.cast(builder, args[0], sig.args[0], tp)
    lty = val.type

    def mul(a, b):
        # Integer vs float multiply, chosen once per specialization
        if is_integer:
            return builder.mul(a, b)
        else:
            return builder.fmul(a, b)

    # Unroll the exponentiation loop
    res = lty(1)
    a = val
    while exp != 0:
        if exp & 1:
            res = mul(res, val)
        exp >>= 1
        val = mul(val, val)

    if invert:
        # If the exponent was negative, fix the result by inverting it
        if is_integer:
            # Integer inversion
            def invert_impl(a):
                if a == 0:
                    if zerodiv_return:
                        return zerodiv_return
                    else:
                        raise ZeroDivisionError("0 cannot be raised to a negative power")
                if a != 1 and a != -1:
                    return 0
                else:
                    return a

        else:
            # Real inversion
            def invert_impl(a):
                return 1.0 / a

        res = context.compile_internal(builder, invert_impl,
                                       typing.signature(tp, tp), (res,))

    return res
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def int_slt_impl(context, builder, sig, args):
    """Signed integer ``<`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_signed('<', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def int_sle_impl(context, builder, sig, args):
    """Signed integer ``<=`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_signed('<=', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def int_sgt_impl(context, builder, sig, args):
    """Signed integer ``>`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_signed('>', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def int_sge_impl(context, builder, sig, args):
    """Signed integer ``>=`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_signed('>=', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def int_ult_impl(context, builder, sig, args):
    """Unsigned integer ``<`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_unsigned('<', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def int_ule_impl(context, builder, sig, args):
    """Unsigned integer ``<=`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_unsigned('<=', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def int_ugt_impl(context, builder, sig, args):
    """Unsigned integer ``>`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_unsigned('>', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def int_uge_impl(context, builder, sig, args):
    """Unsigned integer ``>=`` comparison."""
    lhs, rhs = args
    flag = builder.icmp_unsigned('>=', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def int_eq_impl(context, builder, sig, args):
    """Integer ``==`` comparison (signedness-independent)."""
    lhs, rhs = args
    flag = builder.icmp_unsigned('==', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def int_ne_impl(context, builder, sig, args):
    """Integer ``!=`` comparison (signedness-independent)."""
    lhs, rhs = args
    flag = builder.icmp_unsigned('!=', lhs, rhs)
    return impl_ret_untracked(context, builder, sig.return_type, flag)
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def int_signed_unsigned_cmp(op):
    """
    Return a lowering implementation comparing a signed left operand
    against an unsigned right operand with comparison operator *op*.
    """
    def impl(context, builder, sig, args):
        (left, right) = args
        # This code is translated from the NumPy source.
        # What we're going to do is divide the range of a signed value at zero.
        # If the signed value is less than zero, then we can treat zero as the
        # unsigned value since the unsigned value is necessarily zero or larger
        # and any signed comparison between a negative value and zero/infinity
        # will yield the same result. If the signed value is greater than or
        # equal to zero, then we can safely cast it to an unsigned value and do
        # the expected unsigned-unsigned comparison operation.
        # Original: https://github.com/numpy/numpy/pull/23713
        cmp_zero = builder.icmp_signed('<', left, Constant(left.type, 0))
        # Result when left < 0: compare left against zero instead of right
        lt_zero = builder.icmp_signed(op, left, Constant(left.type, 0))
        # Result when left >= 0: plain unsigned comparison is safe
        ge_zero = builder.icmp_unsigned(op, left, right)
        res = builder.select(cmp_zero, lt_zero, ge_zero)
        return impl_ret_untracked(context, builder, sig.return_type, res)
    return impl
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def int_unsigned_signed_cmp(op):
    """
    Return a lowering implementation comparing an unsigned left operand
    against a signed right operand with comparison operator *op*.
    """
    def impl(context, builder, sig, args):
        (left, right) = args
        # See the function `int_signed_unsigned_cmp` for implementation notes.
        cmp_zero = builder.icmp_signed('<', right, Constant(right.type, 0))
        # Result when right < 0: compare zero against right instead of left
        lt_zero = builder.icmp_signed(op, Constant(right.type, 0), right)
        # Result when right >= 0: plain unsigned comparison is safe
        ge_zero = builder.icmp_unsigned(op, left, right)
        res = builder.select(cmp_zero, lt_zero, ge_zero)
        return impl_ret_untracked(context, builder, sig.return_type, res)
    return impl
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def int_abs_impl(context, builder, sig, args):
    """Lower ``abs()`` of a signed integer via a branchless select."""
    (value,) = args
    zero = Constant(value.type, None)  # zero-initializer constant
    is_negative = builder.icmp_signed('<', value, zero)
    negated = builder.neg(value)
    picked = builder.select(is_negative, negated, value)
    return impl_ret_untracked(context, builder, sig.return_type, picked)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def uint_abs_impl(context, builder, sig, args):
    """Lower ``abs()`` of an unsigned integer: the value is returned unchanged."""
    (value,) = args
    return impl_ret_untracked(context, builder, sig.return_type, value)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def int_shl_impl(context, builder, sig, args):
    """Lower integer ``<<``: cast value and amount to the return type, shift."""
    restype = sig.return_type
    value = context.cast(builder, args[0], sig.args[0], restype)
    amount = context.cast(builder, args[1], sig.args[1], restype)
    shifted = builder.shl(value, amount)
    return impl_ret_untracked(context, builder, restype, shifted)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def int_shr_impl(context, builder, sig, args):
    """Lower integer ``>>``: arithmetic shift for signed, logical for unsigned."""
    restype = sig.return_type
    value = context.cast(builder, args[0], sig.args[0], restype)
    amount = context.cast(builder, args[1], sig.args[1], restype)
    shift = builder.ashr if restype.signed else builder.lshr
    shifted = shift(value, amount)
    return impl_ret_untracked(context, builder, restype, shifted)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def int_and_impl(context, builder, sig, args):
    """Lower bitwise ``&``: cast both operands to the return type, then AND."""
    restype = sig.return_type
    lhs = context.cast(builder, args[0], sig.args[0], restype)
    rhs = context.cast(builder, args[1], sig.args[1], restype)
    anded = builder.and_(lhs, rhs)
    return impl_ret_untracked(context, builder, restype, anded)
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def int_or_impl(context, builder, sig, args):
    """Lower bitwise ``|``: cast both operands to the return type, then OR."""
    restype = sig.return_type
    lhs = context.cast(builder, args[0], sig.args[0], restype)
    rhs = context.cast(builder, args[1], sig.args[1], restype)
    ored = builder.or_(lhs, rhs)
    return impl_ret_untracked(context, builder, restype, ored)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def int_xor_impl(context, builder, sig, args):
    """Lower bitwise ``^``: cast both operands to the return type, then XOR."""
    restype = sig.return_type
    lhs = context.cast(builder, args[0], sig.args[0], restype)
    rhs = context.cast(builder, args[1], sig.args[1], restype)
    xored = builder.xor(lhs, rhs)
    return impl_ret_untracked(context, builder, restype, xored)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def int_negate_impl(context, builder, sig, args):
    """Lower unary ``-`` on integers."""
    (srctype,), (value,) = sig.args, args
    # Negate before upcasting, for unsigned numbers
    negated = builder.neg(value)
    widened = context.cast(builder, negated, srctype, sig.return_type)
    return impl_ret_untracked(context, builder, sig.return_type, widened)
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
def int_positive_impl(context, builder, sig, args):
    """Lower unary ``+`` on integers: just cast to the return type."""
    (srctype,), (value,) = sig.args, args
    widened = context.cast(builder, value, srctype, sig.return_type)
    return impl_ret_untracked(context, builder, sig.return_type, widened)
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def int_invert_impl(context, builder, sig, args):
    """Lower bitwise ``~`` on integers (xor with an all-ones mask)."""
    (srctype,), (value,) = sig.args, args
    # Invert before upcasting, for unsigned numbers
    inverted = builder.xor(value, Constant(value.type, int('1' * value.type.width, 2)))
    widened = context.cast(builder, inverted, srctype, sig.return_type)
    return impl_ret_untracked(context, builder, sig.return_type, widened)
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def int_sign_impl(context, builder, sig, args):
    """
    np.sign(int)

    Returns -1, 0 or 1 depending on the sign of the argument, built with
    explicit basic blocks: first test for zero, then test for positive.
    """
    [x] = args
    POS = Constant(x.type, 1)
    NEG = Constant(x.type, -1)
    ZERO = Constant(x.type, 0)

    cmp_zero = builder.icmp_unsigned('==', x, ZERO)
    cmp_pos = builder.icmp_signed('>', x, ZERO)

    # Stack slot collecting the result from whichever branch runs
    presult = cgutils.alloca_once(builder, x.type)

    bb_zero = builder.append_basic_block(".zero")
    bb_postest = builder.append_basic_block(".postest")
    bb_pos = builder.append_basic_block(".pos")
    bb_neg = builder.append_basic_block(".neg")
    bb_exit = builder.append_basic_block(".exit")

    builder.cbranch(cmp_zero, bb_zero, bb_postest)

    with builder.goto_block(bb_zero):
        builder.store(ZERO, presult)
        builder.branch(bb_exit)

    with builder.goto_block(bb_postest):
        builder.cbranch(cmp_pos, bb_pos, bb_neg)

    with builder.goto_block(bb_pos):
        builder.store(POS, presult)
        builder.branch(bb_exit)

    with builder.goto_block(bb_neg):
        builder.store(NEG, presult)
        builder.branch(bb_exit)

    # Continue emitting code at the join point
    builder.position_at_end(bb_exit)
    res = builder.load(presult)
    return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def bool_negate_impl(context, builder, sig, args):
    """Lower unary ``-`` on booleans: cast to the integer return type, negate."""
    (srctype,), (value,) = sig.args, args
    widened = context.cast(builder, value, srctype, sig.return_type)
    negated = builder.neg(widened)
    return impl_ret_untracked(context, builder, sig.return_type, negated)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def bool_unary_positive_impl(context, builder, sig, args):
    """Lower unary ``+`` on booleans: just cast to the return type."""
    (srctype,), (value,) = sig.args, args
    widened = context.cast(builder, value, srctype, sig.return_type)
    return impl_ret_untracked(context, builder, sig.return_type, widened)
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
# lower_builtin(operator.eq, types.boolean, types.boolean)(int_eq_impl)
|
| 536 |
+
# lower_builtin(operator.ne, types.boolean, types.boolean)(int_ne_impl)
|
| 537 |
+
# lower_builtin(operator.lt, types.boolean, types.boolean)(int_ult_impl)
|
| 538 |
+
# lower_builtin(operator.le, types.boolean, types.boolean)(int_ule_impl)
|
| 539 |
+
# lower_builtin(operator.gt, types.boolean, types.boolean)(int_ugt_impl)
|
| 540 |
+
# lower_builtin(operator.ge, types.boolean, types.boolean)(int_uge_impl)
|
| 541 |
+
# lower_builtin(operator.neg, types.boolean)(bool_negate_impl)
|
| 542 |
+
# lower_builtin(operator.pos, types.boolean)(bool_unary_positive_impl)
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
# def _implement_integer_operators():
|
| 546 |
+
# ty = types.Integer
|
| 547 |
+
|
| 548 |
+
# lower_builtin(operator.add, ty, ty)(int_add_impl)
|
| 549 |
+
# lower_builtin(operator.iadd, ty, ty)(int_add_impl)
|
| 550 |
+
# lower_builtin(operator.sub, ty, ty)(int_sub_impl)
|
| 551 |
+
# lower_builtin(operator.isub, ty, ty)(int_sub_impl)
|
| 552 |
+
# lower_builtin(operator.mul, ty, ty)(int_mul_impl)
|
| 553 |
+
# lower_builtin(operator.imul, ty, ty)(int_mul_impl)
|
| 554 |
+
# lower_builtin(operator.eq, ty, ty)(int_eq_impl)
|
| 555 |
+
# lower_builtin(operator.ne, ty, ty)(int_ne_impl)
|
| 556 |
+
|
| 557 |
+
# lower_builtin(operator.lshift, ty, ty)(int_shl_impl)
|
| 558 |
+
# lower_builtin(operator.ilshift, ty, ty)(int_shl_impl)
|
| 559 |
+
# lower_builtin(operator.rshift, ty, ty)(int_shr_impl)
|
| 560 |
+
# lower_builtin(operator.irshift, ty, ty)(int_shr_impl)
|
| 561 |
+
|
| 562 |
+
# lower_builtin(operator.neg, ty)(int_negate_impl)
|
| 563 |
+
# lower_builtin(operator.pos, ty)(int_positive_impl)
|
| 564 |
+
|
| 565 |
+
# lower_builtin(operator.pow, ty, ty)(int_power_impl)
|
| 566 |
+
# lower_builtin(operator.ipow, ty, ty)(int_power_impl)
|
| 567 |
+
# lower_builtin(pow, ty, ty)(int_power_impl)
|
| 568 |
+
|
| 569 |
+
# for ty in types.unsigned_domain:
|
| 570 |
+
# lower_builtin(operator.lt, ty, ty)(int_ult_impl)
|
| 571 |
+
# lower_builtin(operator.le, ty, ty)(int_ule_impl)
|
| 572 |
+
# lower_builtin(operator.gt, ty, ty)(int_ugt_impl)
|
| 573 |
+
# lower_builtin(operator.ge, ty, ty)(int_uge_impl)
|
| 574 |
+
# lower_builtin(operator.pow, types.Float, ty)(int_power_impl)
|
| 575 |
+
# lower_builtin(operator.ipow, types.Float, ty)(int_power_impl)
|
| 576 |
+
# lower_builtin(pow, types.Float, ty)(int_power_impl)
|
| 577 |
+
# lower_builtin(abs, ty)(uint_abs_impl)
|
| 578 |
+
|
| 579 |
+
# lower_builtin(operator.lt, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl)
|
| 580 |
+
# lower_builtin(operator.gt, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl)
|
| 581 |
+
# lower_builtin(operator.le, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl)
|
| 582 |
+
# lower_builtin(operator.ge, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl)
|
| 583 |
+
# for ty in types.signed_domain:
|
| 584 |
+
# lower_builtin(operator.lt, ty, ty)(int_slt_impl)
|
| 585 |
+
# lower_builtin(operator.le, ty, ty)(int_sle_impl)
|
| 586 |
+
# lower_builtin(operator.gt, ty, ty)(int_sgt_impl)
|
| 587 |
+
# lower_builtin(operator.ge, ty, ty)(int_sge_impl)
|
| 588 |
+
# lower_builtin(operator.pow, types.Float, ty)(int_power_impl)
|
| 589 |
+
# lower_builtin(operator.ipow, types.Float, ty)(int_power_impl)
|
| 590 |
+
# lower_builtin(pow, types.Float, ty)(int_power_impl)
|
| 591 |
+
# lower_builtin(abs, ty)(int_abs_impl)
|
| 592 |
+
|
| 593 |
+
# def _implement_bitwise_operators():
|
| 594 |
+
# for ty in (types.Boolean, types.Integer):
|
| 595 |
+
# lower_builtin(operator.and_, ty, ty)(int_and_impl)
|
| 596 |
+
# lower_builtin(operator.iand, ty, ty)(int_and_impl)
|
| 597 |
+
# lower_builtin(operator.or_, ty, ty)(int_or_impl)
|
| 598 |
+
# lower_builtin(operator.ior, ty, ty)(int_or_impl)
|
| 599 |
+
# lower_builtin(operator.xor, ty, ty)(int_xor_impl)
|
| 600 |
+
# lower_builtin(operator.ixor, ty, ty)(int_xor_impl)
|
| 601 |
+
|
| 602 |
+
# lower_builtin(operator.invert, ty)(int_invert_impl)
|
| 603 |
+
|
| 604 |
+
# _implement_integer_operators()
|
| 605 |
+
|
| 606 |
+
# _implement_bitwise_operators()
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
def real_add_impl(context, builder, sig, args):
|
| 610 |
+
res = builder.fadd(*args)
|
| 611 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
def real_sub_impl(context, builder, sig, args):
|
| 615 |
+
res = builder.fsub(*args)
|
| 616 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
def real_mul_impl(context, builder, sig, args):
|
| 620 |
+
res = builder.fmul(*args)
|
| 621 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
def real_div_impl(context, builder, sig, args):
|
| 625 |
+
with cgutils.if_zero(builder, args[1]):
|
| 626 |
+
context.error_model.fp_zero_division(builder, ("division by zero",))
|
| 627 |
+
res = builder.fdiv(*args)
|
| 628 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def real_divmod(context, builder, x, y):
|
| 632 |
+
assert x.type == y.type
|
| 633 |
+
floatty = x.type
|
| 634 |
+
|
| 635 |
+
module = builder.module
|
| 636 |
+
fname = context.mangler(".numba.python.rem", [x.type])
|
| 637 |
+
fnty = ir.FunctionType(floatty, (floatty, floatty, ir.PointerType(floatty)))
|
| 638 |
+
fn = cgutils.get_or_insert_function(module, fnty, fname)
|
| 639 |
+
|
| 640 |
+
if fn.is_declaration:
|
| 641 |
+
fn.linkage = 'linkonce_odr'
|
| 642 |
+
fnbuilder = ir.IRBuilder(fn.append_basic_block('entry'))
|
| 643 |
+
fx, fy, pmod = fn.args
|
| 644 |
+
div, mod = real_divmod_func_body(context, fnbuilder, fx, fy)
|
| 645 |
+
fnbuilder.store(mod, pmod)
|
| 646 |
+
fnbuilder.ret(div)
|
| 647 |
+
|
| 648 |
+
pmod = cgutils.alloca_once(builder, floatty)
|
| 649 |
+
quotient = builder.call(fn, (x, y, pmod))
|
| 650 |
+
return quotient, builder.load(pmod)
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def real_divmod_func_body(context, builder, vx, wx):
|
| 654 |
+
# Reference Objects/floatobject.c
|
| 655 |
+
#
|
| 656 |
+
# float_divmod(PyObject *v, PyObject *w)
|
| 657 |
+
# {
|
| 658 |
+
# double vx, wx;
|
| 659 |
+
# double div, mod, floordiv;
|
| 660 |
+
# CONVERT_TO_DOUBLE(v, vx);
|
| 661 |
+
# CONVERT_TO_DOUBLE(w, wx);
|
| 662 |
+
# mod = fmod(vx, wx);
|
| 663 |
+
# /* fmod is typically exact, so vx-mod is *mathematically* an
|
| 664 |
+
# exact multiple of wx. But this is fp arithmetic, and fp
|
| 665 |
+
# vx - mod is an approximation; the result is that div may
|
| 666 |
+
# not be an exact integral value after the division, although
|
| 667 |
+
# it will always be very close to one.
|
| 668 |
+
# */
|
| 669 |
+
# div = (vx - mod) / wx;
|
| 670 |
+
# if (mod) {
|
| 671 |
+
# /* ensure the remainder has the same sign as the denominator */
|
| 672 |
+
# if ((wx < 0) != (mod < 0)) {
|
| 673 |
+
# mod += wx;
|
| 674 |
+
# div -= 1.0;
|
| 675 |
+
# }
|
| 676 |
+
# }
|
| 677 |
+
# else {
|
| 678 |
+
# /* the remainder is zero, and in the presence of signed zeroes
|
| 679 |
+
# fmod returns different results across platforms; ensure
|
| 680 |
+
# it has the same sign as the denominator; we'd like to do
|
| 681 |
+
# "mod = wx * 0.0", but that may get optimized away */
|
| 682 |
+
# mod *= mod; /* hide "mod = +0" from optimizer */
|
| 683 |
+
# if (wx < 0.0)
|
| 684 |
+
# mod = -mod;
|
| 685 |
+
# }
|
| 686 |
+
# /* snap quotient to nearest integral value */
|
| 687 |
+
# if (div) {
|
| 688 |
+
# floordiv = floor(div);
|
| 689 |
+
# if (div - floordiv > 0.5)
|
| 690 |
+
# floordiv += 1.0;
|
| 691 |
+
# }
|
| 692 |
+
# else {
|
| 693 |
+
# /* div is zero - get the same sign as the true quotient */
|
| 694 |
+
# div *= div; /* hide "div = +0" from optimizers */
|
| 695 |
+
# floordiv = div * vx / wx; /* zero w/ sign of vx/wx */
|
| 696 |
+
# }
|
| 697 |
+
# return Py_BuildValue("(dd)", floordiv, mod);
|
| 698 |
+
# }
|
| 699 |
+
pmod = cgutils.alloca_once(builder, vx.type)
|
| 700 |
+
pdiv = cgutils.alloca_once(builder, vx.type)
|
| 701 |
+
pfloordiv = cgutils.alloca_once(builder, vx.type)
|
| 702 |
+
|
| 703 |
+
mod = builder.frem(vx, wx)
|
| 704 |
+
div = builder.fdiv(builder.fsub(vx, mod), wx)
|
| 705 |
+
|
| 706 |
+
builder.store(mod, pmod)
|
| 707 |
+
builder.store(div, pdiv)
|
| 708 |
+
|
| 709 |
+
# Note the use of negative zero for proper negating with `ZERO - x`
|
| 710 |
+
ZERO = vx.type(0.0)
|
| 711 |
+
NZERO = vx.type(-0.0)
|
| 712 |
+
ONE = vx.type(1.0)
|
| 713 |
+
mod_istrue = builder.fcmp_unordered('!=', mod, ZERO)
|
| 714 |
+
wx_ltz = builder.fcmp_ordered('<', wx, ZERO)
|
| 715 |
+
mod_ltz = builder.fcmp_ordered('<', mod, ZERO)
|
| 716 |
+
|
| 717 |
+
with builder.if_else(mod_istrue, likely=True) as (if_nonzero_mod, if_zero_mod):
|
| 718 |
+
with if_nonzero_mod:
|
| 719 |
+
# `mod` is non-zero or NaN
|
| 720 |
+
# Ensure the remainder has the same sign as the denominator
|
| 721 |
+
wx_ltz_ne_mod_ltz = builder.icmp_unsigned('!=', wx_ltz, mod_ltz)
|
| 722 |
+
|
| 723 |
+
with builder.if_then(wx_ltz_ne_mod_ltz):
|
| 724 |
+
builder.store(builder.fsub(div, ONE), pdiv)
|
| 725 |
+
builder.store(builder.fadd(mod, wx), pmod)
|
| 726 |
+
|
| 727 |
+
with if_zero_mod:
|
| 728 |
+
# `mod` is zero, select the proper sign depending on
|
| 729 |
+
# the denominator's sign
|
| 730 |
+
mod = builder.select(wx_ltz, NZERO, ZERO)
|
| 731 |
+
builder.store(mod, pmod)
|
| 732 |
+
|
| 733 |
+
del mod, div
|
| 734 |
+
|
| 735 |
+
div = builder.load(pdiv)
|
| 736 |
+
div_istrue = builder.fcmp_ordered('!=', div, ZERO)
|
| 737 |
+
|
| 738 |
+
with builder.if_then(div_istrue):
|
| 739 |
+
realtypemap = {'float': types.float32,
|
| 740 |
+
'double': types.float64}
|
| 741 |
+
realtype = realtypemap[str(wx.type)]
|
| 742 |
+
floorfn = context.get_function(math.floor,
|
| 743 |
+
typing.signature(realtype, realtype))
|
| 744 |
+
floordiv = floorfn(builder, [div])
|
| 745 |
+
floordivdiff = builder.fsub(div, floordiv)
|
| 746 |
+
floordivincr = builder.fadd(floordiv, ONE)
|
| 747 |
+
HALF = Constant(wx.type, 0.5)
|
| 748 |
+
pred = builder.fcmp_ordered('>', floordivdiff, HALF)
|
| 749 |
+
floordiv = builder.select(pred, floordivincr, floordiv)
|
| 750 |
+
builder.store(floordiv, pfloordiv)
|
| 751 |
+
|
| 752 |
+
with cgutils.ifnot(builder, div_istrue):
|
| 753 |
+
div = builder.fmul(div, div)
|
| 754 |
+
builder.store(div, pdiv)
|
| 755 |
+
floordiv = builder.fdiv(builder.fmul(div, vx), wx)
|
| 756 |
+
builder.store(floordiv, pfloordiv)
|
| 757 |
+
|
| 758 |
+
return builder.load(pfloordiv), builder.load(pmod)
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
# @lower_builtin(divmod, types.Float, types.Float)
|
| 762 |
+
def real_divmod_impl(context, builder, sig, args, loc=None):
|
| 763 |
+
x, y = args
|
| 764 |
+
quot = cgutils.alloca_once(builder, x.type, name="quot")
|
| 765 |
+
rem = cgutils.alloca_once(builder, x.type, name="rem")
|
| 766 |
+
|
| 767 |
+
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
|
| 768 |
+
) as (if_zero, if_non_zero):
|
| 769 |
+
with if_zero:
|
| 770 |
+
if not context.error_model.fp_zero_division(
|
| 771 |
+
builder, ("modulo by zero",), loc):
|
| 772 |
+
# No exception raised => compute the nan result,
|
| 773 |
+
# and set the FP exception word for Numpy warnings.
|
| 774 |
+
q = builder.fdiv(x, y)
|
| 775 |
+
r = builder.frem(x, y)
|
| 776 |
+
builder.store(q, quot)
|
| 777 |
+
builder.store(r, rem)
|
| 778 |
+
with if_non_zero:
|
| 779 |
+
q, r = real_divmod(context, builder, x, y)
|
| 780 |
+
builder.store(q, quot)
|
| 781 |
+
builder.store(r, rem)
|
| 782 |
+
|
| 783 |
+
return cgutils.pack_array(builder,
|
| 784 |
+
(builder.load(quot), builder.load(rem)))
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
def real_mod_impl(context, builder, sig, args, loc=None):
|
| 788 |
+
x, y = args
|
| 789 |
+
res = cgutils.alloca_once(builder, x.type)
|
| 790 |
+
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
|
| 791 |
+
) as (if_zero, if_non_zero):
|
| 792 |
+
with if_zero:
|
| 793 |
+
if not context.error_model.fp_zero_division(
|
| 794 |
+
builder, ("modulo by zero",), loc):
|
| 795 |
+
# No exception raised => compute the nan result,
|
| 796 |
+
# and set the FP exception word for Numpy warnings.
|
| 797 |
+
rem = builder.frem(x, y)
|
| 798 |
+
builder.store(rem, res)
|
| 799 |
+
with if_non_zero:
|
| 800 |
+
_, rem = real_divmod(context, builder, x, y)
|
| 801 |
+
builder.store(rem, res)
|
| 802 |
+
return impl_ret_untracked(context, builder, sig.return_type,
|
| 803 |
+
builder.load(res))
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
def real_floordiv_impl(context, builder, sig, args, loc=None):
|
| 807 |
+
x, y = args
|
| 808 |
+
res = cgutils.alloca_once(builder, x.type)
|
| 809 |
+
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
|
| 810 |
+
) as (if_zero, if_non_zero):
|
| 811 |
+
with if_zero:
|
| 812 |
+
if not context.error_model.fp_zero_division(
|
| 813 |
+
builder, ("division by zero",), loc):
|
| 814 |
+
# No exception raised => compute the +/-inf or nan result,
|
| 815 |
+
# and set the FP exception word for Numpy warnings.
|
| 816 |
+
quot = builder.fdiv(x, y)
|
| 817 |
+
builder.store(quot, res)
|
| 818 |
+
with if_non_zero:
|
| 819 |
+
quot, _ = real_divmod(context, builder, x, y)
|
| 820 |
+
builder.store(quot, res)
|
| 821 |
+
return impl_ret_untracked(context, builder, sig.return_type,
|
| 822 |
+
builder.load(res))
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
def real_power_impl(context, builder, sig, args):
|
| 826 |
+
x, y = args
|
| 827 |
+
module = builder.module
|
| 828 |
+
if context.implement_powi_as_math_call:
|
| 829 |
+
imp = context.get_function(math.pow, sig)
|
| 830 |
+
res = imp(builder, args)
|
| 831 |
+
else:
|
| 832 |
+
fn = module.declare_intrinsic('llvm.pow', [y.type])
|
| 833 |
+
res = builder.call(fn, (x, y))
|
| 834 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
def real_lt_impl(context, builder, sig, args):
|
| 838 |
+
res = builder.fcmp_ordered('<', *args)
|
| 839 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
def real_le_impl(context, builder, sig, args):
|
| 843 |
+
res = builder.fcmp_ordered('<=', *args)
|
| 844 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
def real_gt_impl(context, builder, sig, args):
|
| 848 |
+
res = builder.fcmp_ordered('>', *args)
|
| 849 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 850 |
+
|
| 851 |
+
|
| 852 |
+
def real_ge_impl(context, builder, sig, args):
|
| 853 |
+
res = builder.fcmp_ordered('>=', *args)
|
| 854 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
def real_eq_impl(context, builder, sig, args):
|
| 858 |
+
res = builder.fcmp_ordered('==', *args)
|
| 859 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
def real_ne_impl(context, builder, sig, args):
|
| 863 |
+
res = builder.fcmp_unordered('!=', *args)
|
| 864 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
def real_abs_impl(context, builder, sig, args):
|
| 868 |
+
[ty] = sig.args
|
| 869 |
+
sig = typing.signature(ty, ty)
|
| 870 |
+
impl = context.get_function(math.fabs, sig)
|
| 871 |
+
return impl(builder, args)
|
| 872 |
+
|
| 873 |
+
|
| 874 |
+
def real_negate_impl(context, builder, sig, args):
|
| 875 |
+
from numba.cpython import mathimpl
|
| 876 |
+
res = mathimpl.negate_real(builder, args[0])
|
| 877 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 878 |
+
|
| 879 |
+
|
| 880 |
+
def real_positive_impl(context, builder, sig, args):
|
| 881 |
+
[typ] = sig.args
|
| 882 |
+
[val] = args
|
| 883 |
+
res = context.cast(builder, val, typ, sig.return_type)
|
| 884 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def real_sign_impl(context, builder, sig, args):
|
| 888 |
+
"""
|
| 889 |
+
np.sign(float)
|
| 890 |
+
"""
|
| 891 |
+
[x] = args
|
| 892 |
+
POS = Constant(x.type, 1)
|
| 893 |
+
NEG = Constant(x.type, -1)
|
| 894 |
+
ZERO = Constant(x.type, 0)
|
| 895 |
+
|
| 896 |
+
presult = cgutils.alloca_once(builder, x.type)
|
| 897 |
+
|
| 898 |
+
is_pos = builder.fcmp_ordered('>', x, ZERO)
|
| 899 |
+
is_neg = builder.fcmp_ordered('<', x, ZERO)
|
| 900 |
+
|
| 901 |
+
with builder.if_else(is_pos) as (gt_zero, not_gt_zero):
|
| 902 |
+
with gt_zero:
|
| 903 |
+
builder.store(POS, presult)
|
| 904 |
+
with not_gt_zero:
|
| 905 |
+
with builder.if_else(is_neg) as (lt_zero, not_lt_zero):
|
| 906 |
+
with lt_zero:
|
| 907 |
+
builder.store(NEG, presult)
|
| 908 |
+
with not_lt_zero:
|
| 909 |
+
# For both NaN and 0, the result of sign() is simply
|
| 910 |
+
# the input value.
|
| 911 |
+
builder.store(x, presult)
|
| 912 |
+
|
| 913 |
+
res = builder.load(presult)
|
| 914 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
# ty = types.Float
|
| 918 |
+
|
| 919 |
+
# lower_builtin(operator.add, ty, ty)(real_add_impl)
|
| 920 |
+
# lower_builtin(operator.iadd, ty, ty)(real_add_impl)
|
| 921 |
+
# lower_builtin(operator.sub, ty, ty)(real_sub_impl)
|
| 922 |
+
# lower_builtin(operator.isub, ty, ty)(real_sub_impl)
|
| 923 |
+
# lower_builtin(operator.mul, ty, ty)(real_mul_impl)
|
| 924 |
+
# lower_builtin(operator.imul, ty, ty)(real_mul_impl)
|
| 925 |
+
# lower_builtin(operator.floordiv, ty, ty)(real_floordiv_impl)
|
| 926 |
+
# lower_builtin(operator.ifloordiv, ty, ty)(real_floordiv_impl)
|
| 927 |
+
# lower_builtin(operator.truediv, ty, ty)(real_div_impl)
|
| 928 |
+
# lower_builtin(operator.itruediv, ty, ty)(real_div_impl)
|
| 929 |
+
# lower_builtin(operator.mod, ty, ty)(real_mod_impl)
|
| 930 |
+
# lower_builtin(operator.imod, ty, ty)(real_mod_impl)
|
| 931 |
+
# lower_builtin(operator.pow, ty, ty)(real_power_impl)
|
| 932 |
+
# lower_builtin(operator.ipow, ty, ty)(real_power_impl)
|
| 933 |
+
# lower_builtin(pow, ty, ty)(real_power_impl)
|
| 934 |
+
|
| 935 |
+
# lower_builtin(operator.eq, ty, ty)(real_eq_impl)
|
| 936 |
+
# lower_builtin(operator.ne, ty, ty)(real_ne_impl)
|
| 937 |
+
# lower_builtin(operator.lt, ty, ty)(real_lt_impl)
|
| 938 |
+
# lower_builtin(operator.le, ty, ty)(real_le_impl)
|
| 939 |
+
# lower_builtin(operator.gt, ty, ty)(real_gt_impl)
|
| 940 |
+
# lower_builtin(operator.ge, ty, ty)(real_ge_impl)
|
| 941 |
+
|
| 942 |
+
# lower_builtin(abs, ty)(real_abs_impl)
|
| 943 |
+
|
| 944 |
+
# lower_builtin(operator.neg, ty)(real_negate_impl)
|
| 945 |
+
# lower_builtin(operator.pos, ty)(real_positive_impl)
|
| 946 |
+
|
| 947 |
+
# del ty
|
| 948 |
+
|
| 949 |
+
|
| 950 |
+
# @lower_getattr(types.Complex, "real")
|
| 951 |
+
def complex_real_impl(context, builder, typ, value):
|
| 952 |
+
cplx = context.make_complex(builder, typ, value=value)
|
| 953 |
+
res = cplx.real
|
| 954 |
+
return impl_ret_untracked(context, builder, typ, res)
|
| 955 |
+
|
| 956 |
+
# @lower_getattr(types.Complex, "imag")
|
| 957 |
+
def complex_imag_impl(context, builder, typ, value):
|
| 958 |
+
cplx = context.make_complex(builder, typ, value=value)
|
| 959 |
+
res = cplx.imag
|
| 960 |
+
return impl_ret_untracked(context, builder, typ, res)
|
| 961 |
+
|
| 962 |
+
# @lower_builtin("complex.conjugate", types.Complex)
|
| 963 |
+
def complex_conjugate_impl(context, builder, sig, args):
|
| 964 |
+
from numba.cpython import mathimpl
|
| 965 |
+
z = context.make_complex(builder, sig.args[0], args[0])
|
| 966 |
+
z.imag = mathimpl.negate_real(builder, z.imag)
|
| 967 |
+
res = z._getvalue()
|
| 968 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 969 |
+
|
| 970 |
+
def real_real_impl(context, builder, typ, value):
|
| 971 |
+
return impl_ret_untracked(context, builder, typ, value)
|
| 972 |
+
|
| 973 |
+
def real_imag_impl(context, builder, typ, value):
|
| 974 |
+
res = cgutils.get_null_value(value.type)
|
| 975 |
+
return impl_ret_untracked(context, builder, typ, res)
|
| 976 |
+
|
| 977 |
+
def real_conjugate_impl(context, builder, sig, args):
|
| 978 |
+
return impl_ret_untracked(context, builder, sig.return_type, args[0])
|
| 979 |
+
|
| 980 |
+
# for cls in (types.Float, types.Integer):
|
| 981 |
+
# lower_getattr(cls, "real")(real_real_impl)
|
| 982 |
+
# lower_getattr(cls, "imag")(real_imag_impl)
|
| 983 |
+
# lower_builtin("complex.conjugate", cls)(real_conjugate_impl)
|
| 984 |
+
|
| 985 |
+
|
| 986 |
+
# @lower_builtin(operator.pow, types.Complex, types.Complex)
|
| 987 |
+
# @lower_builtin(operator.ipow, types.Complex, types.Complex)
|
| 988 |
+
# @lower_builtin(pow, types.Complex, types.Complex)
|
| 989 |
+
def complex_power_impl(context, builder, sig, args):
|
| 990 |
+
[ca, cb] = args
|
| 991 |
+
ty = sig.args[0]
|
| 992 |
+
fty = ty.underlying_float
|
| 993 |
+
a = context.make_helper(builder, ty, value=ca)
|
| 994 |
+
b = context.make_helper(builder, ty, value=cb)
|
| 995 |
+
c = context.make_helper(builder, ty)
|
| 996 |
+
module = builder.module
|
| 997 |
+
pa = a._getpointer()
|
| 998 |
+
pb = b._getpointer()
|
| 999 |
+
pc = c._getpointer()
|
| 1000 |
+
|
| 1001 |
+
# Optimize for square because cpow loses a lot of precision
|
| 1002 |
+
TWO = context.get_constant(fty, 2)
|
| 1003 |
+
ZERO = context.get_constant(fty, 0)
|
| 1004 |
+
|
| 1005 |
+
b_real_is_two = builder.fcmp_ordered('==', b.real, TWO)
|
| 1006 |
+
b_imag_is_zero = builder.fcmp_ordered('==', b.imag, ZERO)
|
| 1007 |
+
b_is_two = builder.and_(b_real_is_two, b_imag_is_zero)
|
| 1008 |
+
|
| 1009 |
+
with builder.if_else(b_is_two) as (then, otherwise):
|
| 1010 |
+
with then:
|
| 1011 |
+
# Lower as multiplication
|
| 1012 |
+
res = complex_mul_impl(context, builder, sig, (ca, ca))
|
| 1013 |
+
cres = context.make_helper(builder, ty, value=res)
|
| 1014 |
+
c.real = cres.real
|
| 1015 |
+
c.imag = cres.imag
|
| 1016 |
+
|
| 1017 |
+
with otherwise:
|
| 1018 |
+
# Lower with call to external function
|
| 1019 |
+
func_name = {
|
| 1020 |
+
types.complex64: "numba_cpowf",
|
| 1021 |
+
types.complex128: "numba_cpow",
|
| 1022 |
+
}[ty]
|
| 1023 |
+
fnty = ir.FunctionType(ir.VoidType(), [pa.type] * 3)
|
| 1024 |
+
cpow = cgutils.get_or_insert_function(module, fnty, func_name)
|
| 1025 |
+
builder.call(cpow, (pa, pb, pc))
|
| 1026 |
+
|
| 1027 |
+
res = builder.load(pc)
|
| 1028 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1029 |
+
|
| 1030 |
+
def complex_add_impl(context, builder, sig, args):
|
| 1031 |
+
[cx, cy] = args
|
| 1032 |
+
ty = sig.args[0]
|
| 1033 |
+
x = context.make_complex(builder, ty, value=cx)
|
| 1034 |
+
y = context.make_complex(builder, ty, value=cy)
|
| 1035 |
+
z = context.make_complex(builder, ty)
|
| 1036 |
+
a = x.real
|
| 1037 |
+
b = x.imag
|
| 1038 |
+
c = y.real
|
| 1039 |
+
d = y.imag
|
| 1040 |
+
z.real = builder.fadd(a, c)
|
| 1041 |
+
z.imag = builder.fadd(b, d)
|
| 1042 |
+
res = z._getvalue()
|
| 1043 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1044 |
+
|
| 1045 |
+
|
| 1046 |
+
def complex_sub_impl(context, builder, sig, args):
|
| 1047 |
+
[cx, cy] = args
|
| 1048 |
+
ty = sig.args[0]
|
| 1049 |
+
x = context.make_complex(builder, ty, value=cx)
|
| 1050 |
+
y = context.make_complex(builder, ty, value=cy)
|
| 1051 |
+
z = context.make_complex(builder, ty)
|
| 1052 |
+
a = x.real
|
| 1053 |
+
b = x.imag
|
| 1054 |
+
c = y.real
|
| 1055 |
+
d = y.imag
|
| 1056 |
+
z.real = builder.fsub(a, c)
|
| 1057 |
+
z.imag = builder.fsub(b, d)
|
| 1058 |
+
res = z._getvalue()
|
| 1059 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
def complex_mul_impl(context, builder, sig, args):
|
| 1063 |
+
"""
|
| 1064 |
+
(a+bi)(c+di)=(ac-bd)+i(ad+bc)
|
| 1065 |
+
"""
|
| 1066 |
+
[cx, cy] = args
|
| 1067 |
+
ty = sig.args[0]
|
| 1068 |
+
x = context.make_complex(builder, ty, value=cx)
|
| 1069 |
+
y = context.make_complex(builder, ty, value=cy)
|
| 1070 |
+
z = context.make_complex(builder, ty)
|
| 1071 |
+
a = x.real
|
| 1072 |
+
b = x.imag
|
| 1073 |
+
c = y.real
|
| 1074 |
+
d = y.imag
|
| 1075 |
+
ac = builder.fmul(a, c)
|
| 1076 |
+
bd = builder.fmul(b, d)
|
| 1077 |
+
ad = builder.fmul(a, d)
|
| 1078 |
+
bc = builder.fmul(b, c)
|
| 1079 |
+
z.real = builder.fsub(ac, bd)
|
| 1080 |
+
z.imag = builder.fadd(ad, bc)
|
| 1081 |
+
res = z._getvalue()
|
| 1082 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1083 |
+
|
| 1084 |
+
|
| 1085 |
+
NAN = float('nan')
|
| 1086 |
+
|
| 1087 |
+
def complex_div_impl(context, builder, sig, args):
|
| 1088 |
+
def complex_div(a, b):
|
| 1089 |
+
# This is CPython's algorithm (in _Py_c_quot()).
|
| 1090 |
+
areal = a.real
|
| 1091 |
+
aimag = a.imag
|
| 1092 |
+
breal = b.real
|
| 1093 |
+
bimag = b.imag
|
| 1094 |
+
if not breal and not bimag:
|
| 1095 |
+
raise ZeroDivisionError("complex division by zero")
|
| 1096 |
+
if abs(breal) >= abs(bimag):
|
| 1097 |
+
# Divide tops and bottom by b.real
|
| 1098 |
+
if not breal:
|
| 1099 |
+
return complex(NAN, NAN)
|
| 1100 |
+
ratio = bimag / breal
|
| 1101 |
+
denom = breal + bimag * ratio
|
| 1102 |
+
return complex(
|
| 1103 |
+
(areal + aimag * ratio) / denom,
|
| 1104 |
+
(aimag - areal * ratio) / denom)
|
| 1105 |
+
else:
|
| 1106 |
+
# Divide tops and bottom by b.imag
|
| 1107 |
+
if not bimag:
|
| 1108 |
+
return complex(NAN, NAN)
|
| 1109 |
+
ratio = breal / bimag
|
| 1110 |
+
denom = breal * ratio + bimag
|
| 1111 |
+
return complex(
|
| 1112 |
+
(a.real * ratio + a.imag) / denom,
|
| 1113 |
+
(a.imag * ratio - a.real) / denom)
|
| 1114 |
+
|
| 1115 |
+
res = context.compile_internal(builder, complex_div, sig, args)
|
| 1116 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1117 |
+
|
| 1118 |
+
|
| 1119 |
+
def complex_negate_impl(context, builder, sig, args):
|
| 1120 |
+
from numba.cpython import mathimpl
|
| 1121 |
+
[typ] = sig.args
|
| 1122 |
+
[val] = args
|
| 1123 |
+
cmplx = context.make_complex(builder, typ, value=val)
|
| 1124 |
+
res = context.make_complex(builder, typ)
|
| 1125 |
+
res.real = mathimpl.negate_real(builder, cmplx.real)
|
| 1126 |
+
res.imag = mathimpl.negate_real(builder, cmplx.imag)
|
| 1127 |
+
res = res._getvalue()
|
| 1128 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1129 |
+
|
| 1130 |
+
|
| 1131 |
+
def complex_positive_impl(context, builder, sig, args):
|
| 1132 |
+
[val] = args
|
| 1133 |
+
return impl_ret_untracked(context, builder, sig.return_type, val)
|
| 1134 |
+
|
| 1135 |
+
|
| 1136 |
+
def complex_eq_impl(context, builder, sig, args):
|
| 1137 |
+
[cx, cy] = args
|
| 1138 |
+
typ = sig.args[0]
|
| 1139 |
+
x = context.make_complex(builder, typ, value=cx)
|
| 1140 |
+
y = context.make_complex(builder, typ, value=cy)
|
| 1141 |
+
|
| 1142 |
+
reals_are_eq = builder.fcmp_ordered('==', x.real, y.real)
|
| 1143 |
+
imags_are_eq = builder.fcmp_ordered('==', x.imag, y.imag)
|
| 1144 |
+
res = builder.and_(reals_are_eq, imags_are_eq)
|
| 1145 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1146 |
+
|
| 1147 |
+
|
| 1148 |
+
def complex_ne_impl(context, builder, sig, args):
|
| 1149 |
+
[cx, cy] = args
|
| 1150 |
+
typ = sig.args[0]
|
| 1151 |
+
x = context.make_complex(builder, typ, value=cx)
|
| 1152 |
+
y = context.make_complex(builder, typ, value=cy)
|
| 1153 |
+
|
| 1154 |
+
reals_are_ne = builder.fcmp_unordered('!=', x.real, y.real)
|
| 1155 |
+
imags_are_ne = builder.fcmp_unordered('!=', x.imag, y.imag)
|
| 1156 |
+
res = builder.or_(reals_are_ne, imags_are_ne)
|
| 1157 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
def complex_abs_impl(context, builder, sig, args):
|
| 1161 |
+
"""
|
| 1162 |
+
abs(z) := hypot(z.real, z.imag)
|
| 1163 |
+
"""
|
| 1164 |
+
def complex_abs(z):
|
| 1165 |
+
return math.hypot(z.real, z.imag)
|
| 1166 |
+
|
| 1167 |
+
res = context.compile_internal(builder, complex_abs, sig, args)
|
| 1168 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1169 |
+
|
| 1170 |
+
|
| 1171 |
+
# ty = types.Complex
|
| 1172 |
+
|
| 1173 |
+
# lower_builtin(operator.add, ty, ty)(complex_add_impl)
|
| 1174 |
+
# lower_builtin(operator.iadd, ty, ty)(complex_add_impl)
|
| 1175 |
+
# lower_builtin(operator.sub, ty, ty)(complex_sub_impl)
|
| 1176 |
+
# lower_builtin(operator.isub, ty, ty)(complex_sub_impl)
|
| 1177 |
+
# lower_builtin(operator.mul, ty, ty)(complex_mul_impl)
|
| 1178 |
+
# lower_builtin(operator.imul, ty, ty)(complex_mul_impl)
|
| 1179 |
+
# lower_builtin(operator.truediv, ty, ty)(complex_div_impl)
|
| 1180 |
+
# lower_builtin(operator.itruediv, ty, ty)(complex_div_impl)
|
| 1181 |
+
# lower_builtin(operator.neg, ty)(complex_negate_impl)
|
| 1182 |
+
# lower_builtin(operator.pos, ty)(complex_positive_impl)
|
| 1183 |
+
# # Complex modulo is deprecated in python3
|
| 1184 |
+
|
| 1185 |
+
# lower_builtin(operator.eq, ty, ty)(complex_eq_impl)
|
| 1186 |
+
# lower_builtin(operator.ne, ty, ty)(complex_ne_impl)
|
| 1187 |
+
|
| 1188 |
+
# lower_builtin(abs, ty)(complex_abs_impl)
|
| 1189 |
+
|
| 1190 |
+
# del ty
|
| 1191 |
+
|
| 1192 |
+
|
| 1193 |
+
# @lower_builtin("number.item", types.Boolean)
|
| 1194 |
+
# @lower_builtin("number.item", types.Number)
|
| 1195 |
+
def number_item_impl(context, builder, sig, args):
|
| 1196 |
+
"""
|
| 1197 |
+
The no-op .item() method on booleans and numbers.
|
| 1198 |
+
"""
|
| 1199 |
+
return args[0]
|
| 1200 |
+
|
| 1201 |
+
|
| 1202 |
+
#------------------------------------------------------------------------------
|
| 1203 |
+
|
| 1204 |
+
|
| 1205 |
+
def number_not_impl(context, builder, sig, args):
|
| 1206 |
+
[typ] = sig.args
|
| 1207 |
+
[val] = args
|
| 1208 |
+
istrue = context.cast(builder, val, typ, sig.return_type)
|
| 1209 |
+
res = builder.not_(istrue)
|
| 1210 |
+
return impl_ret_untracked(context, builder, sig.return_type, res)
|
| 1211 |
+
|
| 1212 |
+
# @lower_builtin(bool, types.Boolean)
|
| 1213 |
+
def bool_as_bool(context, builder, sig, args):
|
| 1214 |
+
[val] = args
|
| 1215 |
+
return val
|
| 1216 |
+
|
| 1217 |
+
# @lower_builtin(bool, types.Integer)
|
| 1218 |
+
def int_as_bool(context, builder, sig, args):
|
| 1219 |
+
[val] = args
|
| 1220 |
+
return builder.icmp_unsigned('!=', val, Constant(val.type, 0))
|
| 1221 |
+
|
| 1222 |
+
# @lower_builtin(bool, types.Float)
|
| 1223 |
+
def float_as_bool(context, builder, sig, args):
|
| 1224 |
+
[val] = args
|
| 1225 |
+
return builder.fcmp_unordered('!=', val, Constant(val.type, 0.0))
|
| 1226 |
+
|
| 1227 |
+
# @lower_builtin(bool, types.Complex)
|
| 1228 |
+
def complex_as_bool(context, builder, sig, args):
|
| 1229 |
+
[typ] = sig.args
|
| 1230 |
+
[val] = args
|
| 1231 |
+
cmplx = context.make_complex(builder, typ, val)
|
| 1232 |
+
real, imag = cmplx.real, cmplx.imag
|
| 1233 |
+
zero = Constant(real.type, 0.0)
|
| 1234 |
+
real_istrue = builder.fcmp_unordered('!=', real, zero)
|
| 1235 |
+
imag_istrue = builder.fcmp_unordered('!=', imag, zero)
|
| 1236 |
+
return builder.or_(real_istrue, imag_istrue)
|
| 1237 |
+
|
| 1238 |
+
|
| 1239 |
+
# for ty in (types.Integer, types.Float, types.Complex):
|
| 1240 |
+
# lower_builtin(operator.not_, ty)(number_not_impl)
|
| 1241 |
+
|
| 1242 |
+
# lower_builtin(operator.not_, types.boolean)(number_not_impl)
|
| 1243 |
+
|
| 1244 |
+
|
| 1245 |
+
#------------------------------------------------------------------------------
|
| 1246 |
+
# Hashing numbers, see hashing.py
|
| 1247 |
+
|
| 1248 |
+
#-------------------------------------------------------------------------------
|
| 1249 |
+
# Implicit casts between numerics
|
| 1250 |
+
|
| 1251 |
+
# @lower_cast(types.IntegerLiteral, types.Integer)
|
| 1252 |
+
# @lower_cast(types.IntegerLiteral, types.Float)
|
| 1253 |
+
# @lower_cast(types.IntegerLiteral, types.Complex)
|
| 1254 |
+
def literal_int_to_number(context, builder, fromty, toty, val):
|
| 1255 |
+
lit = context.get_constant_generic(
|
| 1256 |
+
builder,
|
| 1257 |
+
fromty.literal_type,
|
| 1258 |
+
fromty.literal_value,
|
| 1259 |
+
)
|
| 1260 |
+
return context.cast(builder, lit, fromty.literal_type, toty)
|
| 1261 |
+
|
| 1262 |
+
|
| 1263 |
+
# @lower_cast(types.Integer, types.Integer)
|
| 1264 |
+
def integer_to_integer(context, builder, fromty, toty, val):
|
| 1265 |
+
if toty.bitwidth == fromty.bitwidth:
|
| 1266 |
+
# Just a change of signedness
|
| 1267 |
+
return val
|
| 1268 |
+
elif toty.bitwidth < fromty.bitwidth:
|
| 1269 |
+
# Downcast
|
| 1270 |
+
return builder.trunc(val, context.get_value_type(toty))
|
| 1271 |
+
elif fromty.signed:
|
| 1272 |
+
# Signed upcast
|
| 1273 |
+
return builder.sext(val, context.get_value_type(toty))
|
| 1274 |
+
else:
|
| 1275 |
+
# Unsigned upcast
|
| 1276 |
+
return builder.zext(val, context.get_value_type(toty))
|
| 1277 |
+
|
| 1278 |
+
# @lower_cast(types.Integer, types.voidptr)
|
| 1279 |
+
def integer_to_voidptr(context, builder, fromty, toty, val):
|
| 1280 |
+
return builder.inttoptr(val, context.get_value_type(toty))
|
| 1281 |
+
|
| 1282 |
+
# @lower_cast(types.Float, types.Float)
|
| 1283 |
+
def float_to_float(context, builder, fromty, toty, val):
|
| 1284 |
+
lty = context.get_value_type(toty)
|
| 1285 |
+
if fromty.bitwidth < toty.bitwidth:
|
| 1286 |
+
return builder.fpext(val, lty)
|
| 1287 |
+
else:
|
| 1288 |
+
return builder.fptrunc(val, lty)
|
| 1289 |
+
|
| 1290 |
+
# @lower_cast(types.Integer, types.Float)
|
| 1291 |
+
def integer_to_float(context, builder, fromty, toty, val):
|
| 1292 |
+
lty = context.get_value_type(toty)
|
| 1293 |
+
if fromty.signed:
|
| 1294 |
+
return builder.sitofp(val, lty)
|
| 1295 |
+
else:
|
| 1296 |
+
return builder.uitofp(val, lty)
|
| 1297 |
+
|
| 1298 |
+
# @lower_cast(types.Float, types.Integer)
|
| 1299 |
+
def float_to_integer(context, builder, fromty, toty, val):
|
| 1300 |
+
lty = context.get_value_type(toty)
|
| 1301 |
+
if toty.signed:
|
| 1302 |
+
return builder.fptosi(val, lty)
|
| 1303 |
+
else:
|
| 1304 |
+
return builder.fptoui(val, lty)
|
| 1305 |
+
|
| 1306 |
+
# @lower_cast(types.Float, types.Complex)
|
| 1307 |
+
# @lower_cast(types.Integer, types.Complex)
|
| 1308 |
+
def non_complex_to_complex(context, builder, fromty, toty, val):
|
| 1309 |
+
real = context.cast(builder, val, fromty, toty.underlying_float)
|
| 1310 |
+
imag = context.get_constant(toty.underlying_float, 0)
|
| 1311 |
+
|
| 1312 |
+
cmplx = context.make_complex(builder, toty)
|
| 1313 |
+
cmplx.real = real
|
| 1314 |
+
cmplx.imag = imag
|
| 1315 |
+
return cmplx._getvalue()
|
| 1316 |
+
|
| 1317 |
+
# @lower_cast(types.Complex, types.Complex)
|
| 1318 |
+
def complex_to_complex(context, builder, fromty, toty, val):
|
| 1319 |
+
srcty = fromty.underlying_float
|
| 1320 |
+
dstty = toty.underlying_float
|
| 1321 |
+
|
| 1322 |
+
src = context.make_complex(builder, fromty, value=val)
|
| 1323 |
+
dst = context.make_complex(builder, toty)
|
| 1324 |
+
dst.real = context.cast(builder, src.real, srcty, dstty)
|
| 1325 |
+
dst.imag = context.cast(builder, src.imag, srcty, dstty)
|
| 1326 |
+
return dst._getvalue()
|
| 1327 |
+
|
| 1328 |
+
# @lower_cast(types.Any, types.Boolean)
|
| 1329 |
+
def any_to_boolean(context, builder, fromty, toty, val):
|
| 1330 |
+
return context.is_true(builder, fromty, val)
|
| 1331 |
+
|
| 1332 |
+
# @lower_cast(types.Boolean, types.Number)
|
| 1333 |
+
def boolean_to_any(context, builder, fromty, toty, val):
|
| 1334 |
+
# Casting from boolean to anything first casts to int32
|
| 1335 |
+
asint = builder.zext(val, ir.IntType(32))
|
| 1336 |
+
return context.cast(builder, asint, types.int32, toty)
|
| 1337 |
+
|
| 1338 |
+
# @lower_cast(types.IntegerLiteral, types.Boolean)
# @lower_cast(types.BooleanLiteral, types.Boolean)
def literal_int_to_boolean(context, builder, fromty, toty, val):
    """Lower a literal-to-boolean cast.

    The literal's compile-time value is materialized as a constant of
    its underlying type and that constant is tested for truthiness.
    """
    const = context.get_constant_generic(
        builder,
        fromty.literal_type,
        fromty.literal_value,
    )
    return context.is_true(builder, fromty.literal_type, const)
|
| 1347 |
+
|
| 1348 |
+
#-------------------------------------------------------------------------------
|
| 1349 |
+
# Constants
|
| 1350 |
+
|
| 1351 |
+
# @lower_constant(types.Complex)
def constant_complex(context, builder, ty, pyval):
    """Build an LLVM literal struct constant {real, imag} for a Python
    complex value."""
    float_ty = ty.underlying_float
    parts = tuple(context.get_constant_generic(builder, float_ty, part)
                  for part in (pyval.real, pyval.imag))
    return Constant.literal_struct(parts)
|
| 1357 |
+
|
| 1358 |
+
# @lower_constant(types.Integer)
# @lower_constant(types.Float)
# @lower_constant(types.Boolean)
def constant_integer(context, builder, ty, pyval):
    """Build an LLVM constant for an integer, float or boolean value."""
    # See https://github.com/numba/numba/issues/6979
    # llvmlite ir.IntType specialises the formatting of the constant for a
    # cpython bool. A NumPy np.bool_ is not a cpython bool so force it to
    # be one so that the constant renders correctly!
    value = bool(pyval) if isinstance(pyval, np.bool_) else pyval
    return context.get_value_type(ty)(value)
|
| 1370 |
+
|
| 1371 |
+
|
| 1372 |
+
#-------------------------------------------------------------------------------
|
| 1373 |
+
# View
|
| 1374 |
+
|
| 1375 |
+
def scalar_view(scalar, viewty):
    """ Typing for the np scalar 'view' method. """
    is_number = isinstance(scalar, (types.Float, types.Integer))
    if is_number and isinstance(viewty, types.abstract.DTypeSpec):
        # Reinterpreting bits only makes sense when the item sizes match.
        if scalar.bitwidth != viewty.dtype.bitwidth:
            raise errors.TypingError(
                "Changing the dtype of a 0d array is only supported if the "
                "itemsize is unchanged")

        def impl(scalar, viewty):
            return viewer(scalar, viewty)

        return impl


# overload_method(types.Float, 'view')(scalar_view)
# overload_method(types.Integer, 'view')(scalar_view)
|
deepseek/lib/python3.10/site-packages/numba/np/npyimpl.py
ADDED
|
@@ -0,0 +1,873 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementation of functions in the Numpy package.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import math
|
| 7 |
+
import sys
|
| 8 |
+
import itertools
|
| 9 |
+
from collections import namedtuple
|
| 10 |
+
|
| 11 |
+
import llvmlite.ir as ir
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import operator
|
| 15 |
+
|
| 16 |
+
from numba.np import arrayobj, ufunc_db, numpy_support
|
| 17 |
+
from numba.np.ufunc.sigparse import parse_signature
|
| 18 |
+
from numba.core.imputils import (Registry, impl_ret_new_ref, force_error_model,
|
| 19 |
+
impl_ret_borrowed)
|
| 20 |
+
from numba.core import typing, types, utils, cgutils, callconv
|
| 21 |
+
from numba.np.numpy_support import (
|
| 22 |
+
ufunc_find_matching_loop, select_array_wrapper, from_dtype, _ufunc_loop_sig
|
| 23 |
+
)
|
| 24 |
+
from numba.np.arrayobj import _getitem_array_generic
|
| 25 |
+
from numba.core.typing import npydecl
|
| 26 |
+
from numba.core.extending import overload, intrinsic
|
| 27 |
+
|
| 28 |
+
from numba.core import errors
|
| 29 |
+
|
| 30 |
+
registry = Registry('npyimpl')
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
########################################################################
|
| 34 |
+
|
| 35 |
+
# In the way we generate code, ufuncs work with scalar as well as
|
| 36 |
+
# with array arguments. The following helper classes help dealing
|
| 37 |
+
# with scalar and array arguments in a regular way.
|
| 38 |
+
#
|
| 39 |
+
# In short, the classes provide a uniform interface. The interface
|
| 40 |
+
# handles the indexing of as many dimensions as the array may have.
|
| 41 |
+
# For scalars, all indexing is ignored and when the value is read,
|
| 42 |
+
# the scalar is returned. For arrays code for actual indexing is
|
| 43 |
+
# generated and reading performs the appropriate indirection.
|
| 44 |
+
|
| 45 |
+
class _ScalarIndexingHelper(object):
    """Indexing helper for scalar arguments: all indexing is a no-op."""

    def update_indices(self, loop_indices, name):
        # Scalars ignore loop indices entirely.
        pass

    def as_values(self):
        # Scalars expose no index values.
        pass
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class _ScalarHelper(object):
    """Helper wrapping a scalar argument (or result) behind the same
    interface as _ArrayHelper.

    Loading hands back the original value with no indexing and no memory
    backing it; this is the input-argument use-case. Storing (used only
    when generating a scalar ufunc's output) writes into a stack slot
    allocated at construction, read back later through ``return_val``.

    Reading back a stored value via ``load_data`` is not supported: it
    always returns the value captured at construction time.
    """

    def __init__(self, ctxt, bld, val, ty):
        self.context = ctxt
        self.builder = bld
        self.val = val
        self.base_type = ty
        intpty = ctxt.get_value_type(types.intp)
        # A scalar presents itself as a single-element shape.
        self.shape = [ir.Constant(intpty, 1)]

        # Booleans live in i1 registers rather than their i8 data form.
        if ty == types.boolean:
            slot_type = ir.IntType(1)
        else:
            slot_type = ctxt.get_data_type(ty)
        self._ptr = cgutils.alloca_once(bld, slot_type)

    def create_iter_indices(self):
        return _ScalarIndexingHelper()

    def load_data(self, indices):
        # Indices are irrelevant for a scalar.
        return self.val

    def store_data(self, indices, val):
        self.builder.store(val, self._ptr)

    @property
    def return_val(self):
        return self.builder.load(self._ptr)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper',
                                      ('array', 'indices'))):
    """Holds the per-dimension index slots (allocas) used when iterating
    an array argument."""

    def update_indices(self, loop_indices, name):
        bld = self.array.builder
        intpty = self.array.context.get_value_type(types.intp)
        ONE = ir.Constant(ir.IntType(intpty.width), 1)

        # we are only interested in as many inner dimensions as dimensions
        # the indexed array has (the outer dimensions are broadcast, so
        # ignoring the outer indices produces the desired result.
        offset = len(loop_indices) - len(self.indices)
        inner = loop_indices[offset:]
        for src, dst, dim in zip(inner, self.indices, self.array.shape):
            # Only follow the loop index along axes of extent > 1; axes of
            # extent 1 stay pinned at 0 (broadcasting).
            is_wide = bld.icmp_unsigned('>', dim, ONE)
            with bld.if_then(is_wide):
                bld.store(src, dst)

    def as_values(self):
        """
        The indexing helper is built using alloca for each value, so it
        actually contains pointers to the actual indices to load. Note
        that update_indices assumes the same. This method returns the
        indices as values
        """
        bld = self.array.builder
        return [bld.load(slot) for slot in self.indices]
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder',
                                               'shape', 'strides', 'data',
                                               'layout', 'base_type', 'ndim',
                                               'return_val'))):
    """Helper class to handle array arguments/result.
    It provides methods to generate code loading/storing specific
    items as well as support code for handling indices.
    """

    def create_iter_indices(self):
        # Allocate one zero-initialized intp slot per dimension.
        intpty = self.context.get_value_type(types.intp)
        ZERO = ir.Constant(ir.IntType(intpty.width), 0)

        slots = []
        for _ in range(self.ndim):
            slot = cgutils.alloca_once(self.builder, ir.IntType(intpty.width))
            self.builder.store(ZERO, slot)
            slots.append(slot)
        return _ArrayIndexingHelper(self, slots)

    def _load_effective_address(self, indices):
        # Compute the address of the item selected by `indices`.
        return cgutils.get_item_pointer2(self.context,
                                         self.builder,
                                         data=self.data,
                                         shape=self.shape,
                                         strides=self.strides,
                                         layout=self.layout,
                                         inds=indices)

    def load_data(self, indices):
        model = self.context.data_model_manager[self.base_type]
        address = self._load_effective_address(indices)
        return model.load_from_data_pointer(self.builder, address)

    def store_data(self, indices, value):
        ctx = self.context
        bld = self.builder
        # Convert from value representation to data representation
        # before writing into the array's backing memory.
        as_data = ctx.get_value_as_data(bld, self.base_type, value)
        assert ctx.get_data_type(self.base_type) == as_data.type
        bld.store(as_data, self._load_effective_address(indices))
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class _ArrayGUHelper(namedtuple('_ArrayHelper', ('context', 'builder',
                                                 'shape', 'strides', 'data',
                                                 'layout', 'base_type', 'ndim',
                                                 'inner_arr_ty', 'is_input_arg'))):
    """Helper class to handle array arguments/result.
    It provides methods to generate code loading/storing specific
    items as well as support code for handling indices.

    Contrary to _ArrayHelper, this class can create a view to a subarray
    (the "core" dimensions of a gufunc argument).
    """
    def create_iter_indices(self):
        # One index slot per *loop* dimension, i.e. the dimensions that
        # remain after removing the core (inner) dimensions.
        intpty = self.context.get_value_type(types.intp)
        ZERO = ir.Constant(ir.IntType(intpty.width), 0)

        indices = []
        for i in range(self.ndim - self.inner_arr_ty.ndim):
            x = cgutils.alloca_once(self.builder, ir.IntType(intpty.width))
            self.builder.store(ZERO, x)
            indices.append(x)
        return _ArrayIndexingHelper(self, indices)

    def _load_effective_address(self, indices):
        # Compute the address of the element/subarray selected by `indices`.
        context = self.context
        builder = self.builder
        arr_ty = types.Array(self.base_type, self.ndim, self.layout)
        arr = context.make_array(arr_ty)(context, builder, self.data)

        return cgutils.get_item_pointer2(context,
                                         builder,
                                         data=arr.data,
                                         shape=self.shape,
                                         strides=self.strides,
                                         layout=self.layout,
                                         inds=indices)

    def load_data(self, indices):
        """Load either a scalar, a 1-element 1d view (scalar output), or a
        generic subarray view, depending on the core signature."""
        context, builder = self.context, self.builder

        if self.inner_arr_ty.ndim == 0 and self.is_input_arg:
            # scalar case for input arguments
            model = context.data_model_manager[self.base_type]
            ptr = self._load_effective_address(indices)
            return model.load_from_data_pointer(builder, ptr)
        elif self.inner_arr_ty.ndim == 0 and not self.is_input_arg:
            # Output arrays are handled as 1d with shape=(1,) when its
            # signature represents a scalar. For instance: "(n),(m) -> ()"
            intpty = context.get_value_type(types.intp)
            one = intpty(1)

            fromty = types.Array(self.base_type, self.ndim, self.layout)
            toty = types.Array(self.base_type, 1, self.layout)
            itemsize = intpty(arrayobj.get_itemsize(context, fromty))

            # create a view from the original ndarray to a 1d array
            arr_from = self.context.make_array(fromty)(context,
                                                       builder,
                                                       self.data)
            arr_to = self.context.make_array(toty)(context, builder)
            arrayobj.populate_array(
                arr_to,
                data=self._load_effective_address(indices),
                shape=cgutils.pack_array(builder, [one]),
                strides=cgutils.pack_array(builder, [itemsize]),
                itemsize=arr_from.itemsize,
                meminfo=arr_from.meminfo,
                parent=arr_from.parent)
            return arr_to._getvalue()
        else:
            # generic case
            # getitem n-dim array -> m-dim array, where N > M
            index_types = (types.int64,) * (self.ndim - self.inner_arr_ty.ndim)
            arrty = types.Array(self.base_type, self.ndim, self.layout)
            arr = self.context.make_array(arrty)(context, builder, self.data)
            res = _getitem_array_generic(context, builder,
                                         self.inner_arr_ty, arrty, arr,
                                         index_types, indices)
            return impl_ret_borrowed(context, builder, self.inner_arr_ty, res)

    def guard_shape(self, loopshape):
        """Emit a runtime check that this array's loop dimensions match
        the overall loop shape; raises ValueError otherwise."""
        inner_ndim = self.inner_arr_ty.ndim

        def raise_impl(loop_shape, array_shape):
            # This would in fact be a test for broadcasting.
            # Broadcast would fail if, ignoring the core dimensions, the
            # remaining ones are different than indices given by loop shape.
            remaining = len(array_shape) - inner_ndim
            _raise = (remaining > len(loop_shape))
            if not _raise:
                for i in range(remaining):
                    _raise |= (array_shape[i] != loop_shape[i])
            if _raise:
                # Ideally we should call `np.broadcast_shapes` with loop and
                # array shapes. But since broadcasting is not supported here,
                # we just raise an error
                # TODO: check why raising a dynamic exception here fails
                raise ValueError('Loop and array shapes are incompatible')

        # Compile the check and call it with both shapes packed as tuples.
        context, builder = self.context, self.builder
        sig = types.none(
            types.UniTuple(types.intp, len(loopshape)),
            types.UniTuple(types.intp, len(self.shape)),
        )
        tup = (context.make_tuple(builder, sig.args[0], loopshape),
               context.make_tuple(builder, sig.args[1], self.shape))
        context.compile_internal(builder, raise_impl, sig, tup)

    def guard_match_core_dims(self, other: '_ArrayGUHelper', ndims: int):
        # arguments with the same signature should match their core dimensions
        #
        # @guvectorize('(n,m), (n,m) -> (n)')
        # def foo(x, y, res):
        #     ...
        #
        # x and y should have the same core (2D) dimensions
        def raise_impl(self_shape, other_shape):
            same = True
            # Compare the trailing `ndims` (core) axes of both shapes.
            a, b = len(self_shape) - ndims, len(other_shape) - ndims
            for i in range(ndims):
                same &= self_shape[a + i] == other_shape[b + i]
            if not same:
                # NumPy raises the following:
                # ValueError: gufunc: Input operand 1 has a mismatch in its
                # core dimension 0, with gufunc signature (n),(n) -> ()
                # (size 3 is different from 2)
                # But since we cannot raise a dynamic exception here, we just
                # (try) something meaningful
                msg = ('Operand has a mismatch in one of its core dimensions. '
                       'Please, check if all arguments to a @guvectorize '
                       'function have the same core dimensions.')
                raise ValueError(msg)

        # Compile the check and call it with both shapes packed as tuples.
        context, builder = self.context, self.builder
        sig = types.none(
            types.UniTuple(types.intp, len(self.shape)),
            types.UniTuple(types.intp, len(other.shape)),
        )
        tup = (context.make_tuple(builder, sig.args[0], self.shape),
               context.make_tuple(builder, sig.args[1], other.shape),)
        context.compile_internal(builder, raise_impl, sig, tup)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):
    """returns an instance of the appropriate Helper (either
    _ScalarHelper or _ArrayHelper) class to handle the argument.
    using the polymorphic interface of the Helper classes, scalar
    and array cases can be handled with the same code"""

    # first un-Optional Optionals
    if isinstance(tyinp, types.Optional):
        optional_ty = tyinp
        tyinp = tyinp.type
        inp = ctxt.cast(bld, inp, optional_ty, tyinp)

    # then prepare the arg for a concrete instance
    if isinstance(tyinp, types.ArrayCompatible):
        ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
        shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
        strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
        return _ArrayHelper(ctxt, bld, shape, strides, ary.data,
                            tyinp.layout, tyinp.dtype, tyinp.ndim, inp)

    is_scalar = (types.unliteral(tyinp) in types.number_domain | {types.boolean}
                 or isinstance(tyinp, types.scalars._NPDatetimeBase))
    if is_scalar:
        return _ScalarHelper(ctxt, bld, inp, tyinp)

    raise NotImplementedError('unsupported type for {0}: {1}'.format(where,
                                                                     str(tyinp)))
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
# Signature used when compiling _broadcast_onto via compile_internal:
# intp(src_ndim, src_shape*, dest_ndim, dest_shape*)
_broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),
                                 types.intp, types.CPointer(types.intp))
|
| 332 |
+
def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):
    '''Low-level utility function used in calculating a shape for
    an implicit output array. This function assumes that the
    destination shape is an LLVM pointer to a C-style array that was
    already initialized to a size of one along all axes.

    Returns an integer value:
    >= 1 : Succeeded. Return value should equal the number of dimensions in
           the destination shape.
    0 : Failed to broadcast because source shape is larger than the
        destination shape (this case should be weeded out at type
        checking).
    < 0 : Failed to broadcast onto destination axis, at axis number ==
          -(return_value + 1).
    '''
    if src_ndim > dest_ndim:
        # This check should have been done during type checking, but
        # let's be defensive anyway...
        return 0

    # Align the source shape with the trailing axes of the destination.
    dest_index = dest_ndim - src_ndim
    for src_index in range(src_ndim):
        src_dim_size = src_shape[src_index]
        dest_dim_size = dest_shape[dest_index]
        if dest_dim_size != 1:
            # The destination axis was already mutated by an earlier
            # argument, so the source axis must be 1 or match it exactly.
            if src_dim_size != dest_dim_size and src_dim_size != 1:
                return -(dest_index + 1)
        elif src_dim_size != 1:
            # Destination axis still at its initial size of one: adopt
            # the source axis size.
            dest_shape[dest_index] = src_dim_size
        dest_index += 1
    return dest_index
|
| 371 |
+
|
| 372 |
+
def _build_array(context, builder, array_ty, input_types, inputs):
    """Utility function to handle allocation of an implicit output array
    given the target context, builder, output array type, and a list of
    _ArrayHelper instances.

    Returns an _ArrayHelper wrapping the freshly allocated (and possibly
    __array_wrap__-ed) output array.
    """
    # First, strip optional types, ufunc loops are typed on concrete types
    input_types = [x.type if isinstance(x, types.Optional) else x
                   for x in input_types]

    intp_ty = context.get_value_type(types.intp)

    def make_intp_const(val):
        return context.get_constant(types.intp, val)

    ZERO = make_intp_const(0)
    ONE = make_intp_const(1)

    # Scratch buffers holding the current source shape and the accumulated
    # destination shape, passed by pointer to the compiled _broadcast_onto.
    src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
                                    "src_shape")
    dest_ndim = make_intp_const(array_ty.ndim)
    dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
                                     "dest_shape")
    dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)
                             for index in range(array_ty.ndim))

    # Initialize the destination shape with all ones.
    for dest_shape_addr in dest_shape_addrs:
        builder.store(ONE, dest_shape_addr)

    # For each argument, try to broadcast onto the destination shape,
    # mutating along any axis where the argument shape is not one and
    # the destination shape is one.
    for arg_number, arg in enumerate(inputs):
        if not hasattr(arg, "ndim"):  # Skip scalar arguments
            continue
        arg_ndim = make_intp_const(arg.ndim)
        for index in range(arg.ndim):
            builder.store(arg.shape[index],
                          cgutils.gep_inbounds(builder, src_shape, index))
        arg_result = context.compile_internal(
            builder, _broadcast_onto, _broadcast_onto_sig,
            [arg_ndim, src_shape, dest_ndim, dest_shape])
        # _broadcast_onto returns < 1 on failure (see its docstring).
        with cgutils.if_unlikely(builder,
                                 builder.icmp_signed('<', arg_result, ONE)):
            msg = "unable to broadcast argument %d to output array" % (
                arg_number,)

            loc = errors.loc_info.get('loc', None)
            if loc is not None:
                msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)

            context.call_conv.return_user_exc(builder, ValueError, (msg,))

    real_array_ty = array_ty.as_array

    # Materialize the computed destination shape and allocate the output.
    dest_shape_tup = tuple(builder.load(dest_shape_addr)
                           for dest_shape_addr in dest_shape_addrs)
    array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,
                                        dest_shape_tup)

    # Get the best argument to call __array_wrap__ on
    array_wrapper_index = select_array_wrapper(input_types)
    array_wrapper_ty = input_types[array_wrapper_index]
    try:
        # __array_wrap__(source wrapped array, out array) -> out wrapped array
        array_wrap = context.get_function('__array_wrap__',
                                          array_ty(array_wrapper_ty, real_array_ty))
    except NotImplementedError:
        # If it's the same priority as a regular array, assume we
        # should use the allocated array unchanged.
        if array_wrapper_ty.array_priority != types.Array.array_priority:
            raise
        out_val = array_val._getvalue()
    else:
        wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
        out_val = array_wrap(builder, wrap_args)

    ndim = array_ty.ndim
    shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
    strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
    return _ArrayHelper(context, builder, shape, strides, array_val.data,
                        array_ty.layout, array_ty.dtype, ndim,
                        out_val)
|
| 454 |
+
|
| 455 |
+
# ufuncs either return a single result when nout == 1, else a tuple of results
|
| 456 |
+
|
| 457 |
+
def _unpack_output_types(ufunc, sig):
    """Normalize the output type(s) of *sig* to a list.

    A single-output ufunc has a plain return type; multi-output ufuncs
    carry a tuple of return types.
    """
    return [sig.return_type] if ufunc.nout == 1 else list(sig.return_type)
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def _unpack_output_values(ufunc, builder, values):
    """Normalize the generated output value(s) to a list of values."""
    if ufunc.nout != 1:
        return cgutils.unpack_tuple(builder, values)
    return [values]
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
def _pack_output_values(ufunc, context, builder, typ, values):
    """Inverse of _unpack_output_values: collapse a list of output values
    into the ufunc's return value (single value or tuple)."""
    if ufunc.nout != 1:
        return context.make_tuple(builder, typ, values)
    return values[0]
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel_class):
    # This is the code generator that builds all the looping needed
    # to execute a numpy functions over several dimensions (including
    # scalar cases).
    #
    # context - the code generation context
    # builder - the code emitter
    # sig - signature of the ufunc
    # args - the args to the ufunc
    # ufunc - the ufunc itself
    # kernel_class - a code generating subclass of _Kernel that provides
    #                the per-element `generate(...)` code

    # Wrap every argument (scalar or array) in a uniform helper.
    arguments = [_prepare_argument(context, builder, arg, tyarg)
                 for arg, tyarg in zip(args, sig.args)]

    if len(arguments) < ufunc.nin:
        raise RuntimeError(
            "Not enough inputs to {}, expected {} got {}"
            .format(ufunc.__name__, ufunc.nin, len(arguments)))

    # Allocate any output that the caller did not pass explicitly;
    # incref the ones that were passed (under NRT).
    for out_i, ret_ty in enumerate(_unpack_output_types(ufunc, sig)):
        if ufunc.nin + out_i >= len(arguments):
            # this out argument is not provided
            if isinstance(ret_ty, types.ArrayCompatible):
                output = _build_array(context, builder, ret_ty, sig.args, arguments)
            else:
                output = _prepare_argument(
                    context, builder,
                    ir.Constant(context.get_value_type(ret_ty), None), ret_ty)
            arguments.append(output)
        elif context.enable_nrt:
            # Incref the output
            context.nrt.incref(builder, ret_ty, args[ufunc.nin + out_i])

    inputs = arguments[:ufunc.nin]
    outputs = arguments[ufunc.nin:]
    assert len(outputs) == ufunc.nout

    # Signature of the element-wise loop, derived from the helpers' types.
    outer_sig = _ufunc_loop_sig(
        [a.base_type for a in outputs],
        [a.base_type for a in inputs]
    )
    kernel = kernel_class(context, builder, outer_sig)
    intpty = context.get_value_type(types.intp)

    indices = [inp.create_iter_indices() for inp in inputs]

    # assume outputs are all the same size, which numpy requires

    loopshape = outputs[0].shape

    # count the number of C and F layout arrays, respectively
    input_layouts = [inp.layout for inp in inputs
                     if isinstance(inp, _ArrayHelper)]
    num_c_layout = len([x for x in input_layouts if x == 'C'])
    num_f_layout = len([x for x in input_layouts if x == 'F'])

    # Only choose F iteration order if more arrays are in F layout.
    # Default to C order otherwise.
    # This is a best effort for performance. NumPy has more fancy logic that
    # uses array iterators in non-trivial cases.
    if num_f_layout > num_c_layout:
        order = 'F'
    else:
        order = 'C'

    # Emit the nested loops; in each iteration load the inputs, run the
    # kernel, and store all outputs.
    with cgutils.loop_nest(builder, loopshape, intp=intpty, order=order) as loop_indices:
        vals_in = []
        for i, (index, arg) in enumerate(zip(indices, inputs)):
            index.update_indices(loop_indices, i)
            vals_in.append(arg.load_data(index.as_values()))

        vals_out = _unpack_output_values(ufunc, builder, kernel.generate(*vals_in))
        for val_out, output in zip(vals_out, outputs):
            output.store_data(loop_indices, val_out)

    out = _pack_output_values(ufunc, context, builder, sig.return_type, [o.return_val for o in outputs])
    return impl_ret_new_ref(context, builder, sig.return_type, out)
|
| 556 |
+
|
| 557 |
+
|
| 558 |
+
def numpy_gufunc_kernel(context, builder, sig, args, ufunc, kernel_class):
|
| 559 |
+
arguments = []
|
| 560 |
+
expected_ndims = kernel_class.dufunc.expected_ndims()
|
| 561 |
+
expected_ndims = expected_ndims[0] + expected_ndims[1]
|
| 562 |
+
is_input = [True] * ufunc.nin + [False] * ufunc.nout
|
| 563 |
+
for arg, ty, exp_ndim, is_inp in zip(args, sig.args, expected_ndims, is_input): # noqa: E501
|
| 564 |
+
if isinstance(ty, types.ArrayCompatible):
|
| 565 |
+
# Create an array helper that iteration returns a subarray
|
| 566 |
+
# with ndim specified by "exp_ndim"
|
| 567 |
+
arr = context.make_array(ty)(context, builder, arg)
|
| 568 |
+
shape = cgutils.unpack_tuple(builder, arr.shape, ty.ndim)
|
| 569 |
+
strides = cgutils.unpack_tuple(builder, arr.strides, ty.ndim)
|
| 570 |
+
inner_arr_ty = ty.copy(ndim=exp_ndim)
|
| 571 |
+
ndim = ty.ndim
|
| 572 |
+
layout = ty.layout
|
| 573 |
+
base_type = ty.dtype
|
| 574 |
+
array_helper = _ArrayGUHelper(context, builder,
|
| 575 |
+
shape, strides, arg,
|
| 576 |
+
layout, base_type, ndim,
|
| 577 |
+
inner_arr_ty, is_inp)
|
| 578 |
+
arguments.append(array_helper)
|
| 579 |
+
else:
|
| 580 |
+
scalar_helper = _ScalarHelper(context, builder, arg, ty)
|
| 581 |
+
arguments.append(scalar_helper)
|
| 582 |
+
kernel = kernel_class(context, builder, sig)
|
| 583 |
+
|
| 584 |
+
layouts = [arg.layout for arg in arguments
|
| 585 |
+
if isinstance(arg, _ArrayGUHelper)]
|
| 586 |
+
num_c_layout = len([x for x in layouts if x == 'C'])
|
| 587 |
+
num_f_layout = len([x for x in layouts if x == 'F'])
|
| 588 |
+
|
| 589 |
+
# Only choose F iteration order if more arrays are in F layout.
|
| 590 |
+
# Default to C order otherwise.
|
| 591 |
+
# This is a best effort for performance. NumPy has more fancy logic that
|
| 592 |
+
# uses array iterators in non-trivial cases.
|
| 593 |
+
if num_f_layout > num_c_layout:
|
| 594 |
+
order = 'F'
|
| 595 |
+
else:
|
| 596 |
+
order = 'C'
|
| 597 |
+
|
| 598 |
+
outputs = arguments[ufunc.nin:]
|
| 599 |
+
intpty = context.get_value_type(types.intp)
|
| 600 |
+
indices = [inp.create_iter_indices() for inp in arguments]
|
| 601 |
+
loopshape_ndim = outputs[0].ndim - outputs[0].inner_arr_ty.ndim
|
| 602 |
+
loopshape = outputs[0].shape[ : loopshape_ndim]
|
| 603 |
+
|
| 604 |
+
_sig = parse_signature(ufunc.gufunc_builder.signature)
|
| 605 |
+
for (idx_a, sig_a), (idx_b, sig_b) in itertools.combinations(
|
| 606 |
+
zip(range(len(arguments)),
|
| 607 |
+
_sig[0] + _sig[1]),
|
| 608 |
+
r = 2
|
| 609 |
+
):
|
| 610 |
+
# For each pair of arguments, both inputs and outputs, must match their
|
| 611 |
+
# inner dimensions if their signatures are the same.
|
| 612 |
+
arg_a, arg_b = arguments[idx_a], arguments[idx_b]
|
| 613 |
+
if sig_a == sig_b and \
|
| 614 |
+
all(isinstance(x, _ArrayGUHelper) for x in (arg_a, arg_b)):
|
| 615 |
+
arg_a, arg_b = arguments[idx_a], arguments[idx_b]
|
| 616 |
+
arg_a.guard_match_core_dims(arg_b, len(sig_a))
|
| 617 |
+
|
| 618 |
+
for arg in arguments[:ufunc.nin]:
|
| 619 |
+
if isinstance(arg, _ArrayGUHelper):
|
| 620 |
+
arg.guard_shape(loopshape)
|
| 621 |
+
|
| 622 |
+
with cgutils.loop_nest(builder,
|
| 623 |
+
loopshape,
|
| 624 |
+
intp=intpty,
|
| 625 |
+
order=order) as loop_indices:
|
| 626 |
+
vals_in = []
|
| 627 |
+
for i, (index, arg) in enumerate(zip(indices, arguments)):
|
| 628 |
+
index.update_indices(loop_indices, i)
|
| 629 |
+
vals_in.append(arg.load_data(index.as_values()))
|
| 630 |
+
|
| 631 |
+
kernel.generate(*vals_in)
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
# Kernels are the code to be executed inside the multidimensional loop.
|
| 635 |
+
class _Kernel(object):
|
| 636 |
+
def __init__(self, context, builder, outer_sig):
|
| 637 |
+
self.context = context
|
| 638 |
+
self.builder = builder
|
| 639 |
+
self.outer_sig = outer_sig
|
| 640 |
+
|
| 641 |
+
def cast(self, val, fromty, toty):
|
| 642 |
+
"""Numpy uses cast semantics that are different from standard Python
|
| 643 |
+
(for example, it does allow casting from complex to float).
|
| 644 |
+
|
| 645 |
+
This method acts as a patch to context.cast so that it allows
|
| 646 |
+
complex to real/int casts.
|
| 647 |
+
|
| 648 |
+
"""
|
| 649 |
+
if (isinstance(fromty, types.Complex) and
|
| 650 |
+
not isinstance(toty, types.Complex)):
|
| 651 |
+
# attempt conversion of the real part to the specified type.
|
| 652 |
+
# note that NumPy issues a warning in this kind of conversions
|
| 653 |
+
newty = fromty.underlying_float
|
| 654 |
+
attr = self.context.get_getattr(fromty, 'real')
|
| 655 |
+
val = attr(self.context, self.builder, fromty, val, 'real')
|
| 656 |
+
fromty = newty
|
| 657 |
+
# let the regular cast do the rest...
|
| 658 |
+
|
| 659 |
+
return self.context.cast(self.builder, val, fromty, toty)
|
| 660 |
+
|
| 661 |
+
def generate(self, *args):
|
| 662 |
+
isig = self.inner_sig
|
| 663 |
+
osig = self.outer_sig
|
| 664 |
+
cast_args = [self.cast(val, inty, outty)
|
| 665 |
+
for val, inty, outty in
|
| 666 |
+
zip(args, osig.args, isig.args)]
|
| 667 |
+
if self.cres.objectmode:
|
| 668 |
+
func_type = self.context.call_conv.get_function_type(
|
| 669 |
+
types.pyobject, [types.pyobject] * len(isig.args))
|
| 670 |
+
else:
|
| 671 |
+
func_type = self.context.call_conv.get_function_type(
|
| 672 |
+
isig.return_type, isig.args)
|
| 673 |
+
module = self.builder.block.function.module
|
| 674 |
+
entry_point = cgutils.get_or_insert_function(
|
| 675 |
+
module, func_type,
|
| 676 |
+
self.cres.fndesc.llvm_func_name)
|
| 677 |
+
entry_point.attributes.add("alwaysinline")
|
| 678 |
+
|
| 679 |
+
_, res = self.context.call_conv.call_function(
|
| 680 |
+
self.builder, entry_point, isig.return_type, isig.args,
|
| 681 |
+
cast_args)
|
| 682 |
+
return self.cast(res, isig.return_type, osig.return_type)
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
def _ufunc_db_function(ufunc):
|
| 686 |
+
"""Use the ufunc loop type information to select the code generation
|
| 687 |
+
function from the table provided by the dict_of_kernels. The dict
|
| 688 |
+
of kernels maps the loop identifier to a function with the
|
| 689 |
+
following signature: (context, builder, signature, args).
|
| 690 |
+
|
| 691 |
+
The loop type information has the form 'AB->C'. The letters to the
|
| 692 |
+
left of '->' are the input types (specified as NumPy letter
|
| 693 |
+
types). The letters to the right of '->' are the output
|
| 694 |
+
types. There must be 'ufunc.nin' letters to the left of '->', and
|
| 695 |
+
'ufunc.nout' letters to the right.
|
| 696 |
+
|
| 697 |
+
For example, a binary float loop resulting in a float, will have
|
| 698 |
+
the following signature: 'ff->f'.
|
| 699 |
+
|
| 700 |
+
A given ufunc implements many loops. The list of loops implemented
|
| 701 |
+
for a given ufunc can be accessed using the 'types' attribute in
|
| 702 |
+
the ufunc object. The NumPy machinery selects the first loop that
|
| 703 |
+
fits a given calling signature (in our case, what we call the
|
| 704 |
+
outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'.
|
| 705 |
+
"""
|
| 706 |
+
|
| 707 |
+
class _KernelImpl(_Kernel):
|
| 708 |
+
def __init__(self, context, builder, outer_sig):
|
| 709 |
+
super(_KernelImpl, self).__init__(context, builder, outer_sig)
|
| 710 |
+
loop = ufunc_find_matching_loop(
|
| 711 |
+
ufunc, outer_sig.args + tuple(_unpack_output_types(ufunc, outer_sig)))
|
| 712 |
+
self.fn = context.get_ufunc_info(ufunc).get(loop.ufunc_sig)
|
| 713 |
+
self.inner_sig = _ufunc_loop_sig(loop.outputs, loop.inputs)
|
| 714 |
+
|
| 715 |
+
if self.fn is None:
|
| 716 |
+
msg = "Don't know how to lower ufunc '{0}' for loop '{1}'"
|
| 717 |
+
raise NotImplementedError(msg.format(ufunc.__name__, loop))
|
| 718 |
+
|
| 719 |
+
def generate(self, *args):
|
| 720 |
+
isig = self.inner_sig
|
| 721 |
+
osig = self.outer_sig
|
| 722 |
+
|
| 723 |
+
cast_args = [self.cast(val, inty, outty)
|
| 724 |
+
for val, inty, outty in zip(args, osig.args,
|
| 725 |
+
isig.args)]
|
| 726 |
+
with force_error_model(self.context, 'numpy'):
|
| 727 |
+
res = self.fn(self.context, self.builder, isig, cast_args)
|
| 728 |
+
dmm = self.context.data_model_manager
|
| 729 |
+
res = dmm[isig.return_type].from_return(self.builder, res)
|
| 730 |
+
return self.cast(res, isig.return_type, osig.return_type)
|
| 731 |
+
|
| 732 |
+
return _KernelImpl
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
################################################################################
|
| 736 |
+
# Helper functions that register the ufuncs
|
| 737 |
+
|
| 738 |
+
def register_ufunc_kernel(ufunc, kernel, lower):
|
| 739 |
+
def do_ufunc(context, builder, sig, args):
|
| 740 |
+
return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel)
|
| 741 |
+
|
| 742 |
+
_any = types.Any
|
| 743 |
+
in_args = (_any,) * ufunc.nin
|
| 744 |
+
|
| 745 |
+
# Add a lowering for each out argument that is missing.
|
| 746 |
+
for n_explicit_out in range(ufunc.nout + 1):
|
| 747 |
+
out_args = (types.Array,) * n_explicit_out
|
| 748 |
+
lower(ufunc, *in_args, *out_args)(do_ufunc)
|
| 749 |
+
|
| 750 |
+
return kernel
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
def register_unary_operator_kernel(operator, ufunc, kernel, lower,
|
| 754 |
+
inplace=False):
|
| 755 |
+
assert not inplace # are there any inplace unary operators?
|
| 756 |
+
def lower_unary_operator(context, builder, sig, args):
|
| 757 |
+
return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel)
|
| 758 |
+
_arr_kind = types.Array
|
| 759 |
+
lower(operator, _arr_kind)(lower_unary_operator)
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def register_binary_operator_kernel(op, ufunc, kernel, lower, inplace=False):
|
| 763 |
+
def lower_binary_operator(context, builder, sig, args):
|
| 764 |
+
return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel)
|
| 765 |
+
|
| 766 |
+
def lower_inplace_operator(context, builder, sig, args):
|
| 767 |
+
# The visible signature is (A, B) -> A
|
| 768 |
+
# The implementation's signature (with explicit output)
|
| 769 |
+
# is (A, B, A) -> A
|
| 770 |
+
args = tuple(args) + (args[0],)
|
| 771 |
+
sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],))
|
| 772 |
+
return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel)
|
| 773 |
+
|
| 774 |
+
_any = types.Any
|
| 775 |
+
_arr_kind = types.Array
|
| 776 |
+
formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)]
|
| 777 |
+
for sig in formal_sigs:
|
| 778 |
+
if not inplace:
|
| 779 |
+
lower(op, *sig)(lower_binary_operator)
|
| 780 |
+
else:
|
| 781 |
+
lower(op, *sig)(lower_inplace_operator)
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
################################################################################
|
| 785 |
+
# Use the contents of ufunc_db to initialize the supported ufuncs
|
| 786 |
+
|
| 787 |
+
@registry.lower(operator.pos, types.Array)
|
| 788 |
+
def array_positive_impl(context, builder, sig, args):
|
| 789 |
+
'''Lowering function for +(array) expressions. Defined here
|
| 790 |
+
(numba.targets.npyimpl) since the remaining array-operator
|
| 791 |
+
lowering functions are also registered in this module.
|
| 792 |
+
'''
|
| 793 |
+
class _UnaryPositiveKernel(_Kernel):
|
| 794 |
+
def generate(self, *args):
|
| 795 |
+
[val] = args
|
| 796 |
+
return val
|
| 797 |
+
|
| 798 |
+
return numpy_ufunc_kernel(context, builder, sig, args, np.positive,
|
| 799 |
+
_UnaryPositiveKernel)
|
| 800 |
+
|
| 801 |
+
|
| 802 |
+
def register_ufuncs(ufuncs, lower):
|
| 803 |
+
kernels = {}
|
| 804 |
+
for ufunc in ufuncs:
|
| 805 |
+
db_func = _ufunc_db_function(ufunc)
|
| 806 |
+
kernels[ufunc] = register_ufunc_kernel(ufunc, db_func, lower)
|
| 807 |
+
|
| 808 |
+
for _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map,
|
| 809 |
+
npydecl.NumpyRulesArrayOperator._op_map,
|
| 810 |
+
):
|
| 811 |
+
for operator, ufunc_name in _op_map.items():
|
| 812 |
+
ufunc = getattr(np, ufunc_name)
|
| 813 |
+
kernel = kernels[ufunc]
|
| 814 |
+
if ufunc.nin == 1:
|
| 815 |
+
register_unary_operator_kernel(operator, ufunc, kernel, lower)
|
| 816 |
+
elif ufunc.nin == 2:
|
| 817 |
+
register_binary_operator_kernel(operator, ufunc, kernel, lower)
|
| 818 |
+
else:
|
| 819 |
+
raise RuntimeError("There shouldn't be any non-unary or binary operators")
|
| 820 |
+
|
| 821 |
+
for _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map,
|
| 822 |
+
):
|
| 823 |
+
for operator, ufunc_name in _op_map.items():
|
| 824 |
+
ufunc = getattr(np, ufunc_name)
|
| 825 |
+
kernel = kernels[ufunc]
|
| 826 |
+
if ufunc.nin == 1:
|
| 827 |
+
register_unary_operator_kernel(operator, ufunc, kernel, lower,
|
| 828 |
+
inplace=True)
|
| 829 |
+
elif ufunc.nin == 2:
|
| 830 |
+
register_binary_operator_kernel(operator, ufunc, kernel, lower,
|
| 831 |
+
inplace=True)
|
| 832 |
+
else:
|
| 833 |
+
raise RuntimeError("There shouldn't be any non-unary or binary operators")
|
| 834 |
+
|
| 835 |
+
|
| 836 |
+
register_ufuncs(ufunc_db.get_ufuncs(), registry.lower)
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
@intrinsic
|
| 840 |
+
def _make_dtype_object(typingctx, desc):
|
| 841 |
+
"""Given a string or NumberClass description *desc*, returns the dtype object.
|
| 842 |
+
"""
|
| 843 |
+
def from_nb_type(nb_type):
|
| 844 |
+
return_type = types.DType(nb_type)
|
| 845 |
+
sig = return_type(desc)
|
| 846 |
+
|
| 847 |
+
def codegen(context, builder, signature, args):
|
| 848 |
+
# All dtype objects are dummy values in LLVM.
|
| 849 |
+
# They only exist in the type level.
|
| 850 |
+
return context.get_dummy_value()
|
| 851 |
+
|
| 852 |
+
return sig, codegen
|
| 853 |
+
|
| 854 |
+
if isinstance(desc, types.Literal):
|
| 855 |
+
# Convert the str description into np.dtype then to numba type.
|
| 856 |
+
nb_type = from_dtype(np.dtype(desc.literal_value))
|
| 857 |
+
return from_nb_type(nb_type)
|
| 858 |
+
elif isinstance(desc, types.functions.NumberClass):
|
| 859 |
+
thestr = str(desc.dtype)
|
| 860 |
+
# Convert the str description into np.dtype then to numba type.
|
| 861 |
+
nb_type = from_dtype(np.dtype(thestr))
|
| 862 |
+
return from_nb_type(nb_type)
|
| 863 |
+
|
| 864 |
+
@overload(np.dtype)
|
| 865 |
+
def numpy_dtype(desc):
|
| 866 |
+
"""Provide an implementation so that numpy.dtype function can be lowered.
|
| 867 |
+
"""
|
| 868 |
+
if isinstance(desc, (types.Literal, types.functions.NumberClass)):
|
| 869 |
+
def imp(desc):
|
| 870 |
+
return _make_dtype_object(desc)
|
| 871 |
+
return imp
|
| 872 |
+
else:
|
| 873 |
+
raise errors.NumbaTypeError('unknown dtype descriptor: {}'.format(desc))
|
deepseek/lib/python3.10/site-packages/numba/np/polynomial/__init__.py
ADDED
|
File without changes
|
deepseek/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_core.cpython-310.pyc
ADDED
|
Binary file (6.59 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/numba/np/polynomial/polynomial_functions.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementation of operations involving polynomials.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numpy.polynomial import polynomial as poly
|
| 8 |
+
from numpy.polynomial import polyutils as pu
|
| 9 |
+
|
| 10 |
+
from numba import literal_unroll
|
| 11 |
+
from numba.core import types, errors
|
| 12 |
+
from numba.core.extending import overload
|
| 13 |
+
from numba.np.numpy_support import type_can_asarray, as_dtype, from_dtype
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@overload(np.roots)
|
| 17 |
+
def roots_impl(p):
|
| 18 |
+
|
| 19 |
+
# cast int vectors to float cf. numpy, this is a bit dicey as
|
| 20 |
+
# the roots could be complex which will fail anyway
|
| 21 |
+
ty = getattr(p, 'dtype', p)
|
| 22 |
+
if isinstance(ty, types.Integer):
|
| 23 |
+
cast_t = np.float64
|
| 24 |
+
else:
|
| 25 |
+
cast_t = as_dtype(ty)
|
| 26 |
+
|
| 27 |
+
def roots_impl(p):
|
| 28 |
+
# impl based on numpy:
|
| 29 |
+
# https://github.com/numpy/numpy/blob/master/numpy/lib/polynomial.py
|
| 30 |
+
|
| 31 |
+
if len(p.shape) != 1:
|
| 32 |
+
raise ValueError("Input must be a 1d array.")
|
| 33 |
+
|
| 34 |
+
non_zero = np.nonzero(p)[0]
|
| 35 |
+
|
| 36 |
+
if len(non_zero) == 0:
|
| 37 |
+
return np.zeros(0, dtype=cast_t)
|
| 38 |
+
|
| 39 |
+
tz = len(p) - non_zero[-1] - 1
|
| 40 |
+
|
| 41 |
+
# pull out the coeffs selecting between possible zero pads
|
| 42 |
+
p = p[int(non_zero[0]):int(non_zero[-1]) + 1]
|
| 43 |
+
|
| 44 |
+
n = len(p)
|
| 45 |
+
if n > 1:
|
| 46 |
+
# construct companion matrix, ensure fortran order
|
| 47 |
+
# to give to eigvals, write to upper diag and then
|
| 48 |
+
# transpose.
|
| 49 |
+
A = np.diag(np.ones((n - 2,), cast_t), 1).T
|
| 50 |
+
A[0, :] = -p[1:] / p[0] # normalize
|
| 51 |
+
roots = np.linalg.eigvals(A)
|
| 52 |
+
else:
|
| 53 |
+
roots = np.zeros(0, dtype=cast_t)
|
| 54 |
+
|
| 55 |
+
# add in additional zeros on the end if needed
|
| 56 |
+
if tz > 0:
|
| 57 |
+
return np.hstack((roots, np.zeros(tz, dtype=cast_t)))
|
| 58 |
+
else:
|
| 59 |
+
return roots
|
| 60 |
+
|
| 61 |
+
return roots_impl
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@overload(pu.trimseq)
|
| 65 |
+
def polyutils_trimseq(seq):
|
| 66 |
+
if not type_can_asarray(seq):
|
| 67 |
+
msg = 'The argument "seq" must be array-like'
|
| 68 |
+
raise errors.TypingError(msg)
|
| 69 |
+
|
| 70 |
+
if isinstance(seq, types.BaseTuple):
|
| 71 |
+
msg = 'Unsupported type %r for argument "seq"'
|
| 72 |
+
raise errors.TypingError(msg % (seq))
|
| 73 |
+
|
| 74 |
+
if np.ndim(seq) > 1:
|
| 75 |
+
msg = 'Coefficient array is not 1-d'
|
| 76 |
+
raise errors.NumbaValueError(msg)
|
| 77 |
+
|
| 78 |
+
def impl(seq):
|
| 79 |
+
if len(seq) == 0:
|
| 80 |
+
return seq
|
| 81 |
+
else:
|
| 82 |
+
for i in range(len(seq) - 1, -1, -1):
|
| 83 |
+
if seq[i] != 0:
|
| 84 |
+
break
|
| 85 |
+
return seq[:i + 1]
|
| 86 |
+
|
| 87 |
+
return impl
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@overload(pu.as_series)
|
| 91 |
+
def polyutils_as_series(alist, trim=True):
|
| 92 |
+
if not type_can_asarray(alist):
|
| 93 |
+
msg = 'The argument "alist" must be array-like'
|
| 94 |
+
raise errors.TypingError(msg)
|
| 95 |
+
|
| 96 |
+
if not isinstance(trim, (bool, types.Boolean)):
|
| 97 |
+
msg = 'The argument "trim" must be boolean'
|
| 98 |
+
raise errors.TypingError(msg)
|
| 99 |
+
|
| 100 |
+
res_dtype = np.float64
|
| 101 |
+
|
| 102 |
+
tuple_input = isinstance(alist, types.BaseTuple)
|
| 103 |
+
list_input = isinstance(alist, types.List)
|
| 104 |
+
if tuple_input:
|
| 105 |
+
if np.any(np.array([np.ndim(a) > 1 for a in alist])):
|
| 106 |
+
raise errors.NumbaValueError("Coefficient array is not 1-d")
|
| 107 |
+
|
| 108 |
+
res_dtype = _poly_result_dtype(*alist)
|
| 109 |
+
|
| 110 |
+
elif list_input:
|
| 111 |
+
dt = as_dtype(_get_list_type(alist))
|
| 112 |
+
res_dtype = np.result_type(dt, np.float64)
|
| 113 |
+
|
| 114 |
+
else:
|
| 115 |
+
if np.ndim(alist) <= 2:
|
| 116 |
+
res_dtype = np.result_type(res_dtype, as_dtype(alist.dtype))
|
| 117 |
+
else:
|
| 118 |
+
# If total dimension has ndim > 2, then coeff arrays are not 1D
|
| 119 |
+
raise errors.NumbaValueError("Coefficient array is not 1-d")
|
| 120 |
+
|
| 121 |
+
def impl(alist, trim=True):
|
| 122 |
+
if tuple_input:
|
| 123 |
+
arrays = []
|
| 124 |
+
for item in literal_unroll(alist):
|
| 125 |
+
arrays.append(np.atleast_1d(np.asarray(item)).astype(res_dtype))
|
| 126 |
+
|
| 127 |
+
elif list_input:
|
| 128 |
+
arrays = [np.atleast_1d(np.asarray(a)).astype(res_dtype)
|
| 129 |
+
for a in alist]
|
| 130 |
+
|
| 131 |
+
else:
|
| 132 |
+
alist_arr = np.asarray(alist)
|
| 133 |
+
arrays = [np.atleast_1d(np.asarray(a)).astype(res_dtype)
|
| 134 |
+
for a in alist_arr]
|
| 135 |
+
|
| 136 |
+
if min([a.size for a in arrays]) == 0:
|
| 137 |
+
raise ValueError("Coefficient array is empty")
|
| 138 |
+
|
| 139 |
+
if trim:
|
| 140 |
+
arrays = [pu.trimseq(a) for a in arrays]
|
| 141 |
+
|
| 142 |
+
ret = arrays
|
| 143 |
+
return ret
|
| 144 |
+
|
| 145 |
+
return impl
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def _get_list_type(l):
|
| 149 |
+
# A helper function that takes a list (possibly nested) and returns its
|
| 150 |
+
# dtype. Returns a Numba type.
|
| 151 |
+
dt = l.dtype
|
| 152 |
+
if (not isinstance(dt, types.Number)) and type_can_asarray(dt):
|
| 153 |
+
return _get_list_type(dt)
|
| 154 |
+
else:
|
| 155 |
+
return dt
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def _poly_result_dtype(*args):
|
| 159 |
+
# A helper function that takes a tuple of inputs and returns their result
|
| 160 |
+
# dtype. Used for poly functions. Returns a NumPy dtype.
|
| 161 |
+
res_dtype = np.float64
|
| 162 |
+
for item in args:
|
| 163 |
+
if isinstance(item, types.BaseTuple):
|
| 164 |
+
s1 = item.types
|
| 165 |
+
elif isinstance(item, types.List):
|
| 166 |
+
s1 = [_get_list_type(item)]
|
| 167 |
+
elif isinstance(item, types.Number):
|
| 168 |
+
s1 = [item]
|
| 169 |
+
elif isinstance(item, types.Array):
|
| 170 |
+
s1 = [item.dtype]
|
| 171 |
+
else:
|
| 172 |
+
msg = 'Input dtype must be scalar'
|
| 173 |
+
raise errors.TypingError(msg)
|
| 174 |
+
|
| 175 |
+
try:
|
| 176 |
+
l = [as_dtype(t) for t in s1]
|
| 177 |
+
l.append(res_dtype)
|
| 178 |
+
res_dtype = (np.result_type(*l))
|
| 179 |
+
except errors.NumbaNotImplementedError:
|
| 180 |
+
msg = 'Input dtype must be scalar.'
|
| 181 |
+
raise errors.TypingError(msg)
|
| 182 |
+
|
| 183 |
+
return from_dtype(res_dtype)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@overload(poly.polyadd)
|
| 187 |
+
def numpy_polyadd(c1, c2):
|
| 188 |
+
if not type_can_asarray(c1):
|
| 189 |
+
msg = 'The argument "c1" must be array-like'
|
| 190 |
+
raise errors.TypingError(msg)
|
| 191 |
+
|
| 192 |
+
if not type_can_asarray(c2):
|
| 193 |
+
msg = 'The argument "c2" must be array-like'
|
| 194 |
+
raise errors.TypingError(msg)
|
| 195 |
+
|
| 196 |
+
def impl(c1, c2):
|
| 197 |
+
arr1, arr2 = pu.as_series((c1, c2))
|
| 198 |
+
diff = len(arr2) - len(arr1)
|
| 199 |
+
if diff > 0:
|
| 200 |
+
zr = np.zeros(diff)
|
| 201 |
+
arr1 = np.concatenate((arr1, zr))
|
| 202 |
+
if diff < 0:
|
| 203 |
+
zr = np.zeros(-diff)
|
| 204 |
+
arr2 = np.concatenate((arr2, zr))
|
| 205 |
+
val = arr1 + arr2
|
| 206 |
+
return pu.trimseq(val)
|
| 207 |
+
|
| 208 |
+
return impl
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
@overload(poly.polysub)
|
| 212 |
+
def numpy_polysub(c1, c2):
|
| 213 |
+
if not type_can_asarray(c1):
|
| 214 |
+
msg = 'The argument "c1" must be array-like'
|
| 215 |
+
raise errors.TypingError(msg)
|
| 216 |
+
|
| 217 |
+
if not type_can_asarray(c2):
|
| 218 |
+
msg = 'The argument "c2" must be array-like'
|
| 219 |
+
raise errors.TypingError(msg)
|
| 220 |
+
|
| 221 |
+
def impl(c1, c2):
|
| 222 |
+
arr1, arr2 = pu.as_series((c1, c2))
|
| 223 |
+
diff = len(arr2) - len(arr1)
|
| 224 |
+
if diff > 0:
|
| 225 |
+
zr = np.zeros(diff)
|
| 226 |
+
arr1 = np.concatenate((arr1, zr))
|
| 227 |
+
if diff < 0:
|
| 228 |
+
zr = np.zeros(-diff)
|
| 229 |
+
arr2 = np.concatenate((arr2, zr))
|
| 230 |
+
val = arr1 - arr2
|
| 231 |
+
return pu.trimseq(val)
|
| 232 |
+
|
| 233 |
+
return impl
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
@overload(poly.polymul)
|
| 237 |
+
def numpy_polymul(c1, c2):
|
| 238 |
+
if not type_can_asarray(c1):
|
| 239 |
+
msg = 'The argument "c1" must be array-like'
|
| 240 |
+
raise errors.TypingError(msg)
|
| 241 |
+
|
| 242 |
+
if not type_can_asarray(c2):
|
| 243 |
+
msg = 'The argument "c2" must be array-like'
|
| 244 |
+
raise errors.TypingError(msg)
|
| 245 |
+
|
| 246 |
+
def impl(c1, c2):
|
| 247 |
+
arr1, arr2 = pu.as_series((c1, c2))
|
| 248 |
+
val = np.convolve(arr1, arr2)
|
| 249 |
+
return pu.trimseq(val)
|
| 250 |
+
|
| 251 |
+
return impl
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
@overload(poly.polyval, prefer_literal=True)
|
| 255 |
+
def poly_polyval(x, c, tensor=True):
|
| 256 |
+
if not type_can_asarray(x):
|
| 257 |
+
msg = 'The argument "x" must be array-like'
|
| 258 |
+
raise errors.TypingError(msg)
|
| 259 |
+
|
| 260 |
+
if not type_can_asarray(c):
|
| 261 |
+
msg = 'The argument "c" must be array-like'
|
| 262 |
+
raise errors.TypingError(msg)
|
| 263 |
+
|
| 264 |
+
if not isinstance(tensor, (bool, types.BooleanLiteral)):
|
| 265 |
+
msg = 'The argument "tensor" must be boolean'
|
| 266 |
+
raise errors.RequireLiteralValue(msg)
|
| 267 |
+
|
| 268 |
+
res_dtype = _poly_result_dtype(c, x)
|
| 269 |
+
|
| 270 |
+
# Simulate new_shape = (1,) * np.ndim(x) in the general case
|
| 271 |
+
# If x is a number, new_shape is not used
|
| 272 |
+
# If x is a tuple or a list, then it's 1d hence new_shape=(1,)
|
| 273 |
+
x_nd_array = not isinstance(x, types.Number)
|
| 274 |
+
new_shape = (1,)
|
| 275 |
+
if isinstance(x, types.Array):
|
| 276 |
+
# If x is a np.array, then take its dimension
|
| 277 |
+
new_shape = (1,) * np.ndim(x)
|
| 278 |
+
|
| 279 |
+
if isinstance(tensor, bool):
|
| 280 |
+
tensor_arg = tensor
|
| 281 |
+
else:
|
| 282 |
+
tensor_arg = tensor.literal_value
|
| 283 |
+
|
| 284 |
+
def impl(x, c, tensor=True):
|
| 285 |
+
arr = np.asarray(c).astype(res_dtype)
|
| 286 |
+
inputs = np.asarray(x).astype(res_dtype)
|
| 287 |
+
if x_nd_array and tensor_arg:
|
| 288 |
+
arr = arr.reshape(arr.shape + new_shape)
|
| 289 |
+
|
| 290 |
+
l = len(arr)
|
| 291 |
+
y = arr[l - 1] + inputs * 0
|
| 292 |
+
|
| 293 |
+
for i in range(l - 1, 0, -1):
|
| 294 |
+
y = arr[i - 1] + y * inputs
|
| 295 |
+
|
| 296 |
+
return y
|
| 297 |
+
|
| 298 |
+
return impl
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
@overload(poly.polyint)
|
| 302 |
+
def poly_polyint(c, m=1):
|
| 303 |
+
|
| 304 |
+
if not type_can_asarray(c):
|
| 305 |
+
msg = 'The argument "c" must be array-like'
|
| 306 |
+
raise errors.TypingError(msg)
|
| 307 |
+
|
| 308 |
+
if not isinstance(m, (int, types.Integer)):
|
| 309 |
+
msg = 'The argument "m" must be an integer'
|
| 310 |
+
raise errors.TypingError(msg)
|
| 311 |
+
|
| 312 |
+
res_dtype = as_dtype(_poly_result_dtype(c))
|
| 313 |
+
|
| 314 |
+
if not np.issubdtype(res_dtype, np.number):
|
| 315 |
+
msg = f'Input dtype must be scalar. Found {res_dtype} instead'
|
| 316 |
+
raise errors.TypingError(msg)
|
| 317 |
+
|
| 318 |
+
is1D = ((np.ndim(c) == 1) or
|
| 319 |
+
(isinstance(c, (types.List, types.BaseTuple))
|
| 320 |
+
and isinstance(c.dtype, types.Number)))
|
| 321 |
+
|
| 322 |
+
def impl(c, m=1):
|
| 323 |
+
c = np.asarray(c).astype(res_dtype)
|
| 324 |
+
cdt = c.dtype
|
| 325 |
+
for i in range(m):
|
| 326 |
+
n = len(c)
|
| 327 |
+
|
| 328 |
+
tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
|
| 329 |
+
tmp[0] = c[0] * 0
|
| 330 |
+
tmp[1] = c[0]
|
| 331 |
+
for j in range(1, n):
|
| 332 |
+
tmp[j + 1] = c[j] / (j + 1)
|
| 333 |
+
c = tmp
|
| 334 |
+
if is1D:
|
| 335 |
+
return pu.trimseq(c)
|
| 336 |
+
else:
|
| 337 |
+
return c
|
| 338 |
+
|
| 339 |
+
return impl
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
@overload(poly.polydiv)
|
| 343 |
+
def numpy_polydiv(c1, c2):
|
| 344 |
+
if not type_can_asarray(c1):
|
| 345 |
+
msg = 'The argument "c1" must be array-like'
|
| 346 |
+
raise errors.TypingError(msg)
|
| 347 |
+
|
| 348 |
+
if not type_can_asarray(c2):
|
| 349 |
+
msg = 'The argument "c2" must be array-like'
|
| 350 |
+
raise errors.TypingError(msg)
|
| 351 |
+
|
| 352 |
+
def impl(c1, c2):
|
| 353 |
+
arr1, arr2 = pu.as_series((c1, c2))
|
| 354 |
+
if arr2[-1] == 0:
|
| 355 |
+
raise ZeroDivisionError()
|
| 356 |
+
|
| 357 |
+
l1 = len(arr1)
|
| 358 |
+
l2 = len(arr2)
|
| 359 |
+
if l1 < l2:
|
| 360 |
+
return arr1[:1] * 0, arr1
|
| 361 |
+
elif l2 == 1:
|
| 362 |
+
return arr1 / arr2[-1], arr1[:1] * 0
|
| 363 |
+
else:
|
| 364 |
+
dlen = l1 - l2
|
| 365 |
+
scl = arr2[-1]
|
| 366 |
+
arr2 = arr2[:-1] / scl
|
| 367 |
+
i = dlen
|
| 368 |
+
j = l1 - 1
|
| 369 |
+
while i >= 0:
|
| 370 |
+
arr1[i:j] -= arr2 * arr1[j]
|
| 371 |
+
i -= 1
|
| 372 |
+
j -= 1
|
| 373 |
+
return arr1[j + 1:] / scl, pu.trimseq(arr1[:j + 1])
|
| 374 |
+
|
| 375 |
+
return impl
|
deepseek/lib/python3.10/site-packages/numba/np/random/__init__.py
ADDED
|
File without changes
|
deepseek/lib/python3.10/site-packages/numba/np/random/generator_core.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Core Implementations for Generator/BitGenerator Models.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from llvmlite import ir
|
| 6 |
+
from numba.core import cgutils, types
|
| 7 |
+
from numba.core.extending import (intrinsic, make_attribute_wrapper, models,
|
| 8 |
+
overload, register_jitable,
|
| 9 |
+
register_model)
|
| 10 |
+
from numba import float32
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@register_model(types.NumPyRandomBitGeneratorType)
|
| 14 |
+
class NumPyRngBitGeneratorModel(models.StructModel):
|
| 15 |
+
def __init__(self, dmm, fe_type):
|
| 16 |
+
members = [
|
| 17 |
+
('parent', types.pyobject),
|
| 18 |
+
('state_address', types.uintp),
|
| 19 |
+
('state', types.uintp),
|
| 20 |
+
('fnptr_next_uint64', types.uintp),
|
| 21 |
+
('fnptr_next_uint32', types.uintp),
|
| 22 |
+
('fnptr_next_double', types.uintp),
|
| 23 |
+
('bit_generator', types.uintp),
|
| 24 |
+
]
|
| 25 |
+
super(NumPyRngBitGeneratorModel, self).__init__(dmm, fe_type, members)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
_bit_gen_type = types.NumPyRandomBitGeneratorType('bit_generator')
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@register_model(types.NumPyRandomGeneratorType)
class NumPyRandomGeneratorTypeModel(models.StructModel):
    """Data model for ``numpy.random.Generator`` objects.

    Holds the embedded bit-generator struct, a meminfo pointer for
    lifetime management, and the parent Python object.
    """
    def __init__(self, dmm, fe_type):
        members = [
            ('bit_generator', _bit_gen_type),
            ('meminfo', types.MemInfoPointer(types.voidptr)),
            ('parent', types.pyobject)
        ]
        super(NumPyRandomGeneratorTypeModel, self).__init__(
            dmm, fe_type, members)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Generator instances expose a `.bit_generator` attribute; make it
# readable from jitted code as well.
make_attribute_wrapper(
    types.NumPyRandomGeneratorType,
    'bit_generator',
    'bit_generator')
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _generate_next_binding(overloadable_function, return_type):
    """
    Generate the overloads for "next_(some type)" functions.

    For the given stub (e.g. ``next_double``) this registers an
    ``@overload`` that, for ``NumPyRandomBitGeneratorType`` arguments,
    calls an intrinsic which loads the stored function-pointer address
    from the struct and calls it directly with the RNG state address.
    """
    @intrinsic
    def intrin_NumPyRandomBitGeneratorType_next_ty(tyctx, inst):
        # Signature: (bitgen) -> return_type
        sig = return_type(inst)

        def codegen(cgctx, builder, sig, llargs):
            # The stub's name selects the matching struct member,
            # e.g. next_double -> fnptr_next_double.
            name = overloadable_function.__name__
            struct_ptr = cgutils.create_struct_proxy(inst)(cgctx, builder,
                                                           value=llargs[0])

            # Get the 'state' and 'fnptr_next_(type)' members of the struct
            state = struct_ptr.state
            next_double_addr = getattr(struct_ptr, f'fnptr_{name}')

            # LLVM IR types needed
            ll_void_ptr_t = cgctx.get_value_type(types.voidptr)
            ll_return_t = cgctx.get_value_type(return_type)
            ll_uintp_t = cgctx.get_value_type(types.uintp)

            # Convert the stored Generator function address to a pointer
            next_fn_fnptr = builder.inttoptr(
                next_double_addr, ll_void_ptr_t)
            # Add the function to the module (declaration only; the
            # actual target is the runtime address above)
            fnty = ir.FunctionType(ll_return_t, (ll_uintp_t,))
            next_fn = cgutils.get_or_insert_function(
                builder.module, fnty, name)
            # Bit cast the function pointer to the function type
            fnptr_as_fntype = builder.bitcast(next_fn_fnptr, next_fn.type)
            # call it with the "state" address as the arg
            ret = builder.call(fnptr_as_fntype, (state,))
            return ret
        return sig, codegen

    @overload(overloadable_function)
    def ol_next_ty(bitgen):
        if isinstance(bitgen, types.NumPyRandomBitGeneratorType):
            def impl(bitgen):
                return intrin_NumPyRandomBitGeneratorType_next_ty(bitgen)
            return impl
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Some function stubs for "next(some type)", these will be overloaded
|
| 99 |
+
def next_double(bitgen):
    """Pure-Python stub drawing the next float64 from *bitgen*.

    Jitted calls are replaced by the overload registered via
    ``_generate_next_binding`` below.
    """
    iface = bitgen.ctypes
    return iface.next_double(iface.state)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def next_uint32(bitgen):
    """Pure-Python stub drawing the next uint32 from *bitgen*.

    Jitted calls are replaced by the overload registered via
    ``_generate_next_binding`` below.
    """
    iface = bitgen.ctypes
    return iface.next_uint32(iface.state)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def next_uint64(bitgen):
    """Pure-Python stub drawing the next uint64 from *bitgen*.

    Jitted calls are replaced by the overload registered via
    ``_generate_next_binding`` below.
    """
    iface = bitgen.ctypes
    return iface.next_uint64(iface.state)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# Register the jit overloads for the three stubs above.
_generate_next_binding(next_double, types.double)
_generate_next_binding(next_uint32, types.uint32)
_generate_next_binding(next_uint64, types.uint64)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# See: https://github.com/numpy/numpy/pull/20314
|
| 117 |
+
@register_jitable
|
| 118 |
+
def next_float(bitgen):
|
| 119 |
+
return float32(float32(next_uint32(bitgen) >> 8)
|
| 120 |
+
* float32(1.0) / float32(16777216.0))
|
deepseek/lib/python3.10/site-packages/numba/np/random/random_methods.py
ADDED
|
@@ -0,0 +1,365 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from numba import uint64, uint32, uint16, uint8
|
| 4 |
+
from numba.core.extending import register_jitable
|
| 5 |
+
|
| 6 |
+
from numba.np.random._constants import (UINT32_MAX, UINT64_MAX,
|
| 7 |
+
UINT16_MAX, UINT8_MAX)
|
| 8 |
+
from numba.np.random.generator_core import next_uint32, next_uint64
|
| 9 |
+
|
| 10 |
+
# All following implementations are direct translations from:
|
| 11 |
+
# https://github.com/numpy/numpy/blob/7cfef93c77599bd387ecc6a15d186c5a46024dac/numpy/random/src/distributions/distributions.c
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@register_jitable
def gen_mask(max):
    """Return the smallest all-ones bitmask (2**k - 1) covering *max*.

    Smears the highest set bit of *max* down through every lower bit
    position using doubling shifts.
    """
    mask = uint64(max)
    for shift in (1, 2, 4, 8, 16, 32):
        mask |= mask >> shift
    return mask
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@register_jitable
def buffered_bounded_bool(bitgen, off, rng, bcnt, buf):
    """Draw one bounded boolean from a buffered 32-bit draw.

    Returns a ``(value, bcnt, buf)`` triple.  Each 32-bit draw supplies
    32 booleans; *bcnt* counts the bits remaining in *buf*.  When
    ``rng == 0`` the offset is returned unchanged.
    """
    if rng == 0:
        return off, bcnt, buf
    if bcnt:
        buf >>= 1
        bcnt -= 1
    else:
        buf = next_uint32(bitgen)
        bcnt = 31
    return ((buf & 1) != 0), bcnt, buf
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@register_jitable
def buffered_uint8(bitgen, bcnt, buf):
    """Yield one uint8 from a buffered 32-bit draw (four bytes per draw).

    Returns ``(value, bcnt, buf)``; *bcnt* counts the bytes remaining
    in *buf* before another BitGenerator draw is needed.
    """
    if bcnt:
        buf >>= 8
        bcnt -= 1
    else:
        buf = next_uint32(bitgen)
        bcnt = 3
    return uint8(buf), bcnt, buf
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@register_jitable
def buffered_uint16(bitgen, bcnt, buf):
    """Yield one uint16 from a buffered 32-bit draw (two halves per draw).

    Returns ``(value, bcnt, buf)``; *bcnt* counts the halves remaining
    in *buf* before another BitGenerator draw is needed.
    """
    if bcnt:
        buf >>= 16
        bcnt -= 1
    else:
        buf = next_uint32(bitgen)
        bcnt = 1
    return uint16(buf), bcnt, buf
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# The following implementations use Lemire's algorithm:
|
| 65 |
+
# https://arxiv.org/abs/1805.10941
|
| 66 |
+
@register_jitable
def buffered_bounded_lemire_uint8(bitgen, rng, bcnt, buf):
    """
    Generates a random unsigned 8 bit integer bounded
    within a given interval using Lemire's rejection.

    The buffer acts as storage for a 32 bit integer
    drawn from the associated BitGenerator so that
    multiple integers of smaller bitsize can be generated
    from a single draw of the BitGenerator.
    """
    # Note: `rng` should not be 0xFF. When this happens `rng_excl` becomes
    # zero.
    rng_excl = uint8(rng) + uint8(1)

    assert (rng != 0xFF)

    # Generate a scaled random number.
    n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf)
    m = uint16(n * rng_excl)

    # Rejection sampling to remove any bias: reject while the low byte
    # of the widened product falls below the bias threshold.
    leftover = m & 0xFF

    if (leftover < rng_excl):
        # `rng_excl` is a simple upper bound for `threshold`.
        threshold = ((uint8(UINT8_MAX) - rng) % rng_excl)

        while (leftover < threshold):
            n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf)
            m = uint16(n * rng_excl)
            leftover = m & 0xFF

    # The high byte of the 16-bit product is the unbiased sample.
    return m >> 8, bcnt, buf
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@register_jitable
def buffered_bounded_lemire_uint16(bitgen, rng, bcnt, buf):
    """
    Generates a random unsigned 16 bit integer bounded
    within a given interval using Lemire's rejection.

    The buffer acts as storage for a 32 bit integer
    drawn from the associated BitGenerator so that
    multiple integers of smaller bitsize can be generated
    from a single draw of the BitGenerator.
    """
    # Note: `rng` should not be 0xFFFF. When this happens `rng_excl` becomes
    # zero.
    rng_excl = uint16(rng) + uint16(1)

    assert (rng != 0xFFFF)

    # Generate a scaled random number.
    n, bcnt, buf = buffered_uint16(bitgen, bcnt, buf)
    m = uint32(n * rng_excl)

    # Rejection sampling to remove any bias: reject while the low half
    # of the widened product falls below the bias threshold.
    leftover = m & 0xFFFF

    if (leftover < rng_excl):
        # `rng_excl` is a simple upper bound for `threshold`.
        threshold = ((uint16(UINT16_MAX) - rng) % rng_excl)

        while (leftover < threshold):
            n, bcnt, buf = buffered_uint16(bitgen, bcnt, buf)
            m = uint32(n * rng_excl)
            leftover = m & 0xFFFF

    # The high half of the 32-bit product is the unbiased sample.
    return m >> 16, bcnt, buf
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@register_jitable
def buffered_bounded_lemire_uint32(bitgen, rng):
    """
    Generates a random unsigned 32 bit integer bounded
    within a given interval using Lemire's rejection.

    Unlike the 8/16-bit variants no buffer is needed: each sample
    consumes exactly one 32-bit draw.
    """
    # Note: `rng` must not be 0xFFFFFFFF or `rng_excl` wraps to zero.
    rng_excl = uint32(rng) + uint32(1)

    assert (rng != 0xFFFFFFFF)

    # Generate a scaled random number (full 64-bit product).
    m = uint64(next_uint32(bitgen)) * uint64(rng_excl)

    # Rejection sampling to remove any bias
    leftover = m & 0xFFFFFFFF

    if (leftover < rng_excl):
        # `rng_excl` is a simple upper bound for `threshold`.
        threshold = (UINT32_MAX - rng) % rng_excl

        while (leftover < threshold):
            m = uint64(next_uint32(bitgen)) * uint64(rng_excl)
            leftover = m & 0xFFFFFFFF

    # High 32 bits of the product are the unbiased sample.
    return (m >> 32)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@register_jitable
def bounded_lemire_uint64(bitgen, rng):
    """
    Generates a random unsigned 64 bit integer bounded
    within a given interval using Lemire's rejection.
    """
    rng_excl = uint64(rng) + uint64(1)

    assert (rng != 0xFFFFFFFFFFFFFFFF)

    x = next_uint64(bitgen)

    # Low 64 bits of the 128-bit product x * rng_excl (uint64 multiply
    # wraps); this is the quantity tested against the bias threshold.
    leftover = uint64(x) * uint64(rng_excl)

    if (leftover < rng_excl):
        threshold = (UINT64_MAX - rng) % rng_excl

        while (leftover < threshold):
            x = next_uint64(bitgen)
            leftover = uint64(x) * uint64(rng_excl)

    # Compute the high 64 bits of the 128-bit product by hand via
    # 32x32 -> 64 partial products (no 128-bit integer available).
    x0 = x & uint64(0xFFFFFFFF)
    x1 = x >> 32
    rng_excl0 = rng_excl & uint64(0xFFFFFFFF)
    rng_excl1 = rng_excl >> 32
    w0 = x0 * rng_excl0
    t = x1 * rng_excl0 + (w0 >> 32)
    w1 = t & uint64(0xFFFFFFFF)
    w2 = t >> 32
    w1 += x0 * rng_excl1
    m1 = x1 * rng_excl1 + w2 + (w1 >> 32)

    # m1 is the unbiased sample (high 64 bits of the product).
    return m1
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@register_jitable
def random_bounded_uint64_fill(bitgen, low, rng, size, dtype):
    """
    Returns a new array of given size with 64 bit integers
    bounded by given interval.

    The strategy is picked per interval width: constant fill, raw
    32/64-bit draws for full-width ranges (Lemire rejection cannot
    represent them), or Lemire rejection otherwise.
    """
    out = np.empty(size, dtype=dtype)
    if rng == 0:
        # Degenerate interval: every element is the offset.
        for idx in np.ndindex(size):
            out[idx] = low
    elif rng == 0xFFFFFFFFFFFFFFFF:
        for idx in np.ndindex(size):
            out[idx] = low + next_uint64(bitgen)
    elif rng == 0xFFFFFFFF:
        for idx in np.ndindex(size):
            out[idx] = low + next_uint32(bitgen)
    elif rng < 0xFFFFFFFF:
        for idx in np.ndindex(size):
            out[idx] = low + buffered_bounded_lemire_uint32(bitgen, rng)
    else:
        for idx in np.ndindex(size):
            out[idx] = low + bounded_lemire_uint64(bitgen, rng)

    return out
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
@register_jitable
def random_bounded_uint32_fill(bitgen, low, rng, size, dtype):
    """
    Returns a new array of given size with 32 bit integers
    bounded by given interval.
    """
    out = np.empty(size, dtype=dtype)
    if rng == 0xFFFFFFFF:
        # Full range: Lemire rejection cannot handle rng == 2**32 - 1,
        # but a raw draw is exactly what is required.
        for idx in np.ndindex(size):
            out[idx] = low + next_uint32(bitgen)
    elif rng == 0:
        # Degenerate interval: every element is the offset.
        for idx in np.ndindex(size):
            out[idx] = low
    else:
        for idx in np.ndindex(size):
            out[idx] = low + buffered_bounded_lemire_uint32(bitgen, rng)
    return out
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
@register_jitable
def random_bounded_uint16_fill(bitgen, low, rng, size, dtype):
    """
    Returns a new array of given size with 16 bit integers
    bounded by given interval.
    """
    # Shared byte buffer: one 32-bit draw feeds two 16-bit samples.
    buf = 0
    bcnt = 0

    out = np.empty(size, dtype=dtype)
    if rng == 0xFFFF:
        # Full range: Lemire rejection cannot handle rng == 0xFFFF;
        # take raw buffered halves instead.
        for idx in np.ndindex(size):
            draw, bcnt, buf = buffered_uint16(bitgen, bcnt, buf)
            out[idx] = low + draw
    elif rng == 0:
        # Degenerate interval: every element is the offset.
        for idx in np.ndindex(size):
            out[idx] = low
    else:
        for idx in np.ndindex(size):
            draw, bcnt, buf = buffered_bounded_lemire_uint16(bitgen, rng,
                                                             bcnt, buf)
            out[idx] = low + draw
    return out
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
@register_jitable
def random_bounded_uint8_fill(bitgen, low, rng, size, dtype):
    """
    Returns a new array of given size with 8 bit integers
    bounded by given interval.
    """
    # Shared byte buffer: one 32-bit draw feeds four 8-bit samples.
    buf = 0
    bcnt = 0

    out = np.empty(size, dtype=dtype)
    if rng == 0xFF:
        # Full range: Lemire rejection cannot handle rng == 0xFF;
        # take raw buffered bytes instead.
        for idx in np.ndindex(size):
            draw, bcnt, buf = buffered_uint8(bitgen, bcnt, buf)
            out[idx] = low + draw
    elif rng == 0:
        # Degenerate interval: every element is the offset.
        for idx in np.ndindex(size):
            out[idx] = low
    else:
        for idx in np.ndindex(size):
            draw, bcnt, buf = buffered_bounded_lemire_uint8(bitgen, rng,
                                                            bcnt, buf)
            out[idx] = low + draw
    return out
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
@register_jitable
def random_bounded_bool_fill(bitgen, low, rng, size, dtype):
    """
    Returns a new array of given size with boolean values.
    """
    # Shared bit buffer: one 32-bit draw feeds 32 boolean samples.
    buf = 0
    bcnt = 0
    out = np.empty(size, dtype=dtype)
    for idx in np.ndindex(size):
        flag, bcnt, buf = buffered_bounded_bool(bitgen, low, rng, bcnt, buf)
        out[idx] = low + flag
    return out
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
@register_jitable
def _randint_arg_check(low, high, endpoint, lower_bound, upper_bound):
    """
    Check that low and high are within the bounds
    for the given datatype.

    Raises ValueError when a bound falls outside
    [lower_bound, upper_bound] or the resulting closed
    interval is empty.
    """

    if low < lower_bound:
        raise ValueError("low is out of bounds")

    # This is being done to avoid high being accidentally
    # casted to int64/32 while subtracting 1 before
    # checking bounds, avoids overflow.
    if high > 0:
        high = uint64(high)
        if not endpoint:
            # Convert the half-open interval to a closed one.
            high -= uint64(1)
        upper_bound = uint64(upper_bound)
        if low > 0:
            low = uint64(low)
        if high > upper_bound:
            raise ValueError("high is out of bounds")
        if low > high:  # -1 already subtracted, closed interval
            raise ValueError("low is greater than high in given interval")
    else:
        # NOTE(review): the `endpoint` adjustment is not applied on this
        # branch (high <= 0) — confirm this matches the caller's intent.
        if high > upper_bound:
            raise ValueError("high is out of bounds")
        if low > high:  # -1 already subtracted, closed interval
            raise ValueError("low is greater than high in given interval")
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
@register_jitable
def random_interval(bitgen, max_val):
    """Draw a uint64 uniformly on the closed interval [0, max_val].

    Uses masked rejection sampling: draws are masked down to the
    smallest power-of-two range covering max_val and rejected while
    they exceed it.
    """
    if max_val == 0:
        return 0

    max_val = uint64(max_val)
    mask = uint64(gen_mask(max_val))

    if max_val > 0xffffffff:
        # Bound needs more than 32 bits: use full 64-bit draws.
        value = next_uint64(bitgen) & mask
        while value > max_val:
            value = next_uint64(bitgen) & mask
    else:
        # 32-bit draws suffice and are cheaper.
        value = uint64(next_uint32(bitgen)) & mask
        while value > max_val:
            value = uint64(next_uint32(bitgen)) & mask

    return uint64(value)
|
deepseek/lib/python3.10/site-packages/numba/np/ufunc/__init__.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from numba.np.ufunc.decorators import Vectorize, GUVectorize, vectorize, guvectorize
|
| 4 |
+
from numba.np.ufunc._internal import PyUFunc_None, PyUFunc_Zero, PyUFunc_One
|
| 5 |
+
from numba.np.ufunc import _internal, array_exprs
|
| 6 |
+
from numba.np.ufunc.parallel import (threading_layer, get_num_threads,
|
| 7 |
+
set_num_threads, get_thread_id,
|
| 8 |
+
set_parallel_chunksize,
|
| 9 |
+
get_parallel_chunksize)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Re-export PyUFunc_ReorderableNone when the _internal C extension
# provides it, then drop the module references from this namespace.
if hasattr(_internal, 'PyUFunc_ReorderableNone'):
    PyUFunc_ReorderableNone = _internal.PyUFunc_ReorderableNone
del _internal, array_exprs
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _init():
    """Install on-demand loaders for the CUDA vectorizer targets."""

    def _load_cuda_vectorize():
        # Deferred import: CUDA support is optional.
        from numba.cuda.vectorizers import CUDAVectorize
        return CUDAVectorize

    def _load_cuda_guvectorize():
        from numba.cuda.vectorizers import CUDAGUFuncVectorize
        return CUDAGUFuncVectorize

    Vectorize.target_registry.ondemand['cuda'] = _load_cuda_vectorize
    GUVectorize.target_registry.ondemand['cuda'] = _load_cuda_guvectorize
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# Register the lazy CUDA targets, then remove the helper from the
# public module namespace.
_init()
del _init
|
deepseek/lib/python3.10/site-packages/numba/np/ufunc/decorators.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
|
| 3 |
+
from numba.np.ufunc import _internal
|
| 4 |
+
from numba.np.ufunc.parallel import ParallelUFuncBuilder, ParallelGUFuncBuilder
|
| 5 |
+
|
| 6 |
+
from numba.core.registry import DelayedRegistry
|
| 7 |
+
from numba.np.ufunc import dufunc
|
| 8 |
+
from numba.np.ufunc import gufunc
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class _BaseVectorize(object):
|
| 12 |
+
|
| 13 |
+
@classmethod
|
| 14 |
+
def get_identity(cls, kwargs):
|
| 15 |
+
return kwargs.pop('identity', None)
|
| 16 |
+
|
| 17 |
+
@classmethod
|
| 18 |
+
def get_cache(cls, kwargs):
|
| 19 |
+
return kwargs.pop('cache', False)
|
| 20 |
+
|
| 21 |
+
@classmethod
|
| 22 |
+
def get_writable_args(cls, kwargs):
|
| 23 |
+
return kwargs.pop('writable_args', ())
|
| 24 |
+
|
| 25 |
+
@classmethod
|
| 26 |
+
def get_target_implementation(cls, kwargs):
|
| 27 |
+
target = kwargs.pop('target', 'cpu')
|
| 28 |
+
try:
|
| 29 |
+
return cls.target_registry[target]
|
| 30 |
+
except KeyError:
|
| 31 |
+
raise ValueError("Unsupported target: %s" % target)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Vectorize(_BaseVectorize):
    """Factory dispatching to the ufunc builder for the chosen target."""
    target_registry = DelayedRegistry({'cpu': dufunc.DUFunc,
                                      'parallel': ParallelUFuncBuilder,})

    def __new__(cls, func, **kws):
        # Pop recognised options; whatever remains becomes targetoptions.
        ident = cls.get_identity(kws)
        use_cache = cls.get_cache(kws)
        builder_cls = cls.get_target_implementation(kws)
        return builder_cls(func, identity=ident, cache=use_cache,
                           targetoptions=kws)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class GUVectorize(_BaseVectorize):
    """Factory dispatching to the gufunc builder for the chosen target."""
    target_registry = DelayedRegistry({'cpu': gufunc.GUFunc,
                                       'parallel': ParallelGUFuncBuilder,})

    def __new__(cls, func, signature, **kws):
        # Pop recognised options; whatever remains becomes targetoptions.
        ident = cls.get_identity(kws)
        use_cache = cls.get_cache(kws)
        builder_cls = cls.get_target_implementation(kws)
        writable = cls.get_writable_args(kws)
        if builder_cls is not gufunc.GUFunc:
            return builder_cls(func, signature, identity=ident,
                               cache=use_cache, targetoptions=kws,
                               writable_args=writable)
        # Only the CPU GUFunc understands dynamic compilation.
        dynamic = kws.pop('is_dynamic', False)
        return builder_cls(func, signature, identity=ident, cache=use_cache,
                           is_dynamic=dynamic, targetoptions=kws,
                           writable_args=writable)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def vectorize(ftylist_or_function=(), **kws):
    """vectorize(ftylist_or_function=(), target='cpu', identity=None, **kws)

    A decorator that creates a NumPy ufunc object using Numba compiled
    code.  When no arguments or only keyword arguments are given,
    vectorize will return a Numba dynamic ufunc (DUFunc) object, where
    compilation/specialization may occur at call-time.

    Args
    -----
    ftylist_or_function: function or iterable

        When the first argument is a function, signatures are dealt
        with at call-time.

        When the first argument is an iterable of type signatures,
        which are either function type object or a string describing
        the function type, signatures are finalized at decoration
        time.

    Keyword Args
    ------------

    target: str
        A string for code generation target. Default to "cpu".

    identity: int, str, or None
        The identity (or unit) value for the element-wise function
        being implemented. Allowed values are None (the default), 0, 1,
        and "reorderable".

    cache: bool
        Turns on caching.


    Returns
    --------

    A NumPy universal function

    Examples
    -------
    @vectorize(['float32(float32, float32)',
                'float64(float64, float64)'], identity=0)
    def sum(a, b):
        return a + b

    @vectorize
    def sum(a, b):
        return a + b

    @vectorize(identity=1)
    def mul(a, b):
        return a * b

    """
    if inspect.isfunction(ftylist_or_function):
        # Bare-function form: build a dynamic ufunc immediately.
        return dufunc.DUFunc(ftylist_or_function, **kws)

    if isinstance(ftylist_or_function, str):
        # Common user mistake: a single signature string.
        signatures = [ftylist_or_function]
    elif ftylist_or_function is not None:
        signatures = ftylist_or_function

    def wrap(func):
        vec = Vectorize(func, **kws)
        for sig in signatures:
            vec.add(sig)
        if len(signatures) > 0:
            vec.disable_compile()
        return vec.build_ufunc()

    return wrap
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def guvectorize(*args, **kwargs):
    """guvectorize(ftylist, signature, target='cpu', identity=None, **kws)

    A decorator to create NumPy generalized-ufunc object from Numba compiled
    code.

    Args
    -----
    ftylist: iterable
        An iterable of type signatures, which are either
        function type object or a string describing the
        function type.

    signature: str
        A NumPy generalized-ufunc signature.
        e.g. "(m, n), (n, p)->(m, p)"

    identity: int, str, or None
        The identity (or unit) value for the element-wise function
        being implemented. Allowed values are None (the default), 0, 1,
        and "reorderable".

    cache: bool
        Turns on caching.

    writable_args: tuple
        a tuple of indices of input variables that are writable.

    target: str
        A string for code generation target. Defaults to "cpu".

    Returns
    --------

    A NumPy generalized universal-function

    Example
    -------
    @guvectorize(['void(int32[:,:], int32[:,:], int32[:,:])',
                  'void(float32[:,:], float32[:,:], float32[:,:])'],
                  '(x, y),(x, y)->(x, y)')
    def add_2d_array(a, b, c):
        for i in range(c.shape[0]):
            for j in range(c.shape[1]):
                c[i, j] = a[i, j] + b[i, j]

    """
    nargs = len(args)
    if nargs == 1:
        # Signature-only form: a dynamic gufunc with no eager types.
        ftylist = []
        signature = args[0]
        kwargs.setdefault('is_dynamic', True)
    elif nargs == 2:
        ftylist, signature = args
    else:
        raise TypeError('guvectorize() takes one or two positional arguments')

    if isinstance(ftylist, str):
        # Common user mistake: a single signature string.
        ftylist = [ftylist]

    def wrap(func):
        guvec = GUVectorize(func, signature, **kwargs)
        for fty in ftylist:
            guvec.add(fty)
        if ftylist:
            guvec.disable_compile()
        return guvec.build_ufunc()

    return wrap
|
deepseek/lib/python3.10/site-packages/numba/np/ufunc/gufunc.py
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba import typeof
|
| 2 |
+
from numba.core import types
|
| 3 |
+
from numba.np.ufunc.ufuncbuilder import GUFuncBuilder
|
| 4 |
+
from numba.np.ufunc.sigparse import parse_signature
|
| 5 |
+
from numba.np.ufunc.ufunc_base import UfuncBase, UfuncLowererBase
|
| 6 |
+
from numba.np.numpy_support import ufunc_find_matching_loop
|
| 7 |
+
from numba.core import serialize, errors
|
| 8 |
+
from numba.core.typing import npydecl
|
| 9 |
+
from numba.core.typing.templates import signature, AbstractTemplate
|
| 10 |
+
import functools
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def make_gufunc_kernel(_dufunc):
    """Build a GUFuncKernel class bound to *_dufunc*.

    The returned class is handed to npyimpl.numpy_gufunc_kernel() to
    lower calls to this particular gufunc.
    """
    # Imported locally — presumably to avoid an import cycle with
    # numba.np.npyimpl; confirm before moving to module level.
    from numba.np import npyimpl

    class GUFuncKernel(npyimpl._Kernel):
        """
        npyimpl._Kernel subclass responsible for lowering a gufunc kernel
        (element-wise function) inside a broadcast loop (which is
        generated by npyimpl.numpy_gufunc_kernel()).
        """
        dufunc = _dufunc

        def __init__(self, context, builder, outer_sig):
            super().__init__(context, builder, outer_sig)
            # Resolve the element-wise compiled function matching the
            # dtypes of the outer (array-level) signature.
            ewise_types = self.dufunc._get_ewise_dtypes(outer_sig.args)
            self.inner_sig, self.cres = self.dufunc.find_ewise_function(
                ewise_types)

        def cast(self, val, fromty, toty):
            # Handle the case where "fromty" is an array and "toty" a scalar
            if isinstance(fromty, types.Array) and not \
                    isinstance(toty, types.Array):
                return super().cast(val, fromty.dtype, toty)
            return super().cast(val, fromty, toty)

        def generate(self, *args):
            # Object-mode compilation results cannot be linked into the
            # nopython broadcast loop.
            if self.cres.objectmode:
                msg = ('Calling a guvectorize function in object mode is not '
                       'supported yet.')
                raise errors.NumbaRuntimeError(msg)
            self.context.add_linking_libs((self.cres.library,))
            return super().generate(*args)

    # Make the generated class name identify its gufunc in diagnostics.
    GUFuncKernel.__name__ += _dufunc.__name__
    return GUFuncKernel
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class GUFuncLowerer(UfuncLowererBase):
    '''Callable class responsible for lowering calls to a specific gufunc.
    '''
    def __init__(self, gufunc):
        # Imported lazily to avoid a circular import at module load time.
        from numba.np import npyimpl
        # Delegate to the shared lowering base, supplying the kernel factory
        # and the gufunc broadcast-loop generator.
        super().__init__(gufunc,
                         make_gufunc_kernel,
                         npyimpl.numpy_gufunc_kernel)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class GUFunc(serialize.ReduceMixin, UfuncBase):
    """
    Dynamic generalized universal function (GUFunc)
    intended to act like a normal Numpy gufunc, but capable
    of call-time (just-in-time) compilation of fast loops
    specialized to inputs.
    """

    def __init__(self, py_func, signature, identity=None, cache=None,
                 is_dynamic=False, targetoptions={}, writable_args=()):
        # NOTE: mutable default ``targetoptions`` is kept for interface
        # compatibility; it is only forwarded, never mutated here.
        self.ufunc = None
        self._frozen = False          # True once compilation is disabled
        self._is_dynamic = is_dynamic
        self._identity = identity

        # GUFunc cannot inherit from GUFuncBuilder because "identity"
        # is a property of GUFunc. Thus, we hold a reference to a GUFuncBuilder
        # object here
        self.gufunc_builder = GUFuncBuilder(
            py_func, signature, identity, cache, targetoptions, writable_args)

        self.__name__ = self.gufunc_builder.py_func.__name__
        self.__doc__ = self.gufunc_builder.py_func.__doc__
        self._dispatcher = self.gufunc_builder.nb_func
        self._initialize(self._dispatcher)
        functools.update_wrapper(self, py_func)

    def _initialize(self, dispatcher):
        """Build the underlying ufunc and register typing/lowering hooks."""
        self.build_ufunc()
        self._install_type()
        self._lower_me = GUFuncLowerer(self)
        self._install_cg()

    def _reduce_states(self):
        """NOTE: part of ReduceMixin protocol (pickling support)."""
        gb = self.gufunc_builder
        dct = dict(
            py_func=gb.py_func,
            signature=gb.signature,
            identity=self._identity,
            cache=gb.cache,
            is_dynamic=self._is_dynamic,
            targetoptions=gb.targetoptions,
            writable_args=gb.writable_args,
            typesigs=gb._sigs,
            frozen=self._frozen,
        )
        return dct

    @classmethod
    def _rebuild(cls, py_func, signature, identity, cache, is_dynamic,
                 targetoptions, writable_args, typesigs, frozen):
        """NOTE: part of ReduceMixin protocol (unpickling support)."""
        self = cls(py_func=py_func, signature=signature, identity=identity,
                   cache=cache, is_dynamic=is_dynamic,
                   targetoptions=targetoptions, writable_args=writable_args)
        # Recompile every previously-registered element-wise signature.
        for sig in typesigs:
            self.add(sig)
        self.build_ufunc()
        self._frozen = frozen
        return self

    def __repr__(self):
        return f"<numba._GUFunc '{self.__name__}'>"

    def _install_type(self, typingctx=None):
        """Constructs and installs a typing class for a gufunc object in the
        input typing context. If no typing context is given, then
        _install_type() installs into the typing context of the
        dispatcher object (should be same default context used by
        jit() and njit()).
        """
        if typingctx is None:
            typingctx = self._dispatcher.targetdescr.typing_context
        _ty_cls = type('GUFuncTyping_' + self.__name__,
                       (AbstractTemplate,),
                       dict(key=self, generic=self._type_me))
        typingctx.insert_user_function(self, _ty_cls)

    def add(self, fty):
        """Register (and compile) a new element-wise function type."""
        self.gufunc_builder.add(fty)

    def build_ufunc(self):
        """(Re)build the underlying NumPy gufunc object from all
        registered signatures.  Returns self for chaining."""
        self.ufunc = self.gufunc_builder.build_ufunc()
        return self

    def expected_ndims(self):
        """Return ((input core ndims...), (output core ndims...)) parsed
        from the gufunc signature string, e.g. "(m,n),(n)->(m)"."""
        parsed_sig = parse_signature(self.gufunc_builder.signature)
        return (tuple(map(len, parsed_sig[0])), tuple(map(len, parsed_sig[1])))

    def _type_me(self, argtys, kws):
        """
        Implement AbstractTemplate.generic() for the typing class
        built by gufunc._install_type().

        Return the call-site signature after either validating the
        element-wise signature or compiling for it.
        """
        assert not kws
        ufunc = self.ufunc
        sig = self.gufunc_builder.signature
        inp_ndims, out_ndims = self.expected_ndims()
        ndims = inp_ndims + out_ndims

        # BUG FIX: this used to read ``assert len(argtys), len(ndims)``,
        # which asserts ``len(argtys)`` with ``len(ndims)`` as the message
        # and therefore only checked that at least one argument was given.
        # Assert the invariant the loop below actually relies on: we never
        # index past ``ndims``.
        assert len(argtys) <= len(ndims)
        for idx, arg in enumerate(argtys):
            if isinstance(arg, types.Array) and arg.ndim < ndims[idx]:
                kind = "Input" if idx < len(inp_ndims) else "Output"
                i = idx if idx < len(inp_ndims) else idx - len(inp_ndims)
                msg = (
                    f"{self.__name__}: {kind} operand {i} does not have "
                    f"enough dimensions (has {arg.ndim}, gufunc core with "
                    f"signature {sig} requires {ndims[idx]})")
                raise errors.TypingError(msg)

        _handle_inputs_result = npydecl.Numpy_rules_ufunc._handle_inputs(
            ufunc, argtys, kws)
        ewise_types, _, _, _ = _handle_inputs_result
        sig, _ = self.find_ewise_function(ewise_types)

        if sig is None:
            # Matching element-wise signature was not found; must
            # compile.
            if self._frozen:
                msg = f"cannot call {self} with types {argtys}"
                raise errors.TypingError(msg)
            self._compile_for_argtys(ewise_types)
            # double check to ensure there is a match
            sig, _ = self.find_ewise_function(ewise_types)
            if sig == (None, None):
                msg = f"Fail to compile {self.__name__} with types {argtys}"
                raise errors.TypingError(msg)

        assert sig is not None

        # gufunc calls are lowered as in-place operations on the output
        # operand, hence the ``none`` return type.
        return signature(types.none, *argtys)

    def _compile_for_argtys(self, argtys, return_type=None):
        # Compile a new guvectorize function! Use the gufunc signature
        # i.e. (n,m),(m)->(n)
        # plus ewise_types to build a numba function type
        fnty = self._get_function_type(*argtys)
        self.gufunc_builder.add(fnty)

    def match_signature(self, ewise_types, sig):
        """Return True if *sig*'s element-wise dtypes equal *ewise_types*."""
        dtypes = self._get_ewise_dtypes(sig.args)
        return tuple(dtypes) == tuple(ewise_types)

    @property
    def is_dynamic(self):
        return self._is_dynamic

    def _get_ewise_dtypes(self, args):
        """Map each argument (Numba type or runtime value) to its scalar
        element dtype (arrays contribute their dtype, scalars themselves)."""
        argtys = map(lambda arg: arg if isinstance(arg, types.Type) else
                     typeof(arg), args)
        tys = []
        for argty in argtys:
            if isinstance(argty, types.Array):
                tys.append(argty.dtype)
            else:
                tys.append(argty)
        return tys

    def _num_args_match(self, *args):
        """True when *args* supplies every input AND output operand."""
        parsed_sig = parse_signature(self.gufunc_builder.signature)
        return len(args) == len(parsed_sig[0]) + len(parsed_sig[1])

    def _get_function_type(self, *args):
        """Build a Numba function type (``none(...)``) for *args* from the
        gufunc core signature and the arguments' element dtypes."""
        parsed_sig = parse_signature(self.gufunc_builder.signature)
        # ewise_types is a list of [int32, int32, int32, ...]
        ewise_types = self._get_ewise_dtypes(args)

        # first time calling the gufunc
        # generate a signature based on input arguments
        l = []
        for idx, sig_dim in enumerate(parsed_sig[0]):
            ndim = len(sig_dim)
            if ndim == 0:  # append scalar
                l.append(ewise_types[idx])
            else:
                l.append(types.Array(ewise_types[idx], ndim, 'A'))

        offset = len(parsed_sig[0])
        # add return type to signature
        for idx, sig_dim in enumerate(parsed_sig[1]):
            retty = ewise_types[idx + offset]
            ret_ndim = len(sig_dim) or 1  # small hack to return scalars
            l.append(types.Array(retty, ret_ndim, 'A'))

        return types.none(*l)

    def __call__(self, *args, **kwargs):
        # If compilation is disabled OR it is NOT a dynamic gufunc
        # call the underlying gufunc
        if self._frozen or not self.is_dynamic:
            return self.ufunc(*args, **kwargs)
        elif "out" in kwargs:
            # If "out" argument is supplied, treat it as a trailing
            # positional operand.
            args += (kwargs.pop("out"),)

        if not self._num_args_match(*args):
            # It is not allowed to call a dynamic gufunc without
            # providing all the arguments
            # see: https://github.com/numba/numba/pull/5938#discussion_r506429392 # noqa: E501
            msg = (
                f"Too few arguments for function '{self.__name__}'. "
                "Note that the pattern `out = gufunc(Arg1, Arg2, ..., ArgN)` "
                "is not allowed. Use `gufunc(Arg1, Arg2, ..., ArgN, out) "
                "instead.")
            raise TypeError(msg)

        # at this point we know the gufunc is a dynamic one
        ewise = self._get_ewise_dtypes(args)
        if not (self.ufunc and ufunc_find_matching_loop(self.ufunc, ewise)):
            # A previous call (@njit -> @guvectorize) may have compiled a
            # version for the element-wise dtypes. In this case, we don't need
            # to compile it again, just build the (g)ufunc
            # (BUG FIX: replaced the confusing double negative
            # ``not ... != (None, None)`` with a direct equality check.)
            if self.find_ewise_function(ewise) == (None, None):
                sig = self._get_function_type(*args)
                self.add(sig)
            self.build_ufunc()

        return self.ufunc(*args, **kwargs)
|
deepseek/lib/python3.10/site-packages/numba/np/ufunc/ufuncbuilder.py
ADDED
|
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import inspect
|
| 4 |
+
import warnings
|
| 5 |
+
from contextlib import contextmanager
|
| 6 |
+
|
| 7 |
+
from numba.core import config, targetconfig
|
| 8 |
+
from numba.core.decorators import jit
|
| 9 |
+
from numba.core.descriptors import TargetDescriptor
|
| 10 |
+
from numba.core.extending import is_jitted
|
| 11 |
+
from numba.core.errors import NumbaDeprecationWarning
|
| 12 |
+
from numba.core.options import TargetOptions, include_default_options
|
| 13 |
+
from numba.core.registry import cpu_target
|
| 14 |
+
from numba.core.target_extension import dispatcher_registry, target_registry
|
| 15 |
+
from numba.core import utils, types, serialize, compiler, sigutils
|
| 16 |
+
from numba.np.numpy_support import as_dtype
|
| 17 |
+
from numba.np.ufunc import _internal
|
| 18 |
+
from numba.np.ufunc.sigparse import parse_signature
|
| 19 |
+
from numba.np.ufunc.wrappers import build_ufunc_wrapper, build_gufunc_wrapper
|
| 20 |
+
from numba.core.caching import FunctionCache, NullCache
|
| 21 |
+
from numba.core.compiler_lock import global_compiler_lock
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Mixin exposing the subset of @jit target options that are meaningful for
# {g,}ufunc compilation.
_options_mixin = include_default_options(
    "nopython",
    "forceobj",
    "boundscheck",
    "fastmath",
    "writable_args"
)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class UFuncTargetOptions(_options_mixin, TargetOptions):
    """Target options for ufunc compilation."""

    def finalize(self, flags, options):
        """Fill in defaults for compilation flags the user did not set."""
        # Ufuncs default to allowing object mode with loop-lifting so
        # kernels can fall back to the interpreter.
        if not flags.is_set("enable_pyobject"):
            flags.enable_pyobject = True

        if not flags.is_set("enable_looplift"):
            flags.enable_looplift = True

        flags.inherit_if_not_set("nrt", default=True)

        if not flags.is_set("debuginfo"):
            flags.debuginfo = config.DEBUGINFO_DEFAULT

        # NOTE: boundscheck defaults to the (possibly just-assigned)
        # debuginfo value, so this must stay after the debuginfo block.
        if not flags.is_set("boundscheck"):
            flags.boundscheck = flags.debuginfo

        flags.enable_pyobject_looplift = True

        flags.inherit_if_not_set("fastmath")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class UFuncTarget(TargetDescriptor):
    """Target descriptor for ufunc compilation.

    Delegates both the typing and target contexts to the CPU target:
    ufunc kernels are compiled with the ordinary CPU pipeline.
    """
    options = UFuncTargetOptions

    def __init__(self):
        super().__init__('ufunc')

    @property
    def typing_context(self):
        return cpu_target.typing_context

    @property
    def target_context(self):
        return cpu_target.target_context


# Singleton target descriptor shared by all ufunc dispatchers.
ufunc_target = UFuncTarget()
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class UFuncDispatcher(serialize.ReduceMixin):
    """
    An object handling compilation of various signatures for a ufunc.
    """
    targetdescr = ufunc_target

    def __init__(self, py_func, locals={}, targetoptions={}):
        # NOTE: the mutable defaults are kept for interface compatibility;
        # they are copied before mutation in compile().
        self.py_func = py_func
        # signature -> CompileResult; UniqueDict forbids silent re-insertion.
        self.overloads = utils.UniqueDict()
        self.targetoptions = targetoptions
        self.locals = locals
        # Caching is opt-in via enable_caching().
        self.cache = NullCache()

    def _reduce_states(self):
        """
        NOTE: part of ReduceMixin protocol
        """
        return dict(
            pyfunc=self.py_func,
            locals=self.locals,
            targetoptions=self.targetoptions,
        )

    @classmethod
    def _rebuild(cls, pyfunc, locals, targetoptions):
        """
        NOTE: part of ReduceMixin protocol
        """
        return cls(py_func=pyfunc, locals=locals, targetoptions=targetoptions)

    def enable_caching(self):
        """Switch from the null cache to an on-disk function cache."""
        self.cache = FunctionCache(self.py_func)

    def compile(self, sig, locals={}, **targetoptions):
        """Compile the wrapped function for *sig*.

        Call-site ``locals``/``targetoptions`` are merged over the
        dispatcher-level defaults.  Returns the CompileResult.
        """
        locs = self.locals.copy()
        locs.update(locals)

        topt = self.targetoptions.copy()
        topt.update(targetoptions)

        flags = compiler.Flags()
        self.targetdescr.options.parse_as_flags(flags, topt)

        flags.no_cpython_wrapper = True
        flags.error_model = "numpy"
        # Disable loop lifting
        # The feature requires a real
        # python function
        flags.enable_looplift = False

        # BUG FIX: this previously passed the raw ``locals`` argument,
        # silently discarding the merged ``locs`` dict (and with it the
        # dispatcher-level locals).
        return self._compile_core(sig, flags, locs)

    def _compile_core(self, sig, flags, locals):
        """
        Trigger the compiler on the core function or load a previously
        compiled version from the cache. Returns the CompileResult.
        """
        typingctx = self.targetdescr.typing_context
        targetctx = self.targetdescr.target_context

        @contextmanager
        def store_overloads_on_success():
            # use to ensure overloads are stored on success
            try:
                yield
            except Exception:
                raise
            else:
                exists = self.overloads.get(cres.signature)
                if exists is None:
                    self.overloads[cres.signature] = cres

        # Use cache and compiler in a critical section
        with global_compiler_lock:
            with targetconfig.ConfigStack().enter(flags.copy()):
                with store_overloads_on_success():
                    # attempt look up of existing
                    cres = self.cache.load_overload(sig, targetctx)
                    if cres is not None:
                        return cres

                    # Compile
                    args, return_type = sigutils.normalize_signature(sig)
                    cres = compiler.compile_extra(typingctx, targetctx,
                                                  self.py_func, args=args,
                                                  return_type=return_type,
                                                  flags=flags, locals=locals)

                    # cache lookup failed before so safe to save
                    self.cache.save_overload(sig, cres)

                    return cres
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
dispatcher_registry[target_registry['npyufunc']] = UFuncDispatcher
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
# Utility functions
|
| 171 |
+
|
| 172 |
+
def _compile_element_wise_function(nb_func, targetoptions, sig):
    """Compile the element-wise kernel for *sig* with *targetoptions*.

    Returns a triple ``(cres, args, return_type)`` where ``cres`` is the
    CompileResult and ``args``/``return_type`` are the normalized
    components of *sig*.
    """
    # Normalization of the signature is independent of compilation.
    arg_types, ret_type = sigutils.normalize_signature(sig)
    compile_result = nb_func.compile(sig, **targetoptions)
    return compile_result, arg_types, ret_type
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def _finalize_ufunc_signature(cres, args, return_type):
    '''Given a compilation result, argument types, and a return type,
    build a valid Numba signature after validating that it doesn't
    violate the constraints for the compilation mode.
    '''
    # Object-mode kernels must declare their return type explicitly.
    if return_type is None and cres.objectmode:
        raise TypeError("return type must be specified for object mode")

    # Nopython mode: take the inferred return type from the compile result.
    if return_type is None:
        return_type = cres.signature.return_type

    assert return_type != types.pyobject
    return return_type(*args)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def _build_element_wise_ufunc_wrapper(cres, signature):
    '''Build a wrapper for the ufunc loop entry point given by the
    compilation result object, using the element-wise signature.

    Returns ``(dtypenums, ptr, env)``: the NumPy dtype numbers for the
    loop's operands, the wrapper function pointer, and the environment
    object that must be kept alive alongside it.
    '''
    ctx = cres.target_context
    library = cres.library
    fname = cres.fndesc.llvm_func_name

    # Wrapper generation emits new code, so serialize against the compiler.
    with global_compiler_lock:
        info = build_ufunc_wrapper(library, ctx, fname, signature,
                                   cres.objectmode, cres)
        ptr = info.library.get_pointer_to_function(info.name)
        # Get dtypes
        dtypenums = [as_dtype(a).num for a in signature.args]
        dtypenums.append(as_dtype(signature.return_type).num)
        return dtypenums, ptr, cres.environment
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# Map user-facing identity values to NumPy's PyUFunc_* identity constants.
_identities = {
    0: _internal.PyUFunc_Zero,
    1: _internal.PyUFunc_One,
    None: _internal.PyUFunc_None,
    "reorderable": _internal.PyUFunc_ReorderableNone,
}
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def parse_identity(identity):
    """
    Parse an identity value and return the corresponding low-level value
    for Numpy.
    """
    # EAFP: the mapping lookup doubles as validation.
    try:
        return _identities[identity]
    except KeyError:
        raise ValueError("Invalid identity value %r" % (identity,))
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@contextmanager
def _suppress_deprecation_warning_nopython_not_supplied():
    """This suppresses the NumbaDeprecationWarning that occurs through the use
    of `jit` without the `nopython` kwarg. This use of `jit` occurs in a few
    places in the `{g,}ufunc` mechanism in Numba, predominantly to wrap the
    "kernel" function."""
    with warnings.catch_warnings():
        # Only the specific "nopython not supplied" message is silenced;
        # all other warnings propagate unchanged.
        warnings.filterwarnings('ignore',
                                category=NumbaDeprecationWarning,
                                message=(".*The 'nopython' keyword argument "
                                         "was not supplied*"),)
        yield
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
# Class definitions
|
| 249 |
+
|
| 250 |
+
class _BaseUFuncBuilder(object):
    """Shared machinery for UFuncBuilder and GUFuncBuilder: registering
    and compiling element-wise signatures."""

    def add(self, sig=None):
        """Compile *sig* and register the finalized signature.

        Returns the CompileResult.  Signatures are recorded in insertion
        order in ``self._sigs`` with results in ``self._cres``.
        """
        # GUFuncBuilder carries its own targetoptions; UFuncBuilder defers
        # to the options stored on its dispatcher.
        if hasattr(self, 'targetoptions'):
            targetoptions = self.targetoptions
        else:
            targetoptions = self.nb_func.targetoptions
        cres, args, return_type = _compile_element_wise_function(
            self.nb_func, targetoptions, sig)
        sig = self._finalize_signature(cres, args, return_type)
        self._sigs.append(sig)
        self._cres[sig] = cres
        return cres

    def disable_compile(self):
        """
        Disable the compilation of new signatures at call time.
        """
        # Override this for implementations that support lazy compilation
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class UFuncBuilder(_BaseUFuncBuilder):
    """Builds a NumPy ufunc object from one or more compiled element-wise
    kernels of a single Python function."""

    def __init__(self, py_func, identity=None, cache=False, targetoptions={}):
        # Unwrap an already-jitted function down to its pure-Python kernel.
        if is_jitted(py_func):
            py_func = py_func.py_func
        self.py_func = py_func
        self.identity = parse_identity(identity)
        with _suppress_deprecation_warning_nopython_not_supplied():
            self.nb_func = jit(_target='npyufunc',
                               cache=cache,
                               **targetoptions)(py_func)
        self._sigs = []        # signatures in registration order
        self._cres = {}        # signature -> CompileResult

    def _finalize_signature(self, cres, args, return_type):
        '''Slated for deprecation, use ufuncbuilder._finalize_ufunc_signature()
        instead.
        '''
        return _finalize_ufunc_signature(cres, args, return_type)

    def build_ufunc(self):
        """Assemble the NumPy ufunc object from all registered signatures."""
        with global_compiler_lock:
            dtypelist = []
            ptrlist = []
            if not self.nb_func:
                raise TypeError("No definition")

            # Get signature in the order they are added
            keepalive = []
            cres = None
            for sig in self._sigs:
                cres = self._cres[sig]
                dtypenums, ptr, env = self.build(cres, sig)
                dtypelist.append(dtypenums)
                ptrlist.append(int(ptr))
                # Hold the library and environment alive for the lifetime
                # of the ufunc object.
                keepalive.append((cres.library, env))

            datlist = [None] * len(ptrlist)

            # With no compiled signatures, fall back to introspecting the
            # Python function for the input count.
            if cres is None:
                argspec = inspect.getfullargspec(self.py_func)
                inct = len(argspec.args)
            else:
                inct = len(cres.signature.args)
            outct = 1

            # Becareful that fromfunc does not provide full error checking yet.
            # If typenum is out-of-bound, we have nasty memory corruptions.
            # For instance, -1 for typenum will cause segfault.
            # If elements of type-list (2nd arg) is tuple instead,
            # there will also memory corruption. (Seems like code rewrite.)
            ufunc = _internal.fromfunc(
                self.py_func.__name__, self.py_func.__doc__,
                ptrlist, dtypelist, inct, outct, datlist,
                keepalive, self.identity,
            )

            return ufunc

    def build(self, cres, signature):
        '''Slated for deprecation, use
        ufuncbuilder._build_element_wise_ufunc_wrapper().
        '''
        return _build_element_wise_ufunc_wrapper(cres, signature)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
class GUFuncBuilder(_BaseUFuncBuilder):
    """Builds a NumPy generalized ufunc from compiled kernels and a core
    dimension signature such as "(m,n),(n)->(m)"."""

    # TODO handle scalar
    def __init__(self, py_func, signature, identity=None, cache=False,
                 targetoptions={}, writable_args=()):
        self.py_func = py_func
        self.identity = parse_identity(identity)
        with _suppress_deprecation_warning_nopython_not_supplied():
            self.nb_func = jit(_target='npyufunc', cache=cache)(py_func)
        self.signature = signature
        # Parsed core dimensions: input and output dimension-name tuples.
        self.sin, self.sout = parse_signature(signature)
        self.targetoptions = targetoptions
        self.cache = cache
        self._sigs = []        # signatures in registration order
        self._cres = {}        # signature -> CompileResult

        # Normalize writable-arg specs (names or indices) to indices.
        transform_arg = _get_transform_arg(py_func)
        self.writable_args = tuple([transform_arg(a) for a in writable_args])

    def _finalize_signature(self, cres, args, return_type):
        # gufunc kernels write their result into the output operand, so
        # the kernel itself must not return a value.
        if not cres.objectmode and cres.signature.return_type != types.void:
            raise TypeError("gufunc kernel must have void return type")

        if return_type is None:
            return_type = types.void

        return return_type(*args)

    @global_compiler_lock
    def build_ufunc(self):
        """Assemble the NumPy gufunc object from all registered signatures."""
        type_list = []
        func_list = []
        if not self.nb_func:
            raise TypeError("No definition")

        # Get signature in the order they are added
        keepalive = []
        for sig in self._sigs:
            cres = self._cres[sig]
            dtypenums, ptr, env = self.build(cres)
            type_list.append(dtypenums)
            func_list.append(int(ptr))
            # Hold the library and environment alive for the lifetime of
            # the gufunc object.
            keepalive.append((cres.library, env))

        datalist = [None] * len(func_list)

        nin = len(self.sin)
        nout = len(self.sout)

        # Pass envs to fromfuncsig to bind to the lifetime of the ufunc object
        ufunc = _internal.fromfunc(
            self.py_func.__name__, self.py_func.__doc__,
            func_list, type_list, nin, nout, datalist,
            keepalive, self.identity, self.signature, self.writable_args
        )
        return ufunc

    def build(self, cres):
        """
        Returns (dtype numbers, function ptr, EnvironmentObject)
        """
        # Builder wrapper for ufunc entry point
        signature = cres.signature
        info = build_gufunc_wrapper(
            self.py_func, cres, self.sin, self.sout,
            cache=self.cache, is_parfors=False,
        )

        env = info.env
        ptr = info.library.get_pointer_to_function(info.name)
        # Get dtypes: array operands contribute their element dtype.
        dtypenums = []
        for a in signature.args:
            if isinstance(a, types.Array):
                ty = a.dtype
            else:
                ty = a
            dtypenums.append(as_dtype(ty).num)
        return dtypenums, ptr, env
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def _get_transform_arg(py_func):
|
| 419 |
+
"""Return function that transform arg into index"""
|
| 420 |
+
args = inspect.getfullargspec(py_func).args
|
| 421 |
+
pos_by_arg = {arg: i for i, arg in enumerate(args)}
|
| 422 |
+
|
| 423 |
+
def transform_arg(arg):
|
| 424 |
+
if isinstance(arg, int):
|
| 425 |
+
return arg
|
| 426 |
+
|
| 427 |
+
try:
|
| 428 |
+
return pos_by_arg[arg]
|
| 429 |
+
except KeyError:
|
| 430 |
+
msg = (f"Specified writable arg {arg} not found in arg list "
|
| 431 |
+
f"{args} for function {py_func.__qualname__}")
|
| 432 |
+
raise RuntimeError(msg)
|
| 433 |
+
|
| 434 |
+
return transform_arg
|
deepseek/lib/python3.10/site-packages/numba/np/ufunc/wrappers.py
ADDED
|
@@ -0,0 +1,743 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import namedtuple
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from llvmlite.ir import Constant, IRBuilder
|
| 6 |
+
from llvmlite import ir
|
| 7 |
+
|
| 8 |
+
from numba.core import types, cgutils
|
| 9 |
+
from numba.core.compiler_lock import global_compiler_lock
|
| 10 |
+
from numba.core.caching import make_library_cache, NullCache
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
_wrapper_info = namedtuple('_wrapper_info', ['library', 'env', 'name'])
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _build_ufunc_loop_body(load, store, context, func, builder, arrays, out,
                           offsets, store_offset, signature, pyapi, env):
    """Emit one iteration of a nopython-mode ufunc inner loop.

    Loads the current elements via *load*, calls the compiled scalar
    kernel *func*, stores the result via *store*, then advances the
    per-operand byte offsets.  Returns the call-status code value.
    """
    elems = load()

    # Invoke the compiled scalar kernel on the loaded elements.
    status, retval = context.call_conv.call_function(builder, func,
                                                     signature.return_type,
                                                     signature.args, elems)

    # On success store the result; on error raise it under the GIL so the
    # Python exception state is visible to the Numpy machinery.
    with builder.if_else(status.is_ok, likely=True) as (if_ok, if_error):
        with if_ok:
            store(retval)
        with if_error:
            gil = pyapi.gil_ensure()
            context.call_conv.raise_error(builder, pyapi, status)
            pyapi.gil_release(gil)

    # Advance every input offset by that operand's byte stride.
    for off, ary in zip(offsets, arrays):
        builder.store(builder.add(builder.load(off), ary.step), off)

    # Advance the output offset as well.
    builder.store(builder.add(builder.load(store_offset), out.step),
                  store_offset)

    return status.code
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _build_ufunc_loop_body_objmode(load, store, context, func, builder,
                                   arrays, out, offsets, store_offset,
                                   signature, env, pyapi):
    """Emit one iteration of an object-mode ufunc inner loop.

    Same shape as :func:`_build_ufunc_loop_body` but the kernel takes and
    returns PyObjects; errors are left for the Numpy machinery to observe.
    """
    elems = load()

    # All arguments are boxed for the object-mode call.
    _objargs = [types.pyobject] * len(signature.args)
    # We need to push the error indicator to avoid it messing with
    # the ufunc's execution. We restore it unless the ufunc raised
    # a new error.
    with pyapi.err_push(keep_new=True):
        status, retval = context.call_conv.call_function(builder, func,
                                                         types.pyobject,
                                                         _objargs, elems)
        # Release owned reference to arguments
        for elem in elems:
            pyapi.decref(elem)
        # NOTE: if an error occurred, it will be caught by the Numpy machinery

    # Store the (possibly NULL) returned object.
    store(retval)

    # Advance each input offset by its stride.
    for off, ary in zip(offsets, arrays):
        builder.store(builder.add(builder.load(off), ary.step), off)

    builder.store(builder.add(builder.load(store_offset), out.step),
                  store_offset)

    return status.code
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def build_slow_loop_body(context, func, builder, arrays, out, offsets,
                         store_offset, signature, pyapi, env):
    """Loop body for the strided (non unit-strided) nopython path.

    Uses byte-offset loads/stores, delegating the common iteration logic
    to :func:`_build_ufunc_loop_body`.
    """
    def load():
        return [ary.load_direct(builder.load(off))
                for off, ary in zip(offsets, arrays)]

    def store(retval):
        out.store_direct(retval, builder.load(store_offset))

    return _build_ufunc_loop_body(load, store, context, func, builder, arrays,
                                  out, offsets, store_offset, signature, pyapi,
                                  env=env)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def build_obj_loop_body(context, func, builder, arrays, out, offsets,
                        store_offset, signature, pyapi, envptr, env):
    """Loop body for the object-mode path: box inputs, call, unbox result."""
    env_body = context.get_env_body(builder, envptr)
    env_manager = pyapi.get_env_manager(env, env_body, envptr)

    def load():
        # Load native values at the current byte offsets...
        elems = [ary.load_direct(builder.load(off))
                 for off, ary in zip(offsets, arrays)]
        # ...then box them into PyObjects for the object-mode kernel.
        return [pyapi.from_native_value(t, v, env_manager)
                for v, t in zip(elems, signature.args)]

    def store(retval):
        is_ok = cgutils.is_not_null(builder, retval)
        # If an error is raised by the object mode ufunc, it will
        # simply get caught by the Numpy ufunc machinery.
        with builder.if_then(is_ok, likely=True):
            # Unbox the returned object into a native value.
            native = pyapi.to_native_value(signature.return_type, retval)
            assert native.cleanup is None
            # Store it at the current output offset.
            out.store_direct(native.value, builder.load(store_offset))
            # Release owned reference to the returned object.
            pyapi.decref(retval)

    return _build_ufunc_loop_body_objmode(load, store, context, func, builder,
                                          arrays, out, offsets, store_offset,
                                          signature, envptr, pyapi)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def build_fast_loop_body(context, func, builder, arrays, out, offsets,
                         store_offset, signature, ind, pyapi, env):
    """Loop body for the unit-strided fast path: aligned access by index."""
    def load():
        return [ary.load_aligned(ind) for ary in arrays]

    def store(retval):
        out.store_aligned(retval, ind)

    return _build_ufunc_loop_body(load, store, context, func, builder, arrays,
                                  out, offsets, store_offset, signature, pyapi,
                                  env=env)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def build_ufunc_wrapper(library, context, fname, signature, objmode, cres):
    """
    Wrap the scalar function with a loop that iterates over the arguments

    Returns
    -------
    (library, env, name)
    """
    assert isinstance(fname, str)
    byte_t = ir.IntType(8)
    byte_ptr_t = ir.PointerType(byte_t)
    byte_ptr_ptr_t = ir.PointerType(byte_ptr_t)
    intp_t = context.get_value_type(types.intp)
    intp_ptr_t = ir.PointerType(intp_t)

    # Numpy ufunc inner-loop signature:
    # void loop(char **args, npy_intp *dims, npy_intp *steps, void *data)
    fnty = ir.FunctionType(ir.VoidType(), [byte_ptr_ptr_t, intp_ptr_t,
                                           intp_ptr_t, byte_ptr_t])

    wrapperlib = context.codegen().create_library('ufunc_wrapper')
    wrapper_module = wrapperlib.create_ir_module('')

    # Declare the kernel; in object mode all arguments/return are PyObjects.
    if objmode:
        func_type = context.call_conv.get_function_type(
            types.pyobject, [types.pyobject] * len(signature.args))
    else:
        func_type = context.call_conv.get_function_type(
            signature.return_type, signature.args)

    func = ir.Function(wrapper_module, func_type, name=fname)
    func.attributes.add("alwaysinline")

    wrapper = ir.Function(wrapper_module, fnty, "__ufunc__." + func.name)
    arg_args, arg_dims, arg_steps, arg_data = wrapper.args
    arg_args.name = "args"
    arg_dims.name = "dims"
    arg_steps.name = "steps"
    arg_data.name = "data"

    builder = IRBuilder(wrapper.append_basic_block("entry"))

    # Prepare Environment
    envname = context.get_env_name(cres.fndesc)
    env = cres.environment
    envptr = builder.load(context.declare_env_global(builder.module, envname))

    # dims[0] holds the number of elements to process.
    loopcount = builder.load(arg_dims, name="loopcount")

    # Prepare inputs
    arrays = []
    for i, typ in enumerate(signature.args):
        arrays.append(UArrayArg(context, builder, arg_args, arg_steps, i, typ))

    # Prepare output (slot right after the inputs).
    out = UArrayArg(context, builder, arg_args, arg_steps, len(arrays),
                    signature.return_type)

    # Per-input byte offsets, all starting at zero.
    offsets = []
    zero = context.get_constant(types.intp, 0)
    for _ in arrays:
        p = cgutils.alloca_once(builder, intp_t)
        offsets.append(p)
        builder.store(zero, p)

    store_offset = cgutils.alloca_once(builder, intp_t)
    builder.store(zero, store_offset)

    # The fast path is only valid when every operand is unit-strided.
    unit_strided = cgutils.true_bit
    for ary in arrays:
        unit_strided = builder.and_(unit_strided, ary.is_unit_strided)

    pyapi = context.get_python_api(builder)
    if objmode:
        # General loop, run entirely under the GIL since the kernel
        # manipulates PyObjects.
        gil = pyapi.gil_ensure()
        with cgutils.for_range(builder, loopcount, intp=intp_t):
            build_obj_loop_body(
                context, func, builder, arrays, out, offsets,
                store_offset, signature, pyapi, envptr, env,
            )
        pyapi.gil_release(gil)
        builder.ret_void()
    else:
        with builder.if_else(unit_strided) as (is_unit_strided, is_strided):
            with is_unit_strided:
                # Contiguous fast path: index-based aligned access.
                with cgutils.for_range(builder, loopcount, intp=intp_t) as loop:
                    build_fast_loop_body(
                        context, func, builder, arrays, out, offsets,
                        store_offset, signature, loop.index, pyapi,
                        env=envptr,
                    )
            with is_strided:
                # General loop
                with cgutils.for_range(builder, loopcount, intp=intp_t):
                    build_slow_loop_body(
                        context, func, builder, arrays, out, offsets,
                        store_offset, signature, pyapi,
                        env=envptr,
                    )
        builder.ret_void()
    del builder

    # Link and finalize
    wrapperlib.add_ir_module(wrapper_module)
    wrapperlib.add_linking_library(library)
    return _wrapper_info(library=wrapperlib, env=env, name=wrapper.name)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class UArrayArg(object):
    """Adapter for one operand of a Numpy ufunc inner loop.

    Wraps a (data pointer, byte step) pair taken from the ufunc
    ``args``/``steps`` arrays and exposes typed load/store helpers for a
    frontend type *fe_type*.
    """

    def __init__(self, context, builder, args, steps, i, fe_type):
        self.context = context
        self.builder = builder
        self.fe_type = fe_type
        offset = self.context.get_constant(types.intp, i)
        offseted_args = self.builder.load(builder.gep(args, [offset]))
        data_type = context.get_data_type(fe_type)
        self.dataptr = self.builder.bitcast(offseted_args,
                                            data_type.as_pointer())
        sizeof = self.context.get_abi_sizeof(data_type)
        self.abisize = self.context.get_constant(types.intp, sizeof)
        offseted_step = self.builder.gep(steps, [offset])
        self.step = self.builder.load(offseted_step)
        # Unit-strided means the byte step equals the item size; this
        # enables the aligned (vectorizable) fast path.
        self.is_unit_strided = builder.icmp_unsigned('==',
                                                     self.abisize, self.step)
        # FIX: the original code redundantly re-assigned ``self.builder``
        # here with the same value; the duplicate assignment was removed.

    def load_direct(self, byteoffset):
        """
        Generic load from the given *byteoffset*. load_aligned() is
        preferred if possible.
        """
        ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
        return self.context.unpack_value(self.builder, self.fe_type, ptr)

    def load_aligned(self, ind):
        # Using gep() instead of explicit pointer addition helps LLVM
        # vectorize the loop.
        ptr = self.builder.gep(self.dataptr, [ind])
        return self.context.unpack_value(self.builder, self.fe_type, ptr)

    def store_direct(self, value, byteoffset):
        """Store *value* at the given byte offset from the base pointer."""
        ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
        self.context.pack_value(self.builder, self.fe_type, value, ptr)

    def store_aligned(self, value, ind):
        """Store *value* at element index *ind* (unit-strided path)."""
        ptr = self.builder.gep(self.dataptr, [ind])
        self.context.pack_value(self.builder, self.fe_type, value, ptr)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
# Module-level cache class for compiled gufunc wrapper libraries
# (file-backed, keyed by the wrapped Python function).
GufWrapperCache = make_library_cache('guf')
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class _GufuncWrapper(object):
    """Builds the Numpy generalized-ufunc wrapper around a compiled kernel.

    The wrapper conforms to the Numpy gufunc inner-loop ABI
    (``args``/``dims``/``steps``/``data``) and drives the compiled kernel
    once per outer-loop element.
    """

    def __init__(self, py_func, cres, sin, sout, cache, is_parfors):
        """
        The *is_parfors* argument is a boolean that indicates if the GUfunc
        being built is to be used as a ParFors kernel. If True, it disables
        the caching on the wrapper as a separate unit because it will be linked
        into the caller function and cached along with it.
        """
        self.py_func = py_func
        self.cres = cres
        self.sin = sin
        self.sout = sout
        self.is_objectmode = self.signature.return_type == types.pyobject
        self.cache = (GufWrapperCache(py_func=self.py_func)
                      if cache else NullCache())
        self.is_parfors = bool(is_parfors)

    @property
    def library(self):
        # Code library holding the compiled kernel.
        return self.cres.library

    @property
    def context(self):
        return self.cres.target_context

    @property
    def call_conv(self):
        return self.context.call_conv

    @property
    def signature(self):
        return self.cres.signature

    @property
    def fndesc(self):
        return self.cres.fndesc

    @property
    def env(self):
        return self.cres.environment

    def _wrapper_function_type(self):
        """LLVM type of the Numpy gufunc inner loop:
        void(char **args, npy_intp *dims, npy_intp *steps, void *data)."""
        byte_t = ir.IntType(8)
        byte_ptr_t = ir.PointerType(byte_t)
        byte_ptr_ptr_t = ir.PointerType(byte_ptr_t)
        intp_t = self.context.get_value_type(types.intp)
        intp_ptr_t = ir.PointerType(intp_t)

        fnty = ir.FunctionType(ir.VoidType(), [byte_ptr_ptr_t, intp_ptr_t,
                                               intp_ptr_t, byte_ptr_t])
        return fnty

    def _build_wrapper(self, library, name):
        """
        The LLVM IRBuilder code to create the gufunc wrapper.
        The *library* arg is the CodeLibrary to which the wrapper should
        be added. The *name* arg is the name of the wrapper function being
        created.
        """
        intp_t = self.context.get_value_type(types.intp)
        fnty = self._wrapper_function_type()

        wrapper_module = library.create_ir_module('_gufunc_wrapper')
        func_type = self.call_conv.get_function_type(self.fndesc.restype,
                                                     self.fndesc.argtypes)
        fname = self.fndesc.llvm_func_name
        func = ir.Function(wrapper_module, func_type, name=fname)

        func.attributes.add("alwaysinline")
        wrapper = ir.Function(wrapper_module, fnty, name)
        # The use of weak_odr linkage avoids the function being dropped due
        # to the order in which the wrappers and the user function are linked.
        wrapper.linkage = 'weak_odr'
        arg_args, arg_dims, arg_steps, arg_data = wrapper.args
        arg_args.name = "args"
        arg_dims.name = "dims"
        arg_steps.name = "steps"
        arg_data.name = "data"

        builder = IRBuilder(wrapper.append_basic_block("entry"))
        # dims[0] is the outer loop count; inner-core dims follow it.
        loopcount = builder.load(arg_dims, name="loopcount")
        pyapi = self.context.get_python_api(builder)

        # Map each distinct input dimension symbol to its position.
        # FIX: the original code also built a ``unique_syms`` set over
        # inputs and outputs that was never used; that dead computation
        # has been removed.
        sym_map = {}
        for syms in self.sin:
            for s in syms:
                if s not in sym_map:
                    sym_map[s] = len(sym_map)

        # Load the extent of each symbol from dims[1:].
        sym_dim = {}
        for s, i in sym_map.items():
            sym_dim[s] = builder.load(builder.gep(arg_dims,
                                                  [self.context.get_constant(
                                                      types.intp,
                                                      i + 1)]))

        # Prepare operands; inner-dimension steps start after the
        # per-operand core steps.
        arrays = []
        step_offset = len(self.sin) + len(self.sout)
        for i, (typ, sym) in enumerate(zip(self.signature.args,
                                           self.sin + self.sout)):
            ary = GUArrayArg(self.context, builder, arg_args,
                             arg_steps, i, step_offset, typ, sym, sym_dim)
            step_offset += len(sym)
            arrays.append(ary)

        bbreturn = builder.append_basic_block('.return')

        # Prologue
        self.gen_prologue(builder, pyapi)

        # Loop
        with cgutils.for_range(builder, loopcount, intp=intp_t) as loop:
            args = [a.get_array_at_offset(loop.index) for a in arrays]
            innercall, error = self.gen_loop_body(builder, pyapi, func, args)
            # If error, escape
            cgutils.cbranch_or_continue(builder, error, bbreturn)

        builder.branch(bbreturn)
        builder.position_at_end(bbreturn)

        # Epilogue
        self.gen_epilogue(builder, pyapi)

        builder.ret_void()

        # Link
        library.add_ir_module(wrapper_module)
        library.add_linking_library(self.library)

    def _compile_wrapper(self, wrapper_name):
        """Build (and possibly cache) the code library for the wrapper."""
        # Gufunc created by Parfors?
        if self.is_parfors:
            # No wrapper caching for parfors
            wrapperlib = self.context.codegen().create_library(str(self))
            # Build wrapper
            self._build_wrapper(wrapperlib, wrapper_name)
        # Non-parfors?
        else:
            # Use cache and compiler in a critical section
            wrapperlib = self.cache.load_overload(
                self.cres.signature, self.cres.target_context,
            )
            if wrapperlib is None:
                # Create library and enable caching
                wrapperlib = self.context.codegen().create_library(str(self))
                wrapperlib.enable_object_caching()
                # Build wrapper
                self._build_wrapper(wrapperlib, wrapper_name)
                # Cache
                self.cache.save_overload(self.cres.signature, wrapperlib)

        return wrapperlib

    @global_compiler_lock
    def build(self):
        """Compile the wrapper and return a ``_wrapper_info`` record."""
        wrapper_name = "__gufunc__." + self.fndesc.mangled_name
        wrapperlib = self._compile_wrapper(wrapper_name)
        return _wrapper_info(
            library=wrapperlib, env=self.env, name=wrapper_name,
        )

    def gen_loop_body(self, builder, pyapi, func, args):
        """Call the nopython kernel once; raise under the GIL on error."""
        status, retval = self.call_conv.call_function(
            builder, func, self.signature.return_type, self.signature.args,
            args)

        with builder.if_then(status.is_error, likely=False):
            gil = pyapi.gil_ensure()
            self.context.call_conv.raise_error(builder, pyapi, status)
            pyapi.gil_release(gil)

        return status.code, status.is_error

    def gen_prologue(self, builder, pyapi):
        pass  # Do nothing

    def gen_epilogue(self, builder, pyapi):
        pass  # Do nothing
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
class _GufuncObjectWrapper(_GufuncWrapper):
    """Gufunc wrapper for object-mode kernels.

    Holds the GIL for the whole outer loop and routes each call through
    object-mode boxing via :func:`_prepare_call_to_object_mode`.
    """

    def gen_loop_body(self, builder, pyapi, func, args):
        innercall, error = _prepare_call_to_object_mode(self.context,
                                                        builder, pyapi, func,
                                                        self.signature,
                                                        args)
        return innercall, error

    def gen_prologue(self, builder, pyapi):
        # Acquire the GIL before the loop starts.
        self.gil = pyapi.gil_ensure()

    def gen_epilogue(self, builder, pyapi):
        # Release the GIL acquired in gen_prologue().
        pyapi.gil_release(self.gil)
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def build_gufunc_wrapper(py_func, cres, sin, sout, cache, is_parfors):
    """Build the gufunc wrapper for compile result *cres*.

    Dispatches to the object-mode wrapper class when the kernel returns a
    PyObject, otherwise to the nopython wrapper, and returns the built
    ``_wrapper_info``.
    """
    signature = cres.signature
    if signature.return_type == types.pyobject:
        wrapcls = _GufuncObjectWrapper
    else:
        wrapcls = _GufuncWrapper
    return wrapcls(
        py_func, cres, sin, sout, cache, is_parfors=is_parfors,
    ).build()
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def _prepare_call_to_object_mode(context, builder, pyapi, func,
                                 signature, args):
    """Box native arguments into PyObjects and call an object-mode kernel.

    Array arguments are wrapped via the ``numba_ndarray_new`` runtime
    helper (borrowing data/shape/strides) rather than full NRT reflection,
    since the objects only live for the duration of the call.  Returns the
    (status-code, error-bit) pair used by the caller for loop control.
    """
    mod = builder.module

    bb_core_return = builder.append_basic_block('ufunc.core.return')

    # Call to
    # PyObject* ndarray_new(int nd,
    #                       npy_intp *dims,   /* shape */
    #                       npy_intp *strides,
    #                       void* data,
    #                       int type_num,
    #                       int itemsize)
    ll_int = context.get_value_type(types.int32)
    ll_intp = context.get_value_type(types.intp)
    ll_intp_ptr = ir.PointerType(ll_intp)
    ll_voidptr = context.get_value_type(types.voidptr)
    ll_pyobj = context.get_value_type(types.pyobject)
    fnty = ir.FunctionType(ll_pyobj, [ll_int, ll_intp_ptr,
                                      ll_intp_ptr, ll_voidptr,
                                      ll_int, ll_int])

    fn_array_new = cgutils.get_or_insert_function(mod, fnty,
                                                  "numba_ndarray_new")

    # Error flag: assume failure until the call completes successfully.
    error_pointer = cgutils.alloca_once(builder, ir.IntType(1), name='error')
    builder.store(cgutils.true_bit, error_pointer)

    # The PyObject* arguments to the kernel function
    object_args = []
    object_pointers = []

    for i, (arg, argty) in enumerate(zip(args, signature.args)):
        # Allocate NULL-initialized slot for this argument
        objptr = cgutils.alloca_once(builder, ll_pyobj, zfill=True)
        object_pointers.append(objptr)

        if isinstance(argty, types.Array):
            # Special case arrays: we don't need full-blown NRT reflection
            # since the argument will be gone at the end of the kernel
            arycls = context.make_array(argty)
            array = arycls(context, builder, value=arg)

            zero = Constant(ll_int, 0)

            # Extract members of the llarray
            nd = Constant(ll_int, argty.ndim)
            dims = builder.gep(array._get_ptr_by_name('shape'), [zero, zero])
            strides = builder.gep(array._get_ptr_by_name('strides'),
                                  [zero, zero])
            data = builder.bitcast(array.data, ll_voidptr)
            dtype = np.dtype(str(argty.dtype))

            # Prepare other info for reconstruction of the PyArray
            type_num = Constant(ll_int, dtype.num)
            itemsize = Constant(ll_int, dtype.itemsize)

            # Call helper to reconstruct PyArray objects
            obj = builder.call(fn_array_new, [nd, dims, strides, data,
                                              type_num, itemsize])
        else:
            # Other argument types => use generic boxing
            obj = pyapi.from_native_value(argty, arg)

        builder.store(obj, objptr)
        object_args.append(obj)

        # A NULL object means boxing failed: record and bail out early.
        obj_is_null = cgutils.is_null(builder, obj)
        builder.store(obj_is_null, error_pointer)
        cgutils.cbranch_or_continue(builder, obj_is_null, bb_core_return)

    # Call ufunc core function
    object_sig = [types.pyobject] * len(object_args)

    status, retval = context.call_conv.call_function(
        builder, func, types.pyobject, object_sig,
        object_args)
    builder.store(status.is_error, error_pointer)

    # Release returned object
    pyapi.decref(retval)

    builder.branch(bb_core_return)
    # At return block
    builder.position_at_end(bb_core_return)

    # Release argument objects (decref(NULL) is a safe no-op).
    for objptr in object_pointers:
        pyapi.decref(builder.load(objptr))

    innercall = status.code
    return innercall, builder.load(error_pointer)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
class GUArrayArg(object):
    """One gufunc operand: resolves its data pointer, core step and the
    inner dims/strides, then builds the per-iteration loader used by the
    kernel loop."""

    def __init__(self, context, builder, args, steps, i, step_offset,
                 typ, syms, sym_dim):

        self.context = context
        self.builder = builder

        offset = context.get_constant(types.intp, i)

        # Base data pointer for this operand (args[i]).
        data = builder.load(builder.gep(args, [offset], name="data.ptr"),
                            name="data")
        self.data = data

        # Outer-loop byte step for this operand (steps[i]).
        core_step_ptr = builder.gep(steps, [offset], name="core.step.ptr")
        core_step = builder.load(core_step_ptr)

        if isinstance(typ, types.Array):
            as_scalar = not syms

            # number of symbol in the shape spec should match the dimension
            # of the array type.
            if len(syms) != typ.ndim:
                if len(syms) == 0 and typ.ndim == 1:
                    # This is an exception for handling scalar argument.
                    # The type can be 1D array for scalar.
                    # In the future, we may deprecate this exception.
                    pass
                else:
                    raise TypeError("type and shape signature mismatch for arg "
                                    "#{0}".format(i + 1))

            ndim = typ.ndim
            shape = [sym_dim[s] for s in syms]

            # Inner-dimension byte strides come from steps[step_offset:].
            strides = []
            for j in range(ndim):
                stepptr = builder.gep(steps,
                                      [context.get_constant(types.intp,
                                                            step_offset + j)],
                                      name="step.ptr")
                step = builder.load(stepptr)
                strides.append(step)

            ldcls = (_ArrayAsScalarArgLoader
                     if as_scalar
                     else _ArrayArgLoader)

            self._loader = ldcls(dtype=typ.dtype,
                                 ndim=ndim,
                                 core_step=core_step,
                                 as_scalar=as_scalar,
                                 shape=shape,
                                 strides=strides)
        else:
            # If typ is not an array
            if syms:
                raise TypeError("scalar type {0} given for non scalar "
                                "argument #{1}".format(typ, i + 1))
            self._loader = _ScalarArgLoader(dtype=typ, stride=core_step)

    def get_array_at_offset(self, ind):
        """Return the kernel-visible value for outer-loop iteration *ind*."""
        return self._loader.load(context=self.context, builder=self.builder,
                                 data=self.data, ind=ind)
|
| 666 |
+
|
| 667 |
+
|
| 668 |
+
class _ScalarArgLoader(object):
    """
    Handle GFunc argument loading where a scalar type is used in the core
    function.
    Note: It still has a stride because the input to the gufunc can be an array
    for this argument.
    """

    def __init__(self, dtype, stride):
        self.dtype = dtype
        self.stride = stride

    def load(self, context, builder, data, ind):
        # Load at base + ind * stride
        offseted = builder.gep(data, [builder.mul(ind, self.stride)])
        dptr = builder.bitcast(offseted,
                               context.get_data_type(self.dtype).as_pointer())
        return builder.load(dptr)
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
class _ArrayArgLoader(object):
    """
    Handle GUFunc argument loading where an array is expected.
    """

    def __init__(self, dtype, ndim, core_step, as_scalar, shape, strides):
        self.dtype = dtype
        self.ndim = ndim
        self.core_step = core_step
        self.as_scalar = as_scalar
        self.shape = shape
        self.strides = strides

    def load(self, context, builder, data, ind):
        """Materialize an array struct viewing *data* at iteration *ind*."""
        arytyp = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A")
        arycls = context.make_array(arytyp)

        array = arycls(context, builder)
        # Slide the base pointer by ind outer-loop steps (in bytes).
        offseted_data = cgutils.pointer_add(builder,
                                            data,
                                            builder.mul(self.core_step,
                                                        ind))

        shape, strides = self._shape_and_strides(context, builder)

        itemsize = context.get_abi_sizeof(context.get_data_type(self.dtype))
        # Borrowed view: no meminfo, the data is owned by the caller.
        context.populate_array(array,
                               data=builder.bitcast(offseted_data,
                                                    array.data.type),
                               shape=shape,
                               strides=strides,
                               itemsize=context.get_constant(types.intp,
                                                             itemsize),
                               meminfo=None)

        return array._getvalue()

    def _shape_and_strides(self, context, builder):
        # Pack the inner dims/strides gathered from the gufunc call.
        shape = cgutils.pack_array(builder, self.shape)
        strides = cgutils.pack_array(builder, self.strides)
        return shape, strides
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
class _ArrayAsScalarArgLoader(_ArrayArgLoader):
|
| 732 |
+
"""
|
| 733 |
+
Handle GUFunc argument loading where the shape signature specifies
|
| 734 |
+
a scalar "()" but a 1D array is used for the type of the core function.
|
| 735 |
+
"""
|
| 736 |
+
|
| 737 |
+
def _shape_and_strides(self, context, builder):
|
| 738 |
+
# Set shape and strides for a 1D size 1 array
|
| 739 |
+
one = context.get_constant(types.intp, 1)
|
| 740 |
+
zero = context.get_constant(types.intp, 0)
|
| 741 |
+
shape = cgutils.pack_array(builder, [one])
|
| 742 |
+
strides = cgutils.pack_array(builder, [zero])
|
| 743 |
+
return shape, strides
|
deepseekvl2/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
deepseekvl2/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/METADATA
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.3
|
| 2 |
+
Name: Jinja2
|
| 3 |
+
Version: 3.1.5
|
| 4 |
+
Summary: A very fast and expressive template engine.
|
| 5 |
+
Maintainer-email: Pallets <contact@palletsprojects.com>
|
| 6 |
+
Requires-Python: >=3.7
|
| 7 |
+
Description-Content-Type: text/markdown
|
| 8 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 9 |
+
Classifier: Environment :: Web Environment
|
| 10 |
+
Classifier: Intended Audience :: Developers
|
| 11 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 12 |
+
Classifier: Operating System :: OS Independent
|
| 13 |
+
Classifier: Programming Language :: Python
|
| 14 |
+
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
| 15 |
+
Classifier: Topic :: Text Processing :: Markup :: HTML
|
| 16 |
+
Classifier: Typing :: Typed
|
| 17 |
+
Requires-Dist: MarkupSafe>=2.0
|
| 18 |
+
Requires-Dist: Babel>=2.7 ; extra == "i18n"
|
| 19 |
+
Project-URL: Changes, https://jinja.palletsprojects.com/changes/
|
| 20 |
+
Project-URL: Chat, https://discord.gg/pallets
|
| 21 |
+
Project-URL: Documentation, https://jinja.palletsprojects.com/
|
| 22 |
+
Project-URL: Donate, https://palletsprojects.com/donate
|
| 23 |
+
Project-URL: Source, https://github.com/pallets/jinja/
|
| 24 |
+
Provides-Extra: i18n
|
| 25 |
+
|
| 26 |
+
# Jinja
|
| 27 |
+
|
| 28 |
+
Jinja is a fast, expressive, extensible templating engine. Special
|
| 29 |
+
placeholders in the template allow writing code similar to Python
|
| 30 |
+
syntax. Then the template is passed data to render the final document.
|
| 31 |
+
|
| 32 |
+
It includes:
|
| 33 |
+
|
| 34 |
+
- Template inheritance and inclusion.
|
| 35 |
+
- Define and import macros within templates.
|
| 36 |
+
- HTML templates can use autoescaping to prevent XSS from untrusted
|
| 37 |
+
user input.
|
| 38 |
+
- A sandboxed environment can safely render untrusted templates.
|
| 39 |
+
- AsyncIO support for generating templates and calling async
|
| 40 |
+
functions.
|
| 41 |
+
- I18N support with Babel.
|
| 42 |
+
- Templates are compiled to optimized Python code just-in-time and
|
| 43 |
+
cached, or can be compiled ahead-of-time.
|
| 44 |
+
- Exceptions point to the correct line in templates to make debugging
|
| 45 |
+
easier.
|
| 46 |
+
- Extensible filters, tests, functions, and even syntax.
|
| 47 |
+
|
| 48 |
+
Jinja's philosophy is that while application logic belongs in Python if
|
| 49 |
+
possible, it shouldn't make the template designer's job difficult by
|
| 50 |
+
restricting functionality too much.
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
## In A Nutshell
|
| 54 |
+
|
| 55 |
+
```jinja
|
| 56 |
+
{% extends "base.html" %}
|
| 57 |
+
{% block title %}Members{% endblock %}
|
| 58 |
+
{% block content %}
|
| 59 |
+
<ul>
|
| 60 |
+
{% for user in users %}
|
| 61 |
+
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
|
| 62 |
+
{% endfor %}
|
| 63 |
+
</ul>
|
| 64 |
+
{% endblock %}
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
## Donate
|
| 68 |
+
|
| 69 |
+
The Pallets organization develops and supports Jinja and other popular
|
| 70 |
+
packages. In order to grow the community of contributors and users, and
|
| 71 |
+
allow the maintainers to devote more time to the projects, [please
|
| 72 |
+
donate today][].
|
| 73 |
+
|
| 74 |
+
[please donate today]: https://palletsprojects.com/donate
|
| 75 |
+
|
deepseekvl2/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: flit 3.10.1
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
deepseekvl2/lib/python3.10/site-packages/lit-18.1.8.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
lit
|
deepseekvl2/lib/python3.10/site-packages/triton/__init__.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""isort:skip_file"""
|
| 2 |
+
__version__ = '2.0.0'
|
| 3 |
+
|
| 4 |
+
# ---------------------------------------
|
| 5 |
+
# Note: import order is significant here.
|
| 6 |
+
|
| 7 |
+
# TODO: torch needs to be imported first
|
| 8 |
+
# or pybind11 shows `munmap_chunk(): invalid pointer`
|
| 9 |
+
import torch # noqa: F401
|
| 10 |
+
|
| 11 |
+
# submodules
|
| 12 |
+
from . import impl
|
| 13 |
+
from .utils import (
|
| 14 |
+
cdiv,
|
| 15 |
+
MockTensor,
|
| 16 |
+
next_power_of_2,
|
| 17 |
+
reinterpret,
|
| 18 |
+
TensorWrapper,
|
| 19 |
+
)
|
| 20 |
+
from .runtime import (
|
| 21 |
+
autotune,
|
| 22 |
+
Config,
|
| 23 |
+
heuristics,
|
| 24 |
+
JITFunction,
|
| 25 |
+
KernelInterface,
|
| 26 |
+
)
|
| 27 |
+
from .runtime.jit import jit
|
| 28 |
+
from .compiler import compile, CompilationError
|
| 29 |
+
from . import language
|
| 30 |
+
from . import testing
|
| 31 |
+
from . import ops
|
| 32 |
+
|
| 33 |
+
__all__ = [
|
| 34 |
+
"autotune",
|
| 35 |
+
"cdiv",
|
| 36 |
+
"CompilationError",
|
| 37 |
+
"compile",
|
| 38 |
+
"Config",
|
| 39 |
+
"heuristics",
|
| 40 |
+
"impl",
|
| 41 |
+
"jit",
|
| 42 |
+
"JITFunction",
|
| 43 |
+
"KernelInterface",
|
| 44 |
+
"language",
|
| 45 |
+
"MockTensor",
|
| 46 |
+
"next_power_of_2",
|
| 47 |
+
"ops",
|
| 48 |
+
"reinterpret",
|
| 49 |
+
"runtime",
|
| 50 |
+
"TensorWrapper",
|
| 51 |
+
"testing",
|
| 52 |
+
]
|
deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (829 Bytes). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/compiler.cpython-310.pyc
ADDED
|
Binary file (60.7 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc
ADDED
|
Binary file (14.9 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/compiler.py
ADDED
|
@@ -0,0 +1,1854 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import ast
|
| 4 |
+
import contextlib
|
| 5 |
+
import functools
|
| 6 |
+
import hashlib
|
| 7 |
+
import io
|
| 8 |
+
import json
|
| 9 |
+
import os
|
| 10 |
+
import re
|
| 11 |
+
import shutil
|
| 12 |
+
import subprocess
|
| 13 |
+
import sys
|
| 14 |
+
import sysconfig
|
| 15 |
+
import tempfile
|
| 16 |
+
import warnings
|
| 17 |
+
from collections import namedtuple
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from sysconfig import get_paths
|
| 20 |
+
from typing import Any, Callable, Dict, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import setuptools
|
| 23 |
+
import torch
|
| 24 |
+
from filelock import FileLock
|
| 25 |
+
|
| 26 |
+
import triton
|
| 27 |
+
import triton._C.libtriton.triton as _triton
|
| 28 |
+
from . import impl
|
| 29 |
+
from .tools.disasm import extract
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def str_to_ty(name):
    """Map a signature type string (e.g. "fp32", "*i8") to a triton type.

    A leading "*" denotes a pointer to the type named by the remainder.
    Raises KeyError for unrecognized scalar names.
    """
    if name[0] == "*":
        # Pointer: recurse on the pointee's name.
        return triton.language.pointer_type(str_to_ty(name[1:]))
    lang = triton.language
    scalar_types = {
        "fp8": lang.float8,
        "fp16": lang.float16,
        "bf16": lang.bfloat16,
        "fp32": lang.float32,
        "fp64": lang.float64,
        "i1": lang.int1,
        "i8": lang.int8,
        "i16": lang.int16,
        "i32": lang.int32,
        "i64": lang.int64,
        "u8": lang.uint8,
        "u16": lang.uint16,
        "u32": lang.uint32,
        "u64": lang.uint64,
        "B": lang.int1,
    }
    return scalar_types[name]
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def mangle_ty(ty):
    """Encode a triton type as a short string for symbol-name mangling."""
    if ty.is_ptr():
        # Pointers prefix the pointee's mangled name with 'P'.
        return 'P' + mangle_ty(ty.element_ty)
    if ty.is_int():
        return 'i' + str(ty.int_bitwidth)
    # Floating-point scalars mangle to their own name.
    for predicate, tag in (('is_fp8', 'fp8'), ('is_fp16', 'fp16'),
                           ('is_bf16', 'bf16'), ('is_fp32', 'fp32'),
                           ('is_fp64', 'fp64')):
        if getattr(ty, predicate)():
            return tag
    if ty.is_block():
        # Blocks: scalar mangling plus the shape between 'S' markers.
        dims = '_'.join(str(dim) for dim in ty.shape)
        return f'{mangle_ty(ty.scalar)}S{dims}S'
    if ty.is_void():
        return 'V'
    assert False, "Unsupported type"
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def mangle_fn(name, arg_tys, constants):
    """Build a mangled symbol name from argument types and constexpr values.

    The return type is deliberately not encoded: it is a function of the
    argument types.
    """
    arg_part = '_'.join(mangle_ty(ty) for ty in arg_tys)
    const_parts = [f'{key}c{repr(constants[key])}' for key in sorted(constants)]
    const_part = '_'.join(const_parts)
    # Sanitize characters that are not valid in symbol names.
    for bad, repl in (('.', '_d_'), ("'", '_sq_')):
        const_part = const_part.replace(bad, repl)
    return f'{name}__{arg_part}__{const_part}'
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class enter_sub_region:
    """Context manager that snapshots the generator's scope state.

    On entry, records the current local scope, local definitions and builder
    insertion position; on exit, restores them so code generated inside the
    region does not leak definitions into the parent scope.
    """

    def __init__(self, generator: CodeGenerator):
        self.generator = generator

    def __enter__(self):
        gen = self.generator
        # Snapshot the parent scope's state (copies, so in-region updates
        # cannot mutate the snapshots).
        self.liveins = gen.lscope.copy()
        self.prev_defs = gen.local_defs.copy()
        gen.local_defs = {}
        self.insert_block = gen.builder.get_insertion_block()
        self.insert_point = gen.builder.get_insertion_point()
        return self.liveins, self.insert_block

    def __exit__(self, *args, **kwargs):
        gen = self.generator
        # Restore builder position and the parent scope's bindings.
        gen.builder.restore_insertion_point(self.insert_point)
        gen.lscope = self.liveins
        gen.local_defs = self.prev_defs
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class CodeGenerator(ast.NodeVisitor):
|
| 110 |
+
def __init__(self, context, prototype, gscope, attributes, constants, function_name, module=None, is_kernel=False, function_types=dict()):
|
| 111 |
+
self.builder = _triton.ir.builder(context)
|
| 112 |
+
self.module = self.builder.create_module() if module is None else module
|
| 113 |
+
self.function_ret_types = function_types
|
| 114 |
+
self.prototype = prototype
|
| 115 |
+
self.gscope = gscope
|
| 116 |
+
self.lscope = dict()
|
| 117 |
+
self.attributes = attributes
|
| 118 |
+
self.constants = constants
|
| 119 |
+
self.function_name = function_name
|
| 120 |
+
self.is_kernel = is_kernel
|
| 121 |
+
self.last_node = None
|
| 122 |
+
self.builtins = {
|
| 123 |
+
'range': range,
|
| 124 |
+
'min': triton.language.minimum,
|
| 125 |
+
'float': float,
|
| 126 |
+
'int': int,
|
| 127 |
+
'print': print,
|
| 128 |
+
'isinstance': isinstance,
|
| 129 |
+
'getattr': getattr,
|
| 130 |
+
}
|
| 131 |
+
self.scf_stack = []
|
| 132 |
+
# SSA-construction
|
| 133 |
+
# name => triton.language.tensor
|
| 134 |
+
self.local_defs: Dict[str, triton.language.tensor] = {}
|
| 135 |
+
self.global_uses: Dict[str, triton.language.tensor] = {}
|
| 136 |
+
|
| 137 |
+
def get_value(self, name):
|
| 138 |
+
''' This function:
|
| 139 |
+
1. make sure `name` is defined
|
| 140 |
+
2. if `name` is triton.language.tensor, get stored tensor by calling
|
| 141 |
+
`self._get_tensor()`
|
| 142 |
+
'''
|
| 143 |
+
# search node.id in local scope
|
| 144 |
+
ret = None
|
| 145 |
+
if name in self.lscope:
|
| 146 |
+
ret = self.lscope[name]
|
| 147 |
+
if name not in self.local_defs:
|
| 148 |
+
self.global_uses[name] = ret
|
| 149 |
+
# search node.id in global scope
|
| 150 |
+
elif name in self.gscope:
|
| 151 |
+
ret = self.gscope[name]
|
| 152 |
+
# search node.id in builtins
|
| 153 |
+
elif name in self.builtins:
|
| 154 |
+
ret = self.builtins[name]
|
| 155 |
+
else:
|
| 156 |
+
raise ValueError(f'{name} is not defined')
|
| 157 |
+
return ret
|
| 158 |
+
|
| 159 |
+
def set_value(self, name: str,
|
| 160 |
+
value: Union[triton.language.tensor, triton.language.constexpr]) -> None:
|
| 161 |
+
''' This function:
|
| 162 |
+
called by visit_Assign() & visit_FuncDef() to store left value (lvalue)
|
| 163 |
+
1. record local defined name (FIXME: should consider control flow)
|
| 164 |
+
2. store tensor in self.lvalue
|
| 165 |
+
'''
|
| 166 |
+
self.lscope[name] = value
|
| 167 |
+
self.local_defs[name] = value
|
| 168 |
+
|
| 169 |
+
    def is_triton_tensor(self, value):
        # True when `value` is an IR tensor (as opposed to a constexpr or a
        # plain Python value).
        return isinstance(value, triton.language.tensor)
|
| 171 |
+
|
| 172 |
+
#
|
| 173 |
+
# AST visitor
|
| 174 |
+
#
|
| 175 |
+
    def visit_compound_statement(self, stmts):
        """Visit a statement list; return truthy iff it ends with `return`.

        Statements after the first `ast.Return` are unreachable and skipped.
        Side effect: `self.last_ret_type` holds the result of the last
        visited statement (the return type when the list ends with Return).
        """
        for stmt in stmts:
            self.last_ret_type = self.visit(stmt)
            if isinstance(stmt, ast.Return):
                break
        # NOTE: relies on `stmt` leaking out of the loop; `stmts and ...`
        # short-circuits to a falsy value (the empty list) when `stmts` is
        # empty, so the leaked name is never read in that case.
        return stmts and isinstance(stmt, ast.Return)
|
| 181 |
+
|
| 182 |
+
    def visit_Module(self, node):
        # A module node is just a container: visit its children directly.
        ast.NodeVisitor.generic_visit(self, node)
|
| 184 |
+
|
| 185 |
+
def visit_List(self, node):
|
| 186 |
+
ctx = self.visit(node.ctx)
|
| 187 |
+
assert ctx is None
|
| 188 |
+
elts = [self.visit(elt) for elt in node.elts]
|
| 189 |
+
return elts
|
| 190 |
+
|
| 191 |
+
# By design, only non-kernel functions can return
|
| 192 |
+
    def visit_Return(self, node):
        """Lower a `return` statement; returns the triton return type(s).

        By design, only non-kernel (device) functions can return values.
        """
        ret_value = self.visit(node.value)
        if ret_value is None:
            # Bare `return`: emit a void return.
            self.builder.ret([])
            ret_ty = None
        elif isinstance(ret_value, tuple):
            # Multiple return values: materialize each as an IR tensor.
            ret_values = [triton.language.core._to_tensor(v, self.builder) for v in ret_value]
            ret_types = [v.type for v in ret_values]
            self.builder.ret([v.handle for v in ret_values])
            ret_ty = tuple(ret_types)
        else:
            ret = triton.language.core._to_tensor(ret_value, self.builder)
            self.builder.ret([ret.handle])
            ret_ty = ret.type
        return ret_ty
|
| 213 |
+
|
| 214 |
+
    def visit_FunctionDef(self, node):
        """Emit IR for a function definition: declare the function, bind its
        arguments, lower the body, and finalize the return type."""
        arg_names, kwarg_names = self.visit(node.args)
        # initialize defaults: bind each defaulted parameter as if assigned
        # at the top of the function (defaults pair with the trailing args).
        for i, default_value in enumerate(node.args.defaults):
            arg_node = node.args.args[-i - 1]
            annotation = arg_node.annotation
            name = arg_node.arg
            st_target = ast.Name(id=name, ctx=ast.Store())
            if annotation is None:
                init_node = ast.Assign(targets=[st_target], value=default_value)
            else:
                init_node = ast.AnnAssign(target=st_target, value=default_value, annotation=annotation)
            self.visit(init_node)
        # initialize function: kernels are externally visible, device
        # functions are private.
        visibility = "public" if self.is_kernel else "private"
        fn = self.builder.get_or_insert_function(self.module, self.function_name, self.prototype.to_ir(self.builder), visibility)
        self.module.push_back(fn)
        entry = fn.add_entry_block()
        arg_values = []
        idx = 0
        # Specialized (constexpr) parameters consume no IR argument slot, so
        # `idx` tracks the position among the remaining real arguments only.
        for i, arg_name in enumerate(arg_names):
            if i in self.constants:
                cst = self.constants[i]
                if not isinstance(cst, triton.language.constexpr):
                    cst = triton.language.constexpr(self.constants[i])
                arg_values.append(cst)
                continue
            else:
                if i in self.attributes:
                    fn.set_arg_attr(idx, "tt.divisibility", self.attributes[i][1])
                arg_values.append(triton.language.tensor(fn.args(idx), self.prototype.param_types[idx]))
                idx += 1

        insert_pt = self.builder.get_insertion_block()
        for arg_name, arg_value in zip(arg_names, arg_values):
            self.set_value(arg_name, arg_value)
        self.builder.set_insertion_point_to_start(entry)
        # visit function body
        has_ret = self.visit_compound_statement(node.body)
        # finalize function
        if not has_ret:
            # Fell off the end of the body: emit an implicit void return.
            self.builder.ret([])
        else:
            # update return type now that the body's Return has been seen.
            if isinstance(self.last_ret_type, tuple):
                self.prototype.ret_types = list(self.last_ret_type)
                fn.reset_type(self.prototype.to_ir(self.builder))
            else:
                self.prototype.ret_types = [self.last_ret_type]
                fn.reset_type(self.prototype.to_ir(self.builder))
        # NOTE(review): the dump's indentation is ambiguous here; upstream
        # places this trailer at method level (restore the caller's
        # insertion point) — confirm against the original file.
        if insert_pt:
            self.builder.set_insertion_point_to_end(insert_pt)
|
| 266 |
+
|
| 267 |
+
def visit_arguments(self, node):
|
| 268 |
+
arg_names = []
|
| 269 |
+
for arg in node.args:
|
| 270 |
+
arg_names += [self.visit(arg)]
|
| 271 |
+
kwarg_names = self.visit(node.kwarg)
|
| 272 |
+
return arg_names, kwarg_names
|
| 273 |
+
|
| 274 |
+
    def visit_arg(self, node):
        # Visit any child nodes (e.g. the annotation), then evaluate the
        # argument node to its name.
        ast.NodeVisitor.generic_visit(self, node)
        return node.arg
|
| 277 |
+
|
| 278 |
+
    def visit_AnnAssign(self, node):
        """Handle `target: annotation = value` assignments.

        `constexpr`-annotated targets are bound as compile-time constants and
        may not be reassigned; anything else falls through to visit_Assign.
        """
        # extract attributes
        annotation = self.visit(node.annotation)
        target = self.visit(node.target)
        value = self.visit(node.value)
        # constexpr
        if annotation == triton.language.constexpr:
            if target in self.lscope:
                raise ValueError(f'{target} is already defined.'
                                 f' constexpr cannot be reassigned.')
            if not isinstance(value, triton.language.constexpr):
                value = triton.language.constexpr(value)
            # NOTE: bound directly into lscope (not via set_value), so the
            # constant is not recorded in local_defs.
            self.lscope[target] = value
            return self.lscope[target]
        # default: call visit_Assign
        return self.visit_Assign(node)
|
| 294 |
+
|
| 295 |
+
    def visit_Assign(self, node):
        """Handle (possibly tuple-destructuring) assignment statements."""
        _names = []
        for target in node.targets:
            _names += [self.visit(target)]
        # Chained assignment (a = b = v) is not supported.
        assert len(_names) == 1
        names = _names[0]
        values = self.visit(node.value)
        # Normalize both sides to sequences so single and tuple assignments
        # share one code path.
        if not isinstance(names, tuple):
            names = [names]
        if not isinstance(values, tuple):
            values = [values]
        for name, value in zip(names, values):
            # by default, constexpr are assigned into python variable
            if isinstance(value, triton.language.constexpr):
                value = value.value
            if not isinstance(value, triton.language.tensor):
                # Promote plain Python values to IR tensors.
                value = triton.language.core._to_tensor(value, self.builder)
            self.set_value(name, value)
|
| 313 |
+
|
| 314 |
+
def visit_AugAssign(self, node):
|
| 315 |
+
name = node.target.id
|
| 316 |
+
lhs = ast.Name(id=name, ctx=ast.Load())
|
| 317 |
+
rhs = ast.BinOp(lhs, node.op, node.value)
|
| 318 |
+
assign = ast.Assign(targets=[node.target], value=rhs)
|
| 319 |
+
self.visit(assign)
|
| 320 |
+
return self.get_value(name)
|
| 321 |
+
|
| 322 |
+
def visit_Name(self, node):
|
| 323 |
+
if type(node.ctx) == ast.Store:
|
| 324 |
+
return node.id
|
| 325 |
+
return self.get_value(node.id)
|
| 326 |
+
|
| 327 |
+
    def visit_Store(self, node):
        # Expression-context marker node: nothing to evaluate.
        ast.NodeVisitor.generic_visit(self, node)
|
| 329 |
+
|
| 330 |
+
    def visit_Load(self, node):
        # Expression-context marker node: nothing to evaluate.
        ast.NodeVisitor.generic_visit(self, node)
|
| 332 |
+
|
| 333 |
+
def visit_Tuple(self, node):
|
| 334 |
+
args = [self.visit(x) for x in node.elts]
|
| 335 |
+
return tuple(args)
|
| 336 |
+
|
| 337 |
+
    def visit_BinOp(self, node):
        """Lower a binary expression by dispatching to the magic method.

        Tensor operands dispatch through the triton tensor's dunder with the
        IR builder injected; when only the right operand is a tensor, the
        reflected variant (e.g. __radd__) is used instead.
        """
        lhs = self.visit(node.left)
        rhs = self.visit(node.right)
        # Dispatch table: ast operator class -> Python magic-method name.
        fn = {
            ast.Add: '__add__',
            ast.Sub: '__sub__',
            ast.Mult: '__mul__',
            ast.Div: '__truediv__',
            ast.FloorDiv: '__floordiv__',
            ast.Mod: '__mod__',
            ast.Pow: '__pow__',
            ast.LShift: '__lshift__',
            ast.RShift: '__rshift__',
            ast.BitAnd: '__and__',
            ast.BitOr: '__or__',
            ast.BitXor: '__xor__',
        }[type(node.op)]
        if self.is_triton_tensor(lhs):
            return getattr(lhs, fn)(rhs, _builder=self.builder)
        elif self.is_triton_tensor(rhs):
            # '__add__' -> '__radd__': splice 'r' after the leading dunder.
            fn = fn[:2] + 'r' + fn[2:]
            return getattr(rhs, fn)(lhs, _builder=self.builder)
        else:
            # Pure Python operands: evaluate eagerly at compile time.
            return getattr(lhs, fn)(rhs)
|
| 361 |
+
|
| 362 |
+
def visit_then_else_blocks(self, node, liveins, then_block, else_block):
|
| 363 |
+
# then block
|
| 364 |
+
self.builder.set_insertion_point_to_start(then_block)
|
| 365 |
+
self.visit_compound_statement(node.body)
|
| 366 |
+
then_block = self.builder.get_insertion_block()
|
| 367 |
+
then_defs = self.local_defs.copy()
|
| 368 |
+
# else block
|
| 369 |
+
else_defs = {}
|
| 370 |
+
if node.orelse:
|
| 371 |
+
self.builder.set_insertion_point_to_start(else_block)
|
| 372 |
+
self.lscope = liveins.copy()
|
| 373 |
+
self.local_defs = {}
|
| 374 |
+
self.visit_compound_statement(node.orelse)
|
| 375 |
+
else_defs = self.local_defs.copy()
|
| 376 |
+
else_block = self.builder.get_insertion_block()
|
| 377 |
+
|
| 378 |
+
# update block arguments
|
| 379 |
+
names = []
|
| 380 |
+
ret_types = []
|
| 381 |
+
ir_ret_types = []
|
| 382 |
+
# variables in livein whose value is updated in `if`
|
| 383 |
+
for name in liveins:
|
| 384 |
+
# check type
|
| 385 |
+
for defs, block_name in [(then_defs, 'then'), (else_defs, 'else')]:
|
| 386 |
+
if name in defs:
|
| 387 |
+
assert defs[name].type == liveins[name].type,\
|
| 388 |
+
f'initial value for `{name}` is of type {liveins[name].type}, '\
|
| 389 |
+
f'but the {block_name} block redefines it as {defs[name].type}'
|
| 390 |
+
if name in then_defs or name in else_defs:
|
| 391 |
+
names.append(name)
|
| 392 |
+
ret_types.append(then_defs[name].type if name in then_defs else else_defs[name].type)
|
| 393 |
+
ir_ret_types.append(then_defs[name].handle.get_type() if name in then_defs else else_defs[name].handle.get_type())
|
| 394 |
+
# variable defined in then but not in else
|
| 395 |
+
if name in then_defs and name not in else_defs:
|
| 396 |
+
else_defs[name] = liveins[name]
|
| 397 |
+
# variable defined in else but not in then
|
| 398 |
+
if name in else_defs and name not in then_defs:
|
| 399 |
+
then_defs[name] = liveins[name]
|
| 400 |
+
# variables that are both in then and else but not in liveins
|
| 401 |
+
# TODO: could probably be cleaned up
|
| 402 |
+
for name in then_defs.keys() & else_defs.keys():
|
| 403 |
+
if name in names:
|
| 404 |
+
continue
|
| 405 |
+
then_ty = then_defs[name].type
|
| 406 |
+
else_ty = else_defs[name].type
|
| 407 |
+
assert then_ty == else_ty,\
|
| 408 |
+
f'mismatched type for {name} between then block ({then_ty}) '\
|
| 409 |
+
f'and else block ({else_ty})'
|
| 410 |
+
names.append(name)
|
| 411 |
+
ret_types.append(then_ty)
|
| 412 |
+
ir_ret_types.append(then_defs[name].handle.get_type())
|
| 413 |
+
|
| 414 |
+
return then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types
|
| 415 |
+
|
| 416 |
+
def visit_if_top_level(self, cond, node):
|
| 417 |
+
with enter_sub_region(self) as sr:
|
| 418 |
+
liveins, ip_block = sr
|
| 419 |
+
then_block = self.builder.create_block()
|
| 420 |
+
else_block = self.builder.create_block()
|
| 421 |
+
# create basic-block after conditional
|
| 422 |
+
endif_block = self.builder.create_block()
|
| 423 |
+
# create branch
|
| 424 |
+
self.builder.set_insertion_point_to_end(ip_block)
|
| 425 |
+
self.builder.create_cond_branch(cond.handle, then_block, else_block)
|
| 426 |
+
# visit then and else blocks
|
| 427 |
+
then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types = \
|
| 428 |
+
self.visit_then_else_blocks(node, liveins, then_block, else_block)
|
| 429 |
+
# then terminator
|
| 430 |
+
self.builder.set_insertion_point_to_end(then_block)
|
| 431 |
+
if not then_block.has_terminator():
|
| 432 |
+
self.builder.create_branch(endif_block, [then_defs[n].handle for n in names])
|
| 433 |
+
# else terminator
|
| 434 |
+
self.builder.set_insertion_point_to_end(else_block)
|
| 435 |
+
if not else_block.has_terminator():
|
| 436 |
+
self.builder.create_branch(endif_block, [else_defs[n].handle for n in names])
|
| 437 |
+
for ty in ir_ret_types:
|
| 438 |
+
endif_block.add_argument(ty)
|
| 439 |
+
# change block
|
| 440 |
+
self.builder.set_insertion_point_to_start(endif_block)
|
| 441 |
+
# update value
|
| 442 |
+
for i, name in enumerate(names):
|
| 443 |
+
new_tensor = triton.language.core.tensor(endif_block.arg(i), ret_types[i])
|
| 444 |
+
self.set_value(name, new_tensor)
|
| 445 |
+
|
| 446 |
+
# TODO: refactor
|
| 447 |
+
def visit_if_scf(self, cond, node):
|
| 448 |
+
with enter_sub_region(self) as sr:
|
| 449 |
+
liveins, _ = sr
|
| 450 |
+
ip = self.builder.get_insertion_point()
|
| 451 |
+
then_block = self.builder.create_block()
|
| 452 |
+
else_block = self.builder.create_block() if node.orelse else None
|
| 453 |
+
then_defs, else_defs, then_block, else_block, names, ret_types, _ = \
|
| 454 |
+
self.visit_then_else_blocks(node, liveins, then_block, else_block)
|
| 455 |
+
# create if op
|
| 456 |
+
self.builder.restore_insertion_point(ip)
|
| 457 |
+
if_op = self.builder.create_if_op([ty.to_ir(self.builder) for ty in ret_types], cond.handle, True)
|
| 458 |
+
then_block.merge_block_before(if_op.get_then_block())
|
| 459 |
+
self.builder.set_insertion_point_to_end(if_op.get_then_block())
|
| 460 |
+
if len(names) > 0:
|
| 461 |
+
self.builder.create_yield_op([then_defs[n].handle for n in names])
|
| 462 |
+
if not node.orelse:
|
| 463 |
+
else_block = if_op.get_else_block()
|
| 464 |
+
else:
|
| 465 |
+
else_block.merge_block_before(if_op.get_else_block())
|
| 466 |
+
self.builder.set_insertion_point_to_end(if_op.get_else_block())
|
| 467 |
+
if len(names) > 0:
|
| 468 |
+
self.builder.create_yield_op([else_defs[n].handle for n in names])
|
| 469 |
+
# update values
|
| 470 |
+
for i, name in enumerate(names):
|
| 471 |
+
new_tensor = triton.language.core.tensor(if_op.get_result(i), ret_types[i])
|
| 472 |
+
self.set_value(name, new_tensor)
|
| 473 |
+
|
| 474 |
+
def visit_If(self, node):
|
| 475 |
+
cond = self.visit(node.test)
|
| 476 |
+
if isinstance(cond, triton.language.tensor):
|
| 477 |
+
cond = cond.to(triton.language.int1, _builder=self.builder)
|
| 478 |
+
if self.scf_stack:
|
| 479 |
+
self.visit_if_scf(cond, node)
|
| 480 |
+
else:
|
| 481 |
+
self.visit_if_top_level(cond, node)
|
| 482 |
+
else:
|
| 483 |
+
if isinstance(cond, triton.language.constexpr):
|
| 484 |
+
cond = cond.value
|
| 485 |
+
if cond:
|
| 486 |
+
self.visit_compound_statement(node.body)
|
| 487 |
+
else:
|
| 488 |
+
self.visit_compound_statement(node.orelse)
|
| 489 |
+
|
| 490 |
+
def visit_IfExp(self, node):
|
| 491 |
+
cond = self.visit(node.test)
|
| 492 |
+
if cond.value:
|
| 493 |
+
return self.visit(node.body)
|
| 494 |
+
else:
|
| 495 |
+
return self.visit(node.orelse)
|
| 496 |
+
|
| 497 |
+
def visit_Pass(self, node):
|
| 498 |
+
pass
|
| 499 |
+
|
| 500 |
+
def visit_Compare(self, node):
|
| 501 |
+
assert len(node.comparators) == 1
|
| 502 |
+
assert len(node.ops) == 1
|
| 503 |
+
lhs = self.visit(node.left)
|
| 504 |
+
rhs = self.visit(node.comparators[0])
|
| 505 |
+
if isinstance(lhs, triton.language.constexpr):
|
| 506 |
+
lhs = lhs.value
|
| 507 |
+
if isinstance(rhs, triton.language.constexpr):
|
| 508 |
+
rhs = rhs.value
|
| 509 |
+
if type(node.ops[0]) == ast.Is:
|
| 510 |
+
return triton.language.constexpr(lhs is rhs)
|
| 511 |
+
if type(node.ops[0]) == ast.IsNot:
|
| 512 |
+
return triton.language.constexpr(lhs is not rhs)
|
| 513 |
+
fn = {
|
| 514 |
+
ast.Eq: '__eq__',
|
| 515 |
+
ast.NotEq: '__ne__',
|
| 516 |
+
ast.Lt: '__lt__',
|
| 517 |
+
ast.LtE: '__le__',
|
| 518 |
+
ast.Gt: '__gt__',
|
| 519 |
+
ast.GtE: '__ge__',
|
| 520 |
+
}[type(node.ops[0])]
|
| 521 |
+
if self.is_triton_tensor(lhs):
|
| 522 |
+
return getattr(lhs, fn)(rhs, _builder=self.builder)
|
| 523 |
+
elif self.is_triton_tensor(rhs):
|
| 524 |
+
fn = fn[:2] + 'r' + fn[2:]
|
| 525 |
+
return getattr(rhs, fn)(lhs, _builder=self.builder)
|
| 526 |
+
else:
|
| 527 |
+
return getattr(lhs, fn)(rhs)
|
| 528 |
+
|
| 529 |
+
def visit_UnaryOp(self, node):
|
| 530 |
+
op = self.visit(node.operand)
|
| 531 |
+
fn = {
|
| 532 |
+
ast.USub: '__neg__',
|
| 533 |
+
ast.UAdd: '__pos__',
|
| 534 |
+
ast.Not: '__not__',
|
| 535 |
+
ast.Invert: '__invert__',
|
| 536 |
+
}[type(node.op)]
|
| 537 |
+
if self.is_triton_tensor(op):
|
| 538 |
+
return getattr(op, fn)(_builder=self.builder)
|
| 539 |
+
return getattr(op, fn)()
|
| 540 |
+
|
| 541 |
+
def visit_While(self, node):
|
| 542 |
+
with enter_sub_region(self) as sr:
|
| 543 |
+
liveins, insert_block = sr
|
| 544 |
+
|
| 545 |
+
# loop body (the after region)
|
| 546 |
+
# loop_block = self.builder.create_block()
|
| 547 |
+
dummy = self.builder.create_block()
|
| 548 |
+
self.builder.set_insertion_point_to_start(dummy)
|
| 549 |
+
self.scf_stack.append(node)
|
| 550 |
+
self.visit_compound_statement(node.body)
|
| 551 |
+
self.scf_stack.pop()
|
| 552 |
+
loop_defs = self.local_defs
|
| 553 |
+
|
| 554 |
+
# collect loop-carried values
|
| 555 |
+
names = []
|
| 556 |
+
ret_types = []
|
| 557 |
+
init_args = []
|
| 558 |
+
for name in loop_defs:
|
| 559 |
+
if name in liveins:
|
| 560 |
+
# We should not def new constexpr
|
| 561 |
+
assert self.is_triton_tensor(loop_defs[name])
|
| 562 |
+
assert self.is_triton_tensor(liveins[name])
|
| 563 |
+
assert loop_defs[name].type == liveins[name].type
|
| 564 |
+
# these are loop-carried values
|
| 565 |
+
names.append(name)
|
| 566 |
+
ret_types.append(loop_defs[name].type)
|
| 567 |
+
init_args.append(liveins[name])
|
| 568 |
+
|
| 569 |
+
self.builder.set_insertion_point_to_end(insert_block)
|
| 570 |
+
while_op = self.builder.create_while_op([ty.to_ir(self.builder) for ty in ret_types],
|
| 571 |
+
[arg.handle for arg in init_args])
|
| 572 |
+
# merge the condition region
|
| 573 |
+
before_block = self.builder.create_block_with_parent(while_op.get_before(),
|
| 574 |
+
[ty.to_ir(self.builder) for ty in ret_types])
|
| 575 |
+
self.builder.set_insertion_point_to_start(before_block)
|
| 576 |
+
for i, name in enumerate(names):
|
| 577 |
+
self.lscope[name] = triton.language.core.tensor(before_block.arg(i), ret_types[i])
|
| 578 |
+
self.local_defs[name] = self.lscope[name]
|
| 579 |
+
cond = self.visit(node.test)
|
| 580 |
+
self.builder.set_insertion_point_to_end(before_block)
|
| 581 |
+
# create ConditionOp: e.g., scf.condition(%cond) %arg0, %arg1, ...
|
| 582 |
+
self.builder.create_condition_op(cond.handle, [before_block.arg(i) for i in range(len(init_args))])
|
| 583 |
+
# merge the loop body
|
| 584 |
+
after_block = self.builder.create_block_with_parent(while_op.get_after(),
|
| 585 |
+
[ty.to_ir(self.builder) for ty in ret_types])
|
| 586 |
+
|
| 587 |
+
# generate loop body
|
| 588 |
+
self.builder.set_insertion_point_to_start(after_block)
|
| 589 |
+
for i, name in enumerate(names):
|
| 590 |
+
self.lscope[name] = triton.language.core.tensor(after_block.arg(i), ret_types[i])
|
| 591 |
+
self.local_defs[name] = self.lscope[name]
|
| 592 |
+
self.scf_stack.append(node)
|
| 593 |
+
self.visit_compound_statement(node.body)
|
| 594 |
+
self.scf_stack.pop()
|
| 595 |
+
loop_defs = self.local_defs
|
| 596 |
+
yields = []
|
| 597 |
+
for name in loop_defs:
|
| 598 |
+
if name in liveins:
|
| 599 |
+
yields.append(loop_defs[name])
|
| 600 |
+
self.builder.create_yield_op([y.handle for y in yields])
|
| 601 |
+
|
| 602 |
+
# update global uses in while_op
|
| 603 |
+
for i, name in enumerate(names):
|
| 604 |
+
after_block.replace_use_in_block_with(init_args[i].handle, after_block.arg(i))
|
| 605 |
+
|
| 606 |
+
# WhileOp defines new values, update the symbol table (lscope, local_defs)
|
| 607 |
+
for i, name in enumerate(names):
|
| 608 |
+
new_def = triton.language.core.tensor(while_op.get_result(i), ret_types[i])
|
| 609 |
+
self.lscope[name] = new_def
|
| 610 |
+
self.local_defs[name] = new_def
|
| 611 |
+
|
| 612 |
+
for stmt in node.orelse:
|
| 613 |
+
assert False, "Not implemented"
|
| 614 |
+
ast.NodeVisitor.generic_visit(self, stmt)
|
| 615 |
+
|
| 616 |
+
def visit_Subscript(self, node):
|
| 617 |
+
assert node.ctx.__class__.__name__ == "Load"
|
| 618 |
+
lhs = self.visit(node.value)
|
| 619 |
+
slices = self.visit(node.slice)
|
| 620 |
+
if self.is_triton_tensor(lhs):
|
| 621 |
+
return lhs.__getitem__(slices, _builder=self.builder)
|
| 622 |
+
return lhs[slices]
|
| 623 |
+
|
| 624 |
+
def visit_ExtSlice(self, node):
|
| 625 |
+
return [self.visit(dim) for dim in node.dims]
|
| 626 |
+
|
| 627 |
+
def visit_For(self, node):
|
| 628 |
+
IteratorClass = self.visit(node.iter.func)
|
| 629 |
+
iter_args = [self.visit(arg) for arg in node.iter.args]
|
| 630 |
+
if IteratorClass == triton.language.static_range:
|
| 631 |
+
iterator = IteratorClass(*iter_args)
|
| 632 |
+
static_range = range(iterator.start.value,
|
| 633 |
+
iterator.end.value,
|
| 634 |
+
iterator.step.value)
|
| 635 |
+
for i in static_range:
|
| 636 |
+
self.lscope[node.target.id] = triton.language.constexpr(i)
|
| 637 |
+
self.visit_compound_statement(node.body)
|
| 638 |
+
for stmt in node.orelse:
|
| 639 |
+
ast.NodeVisitor.generic_visit(self, stmt)
|
| 640 |
+
return
|
| 641 |
+
|
| 642 |
+
if IteratorClass != self.builtins['range']:
|
| 643 |
+
raise RuntimeError('Only `range` and `static_range` iterators are currently supported')
|
| 644 |
+
|
| 645 |
+
# visit iterator arguments
|
| 646 |
+
# note: only `range` iterator is supported now
|
| 647 |
+
# collect lower bound (lb), upper bound (ub), and step
|
| 648 |
+
lb = iter_args[0] if len(iter_args) > 1 else self.visit(ast.Num(0))
|
| 649 |
+
ub = iter_args[1] if len(iter_args) > 1 else self.visit(node.iter.args[0])
|
| 650 |
+
step = iter_args[2] if len(iter_args) > 2 else self.visit(ast.Num(1))
|
| 651 |
+
# handle negative constant step (not supported by scf.for in MLIR)
|
| 652 |
+
negative_step = False
|
| 653 |
+
if isinstance(step, triton.language.constexpr) and step.value < 0:
|
| 654 |
+
step = triton.language.constexpr(-step.value)
|
| 655 |
+
negative_step = True
|
| 656 |
+
lb, ub = ub, lb
|
| 657 |
+
# lb/ub/step might be constexpr, we need to cast them to tensor
|
| 658 |
+
lb = triton.language.core._to_tensor(lb, self.builder).handle
|
| 659 |
+
ub = triton.language.core._to_tensor(ub, self.builder).handle
|
| 660 |
+
step = triton.language.core._to_tensor(step, self.builder).handle
|
| 661 |
+
# ForOp can only accept IndexType as lb/ub/step. Cast integer to Index
|
| 662 |
+
lb = self.builder.create_to_index(lb)
|
| 663 |
+
ub = self.builder.create_to_index(ub)
|
| 664 |
+
step = self.builder.create_to_index(step)
|
| 665 |
+
# Create placeholder for the loop induction variable
|
| 666 |
+
iv = self.builder.create_undef(self.builder.get_int32_ty())
|
| 667 |
+
self.set_value(node.target.id, triton.language.core.tensor(iv, triton.language.core.int32))
|
| 668 |
+
|
| 669 |
+
with enter_sub_region(self) as sr:
|
| 670 |
+
liveins, insert_block = sr
|
| 671 |
+
ip = self.builder.get_insertion_point()
|
| 672 |
+
|
| 673 |
+
# create loop body block
|
| 674 |
+
block = self.builder.create_block()
|
| 675 |
+
self.builder.set_insertion_point_to_start(block)
|
| 676 |
+
# dry visit loop body
|
| 677 |
+
self.scf_stack.append(node)
|
| 678 |
+
self.visit_compound_statement(node.body)
|
| 679 |
+
self.scf_stack.pop()
|
| 680 |
+
block.erase()
|
| 681 |
+
|
| 682 |
+
# If a variable (name) is defined in both its parent & itself, then it's
|
| 683 |
+
# a loop-carried variable. (They must be of the same type)
|
| 684 |
+
init_args = []
|
| 685 |
+
yields = []
|
| 686 |
+
names = []
|
| 687 |
+
for name in self.local_defs:
|
| 688 |
+
if name in liveins:
|
| 689 |
+
assert self.is_triton_tensor(self.local_defs[name]), f'{name} is not tensor'
|
| 690 |
+
assert self.is_triton_tensor(liveins[name])
|
| 691 |
+
assert self.local_defs[name].type == liveins[name].type,\
|
| 692 |
+
f'Loop-carried variable {name} has initial type {liveins[name].type} '\
|
| 693 |
+
f'but is re-assigned to {self.local_defs[name].type} in loop! '\
|
| 694 |
+
f'Please make sure that the type stays consistent.'
|
| 695 |
+
|
| 696 |
+
names.append(name)
|
| 697 |
+
init_args.append(triton.language.core._to_tensor(liveins[name], self.builder))
|
| 698 |
+
yields.append(triton.language.core._to_tensor(self.local_defs[name], self.builder))
|
| 699 |
+
|
| 700 |
+
# create ForOp
|
| 701 |
+
self.builder.restore_insertion_point(ip)
|
| 702 |
+
for_op = self.builder.create_for_op(lb, ub, step, [arg.handle for arg in init_args])
|
| 703 |
+
|
| 704 |
+
self.scf_stack.append(node)
|
| 705 |
+
self.builder.set_insertion_point_to_start(for_op.get_body(0))
|
| 706 |
+
for i, name in enumerate(names):
|
| 707 |
+
self.set_value(name, triton.language.core.tensor(for_op.get_body(0).arg(i + 1), yields[i].type))
|
| 708 |
+
self.visit_compound_statement(node.body)
|
| 709 |
+
self.scf_stack.pop()
|
| 710 |
+
yields = []
|
| 711 |
+
for name in self.local_defs:
|
| 712 |
+
if name in liveins:
|
| 713 |
+
yields.append(triton.language.core._to_tensor(self.local_defs[name], self.builder))
|
| 714 |
+
|
| 715 |
+
# create YieldOp
|
| 716 |
+
if len(yields) > 0:
|
| 717 |
+
self.builder.create_yield_op([y.handle for y in yields])
|
| 718 |
+
for_op_region = for_op.get_body(0).get_parent()
|
| 719 |
+
assert for_op_region.size() == 1, "We use SCF, so the loop body should only have one block"
|
| 720 |
+
|
| 721 |
+
# update induction variable with actual value, and replace all uses
|
| 722 |
+
self.builder.set_insertion_point_to_start(for_op.get_body(0))
|
| 723 |
+
iv = self.builder.create_index_to_si(for_op.get_induction_var())
|
| 724 |
+
if negative_step:
|
| 725 |
+
ub_si = self.builder.create_index_to_si(ub)
|
| 726 |
+
iv = self.builder.create_sub(ub_si, iv)
|
| 727 |
+
self.lscope[node.target.id].handle.replace_all_uses_with(iv)
|
| 728 |
+
self.set_value(node.target.id, triton.language.core.tensor(iv, triton.language.core.int32))
|
| 729 |
+
|
| 730 |
+
# update lscope & local_defs (ForOp defines new values)
|
| 731 |
+
for i, name in enumerate(names):
|
| 732 |
+
self.set_value(name, triton.language.core.tensor(for_op.get_result(i), yields[i].type))
|
| 733 |
+
|
| 734 |
+
for stmt in node.orelse:
|
| 735 |
+
assert False, "Don't know what to do with else after for"
|
| 736 |
+
ast.NodeVisitor.generic_visit(self, stmt)
|
| 737 |
+
|
| 738 |
+
def visit_Slice(self, node):
|
| 739 |
+
lower = self.visit(node.lower)
|
| 740 |
+
upper = self.visit(node.upper)
|
| 741 |
+
step = self.visit(node.step)
|
| 742 |
+
return slice(lower, upper, step)
|
| 743 |
+
|
| 744 |
+
def visit_Index(self, node):
|
| 745 |
+
return self.visit(node.value)
|
| 746 |
+
|
| 747 |
+
def visit_keyword(self, node):
|
| 748 |
+
return {node.arg: self.visit(node.value)}
|
| 749 |
+
|
| 750 |
+
def visit_Call(self, node):
|
| 751 |
+
fn = self.visit(node.func)
|
| 752 |
+
if isinstance(fn, triton.language.constexpr):
|
| 753 |
+
fn = fn.value
|
| 754 |
+
kws = dict()
|
| 755 |
+
for keyword in node.keywords:
|
| 756 |
+
kws.update(self.visit(keyword))
|
| 757 |
+
args = [self.visit(arg) for arg in node.args]
|
| 758 |
+
if isinstance(fn, triton.runtime.JITFunction):
|
| 759 |
+
from inspect import getcallargs
|
| 760 |
+
args = getcallargs(fn.fn, *args, **kws)
|
| 761 |
+
args = [args[name] for name in fn.arg_names]
|
| 762 |
+
args = [arg if isinstance(arg, triton.language.tensor)
|
| 763 |
+
else triton.language.constexpr(arg) for arg in args]
|
| 764 |
+
# generate function def
|
| 765 |
+
attributes = dict()
|
| 766 |
+
constexprs = [i for i, arg in enumerate(args) if isinstance(arg, triton.language.constexpr)]
|
| 767 |
+
constants = {i: args[i] for i in constexprs}
|
| 768 |
+
# generate call
|
| 769 |
+
args = [None if i in constexprs else arg for i, arg in enumerate(args)]
|
| 770 |
+
arg_vals = [arg.handle for arg in args if arg is not None]
|
| 771 |
+
arg_types = [arg.type for arg in args if arg is not None]
|
| 772 |
+
fn_name = mangle_fn(fn.__name__, arg_types, constants)
|
| 773 |
+
# generate function def if necessary
|
| 774 |
+
if not self.module.has_function(fn_name):
|
| 775 |
+
prototype = triton.language.function_type([], arg_types)
|
| 776 |
+
gscope = sys.modules[fn.fn.__module__].__dict__
|
| 777 |
+
generator = CodeGenerator(self.builder.context, prototype, gscope, attributes, constants, module=self.module, function_name=fn_name, function_types=self.function_ret_types)
|
| 778 |
+
generator.visit(fn.parse())
|
| 779 |
+
callee_ret_type = generator.last_ret_type
|
| 780 |
+
self.function_ret_types[fn_name] = callee_ret_type
|
| 781 |
+
else:
|
| 782 |
+
callee_ret_type = self.function_ret_types[fn_name]
|
| 783 |
+
symbol = self.module.get_function(fn_name)
|
| 784 |
+
call_op = self.builder.call(symbol, arg_vals)
|
| 785 |
+
if call_op.get_num_results() == 0 or callee_ret_type is None:
|
| 786 |
+
return None
|
| 787 |
+
elif call_op.get_num_results() == 1:
|
| 788 |
+
return triton.language.tensor(call_op.get_result(0), callee_ret_type)
|
| 789 |
+
else:
|
| 790 |
+
# should return a tuple of tl.tensor
|
| 791 |
+
results = []
|
| 792 |
+
for i in range(call_op.get_num_results()):
|
| 793 |
+
results.append(triton.language.tensor(call_op.get_result(i), callee_ret_type[i]))
|
| 794 |
+
return tuple(results)
|
| 795 |
+
if (hasattr(fn, '__self__') and self.is_triton_tensor(fn.__self__)) \
|
| 796 |
+
or impl.is_builtin(fn):
|
| 797 |
+
return fn(*args, _builder=self.builder, **kws)
|
| 798 |
+
if fn in self.builtins.values():
|
| 799 |
+
args = [arg.value if isinstance(arg, triton.language.constexpr) else arg
|
| 800 |
+
for arg in args]
|
| 801 |
+
return fn(*args, **kws)
|
| 802 |
+
|
| 803 |
+
def visit_Constant(self, node):
|
| 804 |
+
return triton.language.constexpr(node.value)
|
| 805 |
+
|
| 806 |
+
def visit_BoolOp(self, node: ast.BoolOp):
|
| 807 |
+
assert len(node.values) == 2
|
| 808 |
+
lhs = self.visit(node.values[0])
|
| 809 |
+
rhs = self.visit(node.values[1])
|
| 810 |
+
|
| 811 |
+
fn = {
|
| 812 |
+
ast.And: 'logical_and',
|
| 813 |
+
ast.Or: 'logical_or',
|
| 814 |
+
}[type(node.op)]
|
| 815 |
+
|
| 816 |
+
if self.is_triton_tensor(lhs):
|
| 817 |
+
return getattr(lhs, fn)(rhs, _builder=self.builder)
|
| 818 |
+
elif self.is_triton_tensor(rhs):
|
| 819 |
+
fn = fn[:2] + 'r' + fn[2:]
|
| 820 |
+
return getattr(rhs, fn)(lhs, _builder=self.builder)
|
| 821 |
+
else:
|
| 822 |
+
return getattr(lhs, fn)(rhs)
|
| 823 |
+
|
| 824 |
+
if sys.version_info < (3, 8):
|
| 825 |
+
def visit_NameConstant(self, node):
|
| 826 |
+
return triton.language.constexpr(node.value)
|
| 827 |
+
|
| 828 |
+
def visit_Num(self, node):
|
| 829 |
+
return triton.language.constexpr(node.n)
|
| 830 |
+
|
| 831 |
+
def visit_Str(self, node):
|
| 832 |
+
return triton.language.constexpr(ast.literal_eval(node))
|
| 833 |
+
|
| 834 |
+
def visit_Attribute(self, node):
|
| 835 |
+
lhs = self.visit(node.value)
|
| 836 |
+
if isinstance(lhs, triton.language.tensor):
|
| 837 |
+
if node.attr == "T":
|
| 838 |
+
return triton.language.semantic.trans(lhs, builder=self.builder)
|
| 839 |
+
return getattr(lhs, node.attr)
|
| 840 |
+
|
| 841 |
+
def visit_Expr(self, node):
|
| 842 |
+
ast.NodeVisitor.generic_visit(self, node)
|
| 843 |
+
|
| 844 |
+
def visit_NoneType(self, node):
|
| 845 |
+
return None
|
| 846 |
+
|
| 847 |
+
def visit(self, node):
|
| 848 |
+
if node is not None:
|
| 849 |
+
self.last_node = node
|
| 850 |
+
with warnings.catch_warnings():
|
| 851 |
+
# The ast library added visit_Constant and deprecated some other
|
| 852 |
+
# methods but we can't move to that without breaking Python 3.6 and 3.7.
|
| 853 |
+
warnings.simplefilter("ignore", DeprecationWarning) # python 3.9
|
| 854 |
+
warnings.simplefilter("ignore", PendingDeprecationWarning) # python 3.8
|
| 855 |
+
return super().visit(node)
|
| 856 |
+
|
| 857 |
+
def generic_visit(self, node):
|
| 858 |
+
typename = type(node).__name__
|
| 859 |
+
raise NotImplementedError("Unsupported node: {}".format(typename))
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
class CompilationError(Exception):
|
| 863 |
+
def __init__(self, src, node):
|
| 864 |
+
self.message = f'at {node.lineno}:{node.col_offset}:\n'
|
| 865 |
+
self.message += '\n'.join(src.split('\n')[:node.lineno])
|
| 866 |
+
self.message += '\n' + ' ' * node.col_offset + '^'
|
| 867 |
+
self.src = src
|
| 868 |
+
self.node = node
|
| 869 |
+
super().__init__(self.message)
|
| 870 |
+
|
| 871 |
+
def __reduce__(self):
|
| 872 |
+
# this is necessary to make CompilationError picklable
|
| 873 |
+
return (type(self), (self.src, self.node))
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
class OutOfResources(Exception):
|
| 877 |
+
def __init__(self, required, limit, name):
|
| 878 |
+
self.message = f'out of resource: {name}, '\
|
| 879 |
+
f'Required: {required}, '\
|
| 880 |
+
f'Hardware limit: {limit}'
|
| 881 |
+
self.message += '. Reducing block sizes or `num_stages` may help.'
|
| 882 |
+
self.required = required
|
| 883 |
+
self.limit = limit
|
| 884 |
+
self.name = name
|
| 885 |
+
super().__init__(self.message)
|
| 886 |
+
|
| 887 |
+
def __reduce__(self):
|
| 888 |
+
# this is necessary to make CompilationError picklable
|
| 889 |
+
return (type(self), (self.required, self.limit, self.name))
|
| 890 |
+
|
| 891 |
+
|
| 892 |
+
def kernel_suffix(signature, specialization):
|
| 893 |
+
# suffix format:
|
| 894 |
+
# <argid><'c' if equal to 1><'d' if divisible by 16>
|
| 895 |
+
suffix = ''
|
| 896 |
+
for i, _ in enumerate(signature):
|
| 897 |
+
suffix += str(i)
|
| 898 |
+
if i in specialization.equal_to_1:
|
| 899 |
+
suffix += 'c'
|
| 900 |
+
if i in specialization.divisible_by_16:
|
| 901 |
+
suffix += 'd'
|
| 902 |
+
return suffix
|
| 903 |
+
|
| 904 |
+
# ------------------------------------------------------------------------------
|
| 905 |
+
# ------------------------------------------------------------------------------
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
def parse_mlir_module(path, context):
|
| 909 |
+
module = _triton.ir.parse_mlir_module(path, context)
|
| 910 |
+
# module takes ownership of the context
|
| 911 |
+
module.context = context
|
| 912 |
+
return module
|
| 913 |
+
|
| 914 |
+
|
| 915 |
+
def build_triton_ir(fn, signature, specialization, constants):
|
| 916 |
+
# canonicalize signature
|
| 917 |
+
if isinstance(signature, str):
|
| 918 |
+
signature = {k: v.strip() for k, v in enumerate(signature.split(","))}
|
| 919 |
+
context = _triton.ir.context()
|
| 920 |
+
context.load_triton()
|
| 921 |
+
# create kernel prototype
|
| 922 |
+
cst_key = lambda i: fn.arg_names.index(i) if isinstance(i, str) else i
|
| 923 |
+
constants = {cst_key(key): value for key, value in constants.items()}
|
| 924 |
+
# visit kernel AST
|
| 925 |
+
gscope = fn.__globals__.copy()
|
| 926 |
+
function_name = '_'.join([fn.__name__, kernel_suffix(signature.values(), specialization)])
|
| 927 |
+
tys = list(signature.values())
|
| 928 |
+
new_constants = {k: True if k in tys and tys[k] == "i1" else 1 for k in specialization.equal_to_1}
|
| 929 |
+
new_attrs = {k: ("multiple_of", 16) for k in specialization.divisible_by_16}
|
| 930 |
+
all_constants = constants.copy()
|
| 931 |
+
all_constants.update(new_constants)
|
| 932 |
+
arg_types = [str_to_ty(v) for k, v in signature.items() if k not in constants]
|
| 933 |
+
|
| 934 |
+
prototype = triton.language.function_type([], arg_types)
|
| 935 |
+
generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants, function_name=function_name, attributes=new_attrs, is_kernel=True)
|
| 936 |
+
try:
|
| 937 |
+
generator.visit(fn.parse())
|
| 938 |
+
except Exception as e:
|
| 939 |
+
node = generator.last_node
|
| 940 |
+
if node is None or isinstance(e, (NotImplementedError, CompilationError)):
|
| 941 |
+
raise e
|
| 942 |
+
raise CompilationError(fn.src, node) from e
|
| 943 |
+
ret = generator.module
|
| 944 |
+
# module takes ownership of the context
|
| 945 |
+
ret.context = context
|
| 946 |
+
return ret, generator
|
| 947 |
+
|
| 948 |
+
|
| 949 |
+
def optimize_triton_ir(mod):
|
| 950 |
+
pm = _triton.ir.pass_manager(mod.context)
|
| 951 |
+
pm.enable_debug()
|
| 952 |
+
pm.add_inliner_pass()
|
| 953 |
+
pm.add_triton_combine_pass()
|
| 954 |
+
pm.add_canonicalizer_pass()
|
| 955 |
+
pm.add_cse_pass()
|
| 956 |
+
pm.add_licm_pass()
|
| 957 |
+
pm.run(mod)
|
| 958 |
+
return mod
|
| 959 |
+
|
| 960 |
+
|
| 961 |
+
def ast_to_ttir(fn, signature, specialization, constants):
|
| 962 |
+
mod, _ = build_triton_ir(fn, signature, specialization, constants)
|
| 963 |
+
return optimize_triton_ir(mod)
|
| 964 |
+
|
| 965 |
+
|
| 966 |
+
def ttir_to_ttgir(mod, num_warps, num_stages, compute_capability):
|
| 967 |
+
pm = _triton.ir.pass_manager(mod.context)
|
| 968 |
+
pm.add_convert_triton_to_tritongpu_pass(num_warps)
|
| 969 |
+
pm.enable_debug()
|
| 970 |
+
pm.add_coalesce_pass()
|
| 971 |
+
# The combine pass converts blocked layout to mma layout
|
| 972 |
+
# for dot ops so that pipeline can get shared memory swizzled correctly.
|
| 973 |
+
pm.add_tritongpu_combine_pass(compute_capability)
|
| 974 |
+
pm.add_tritongpu_pipeline_pass(num_stages)
|
| 975 |
+
# Prefetch must be done after pipeline pass because pipeline pass
|
| 976 |
+
# extracts slices from the original tensor.
|
| 977 |
+
pm.add_tritongpu_prefetch_pass()
|
| 978 |
+
pm.add_canonicalizer_pass()
|
| 979 |
+
pm.add_cse_pass()
|
| 980 |
+
pm.add_tritongpu_combine_pass(compute_capability)
|
| 981 |
+
pm.add_licm_pass()
|
| 982 |
+
pm.add_tritongpu_combine_pass(compute_capability)
|
| 983 |
+
pm.add_cse_pass()
|
| 984 |
+
pm.add_tritongpu_decompose_conversions_pass()
|
| 985 |
+
if compute_capability // 10 == 7:
|
| 986 |
+
# The update_mma_for_volta pass helps to compute some information for MMA encoding specifically for MMAv1
|
| 987 |
+
# NOTE this pass should be placed after all the passes those modifies mma layout
|
| 988 |
+
pm.add_tritongpu_update_mma_for_volta_pass()
|
| 989 |
+
pm.add_cse_pass()
|
| 990 |
+
pm.add_symbol_dce_pass()
|
| 991 |
+
pm.add_tritongpu_reorder_instructions_pass()
|
| 992 |
+
pm.run(mod)
|
| 993 |
+
return mod
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
def add_external_libs(mod, libs):
|
| 997 |
+
for name, path in libs.items():
|
| 998 |
+
if len(name) == 0 or len(path) == 0:
|
| 999 |
+
return
|
| 1000 |
+
_triton.add_external_libs(mod, list(libs.keys()), list(libs.values()))
|
| 1001 |
+
|
| 1002 |
+
|
| 1003 |
+
def ttgir_to_llir(mod, extern_libs, compute_capability):
|
| 1004 |
+
if extern_libs:
|
| 1005 |
+
add_external_libs(mod, extern_libs)
|
| 1006 |
+
return _triton.translate_triton_gpu_to_llvmir(mod, compute_capability)
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
def llir_to_ptx(mod: Any, compute_capability: int, ptx_version: int = None) -> Tuple[str, int]:
|
| 1010 |
+
'''
|
| 1011 |
+
Translate TritonGPU module to PTX code.
|
| 1012 |
+
:param mod: a TritonGPU dialect module
|
| 1013 |
+
:return:
|
| 1014 |
+
- PTX code
|
| 1015 |
+
- shared memory allocation size
|
| 1016 |
+
'''
|
| 1017 |
+
if ptx_version is None:
|
| 1018 |
+
_, cuda_version = path_to_ptxas()
|
| 1019 |
+
ptx_version = ptx_get_version(cuda_version)
|
| 1020 |
+
return _triton.translate_llvmir_to_ptx(mod, compute_capability, ptx_version)
|
| 1021 |
+
|
| 1022 |
+
|
| 1023 |
+
def ptx_to_cubin(ptx: str, compute_capability: int):
|
| 1024 |
+
'''
|
| 1025 |
+
Compile TritonGPU module to cubin.
|
| 1026 |
+
:param ptx: ptx code
|
| 1027 |
+
:param compute_capability: compute capability
|
| 1028 |
+
:return: str
|
| 1029 |
+
'''
|
| 1030 |
+
ptxas, _ = path_to_ptxas()
|
| 1031 |
+
return _triton.compile_ptx_to_cubin(ptx, ptxas, compute_capability)
|
| 1032 |
+
|
| 1033 |
+
|
| 1034 |
+
def ptx_get_kernel_name(ptx: str) -> str:
|
| 1035 |
+
'''
|
| 1036 |
+
Get kernel name from PTX code.
|
| 1037 |
+
This Kernel name is required when launching the kernel.
|
| 1038 |
+
'''
|
| 1039 |
+
# There is a name mangling in PTX codegen, so the original kernel names in Triton IR are not available in PTX/cubin.
|
| 1040 |
+
assert ptx
|
| 1041 |
+
for line in ptx.split('\n'):
|
| 1042 |
+
line = line.strip()
|
| 1043 |
+
if line.startswith('// .globl'):
|
| 1044 |
+
return line.split()[-1]
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
@functools.lru_cache
def ptx_get_version(cuda_version) -> int:
    """Get the highest PTX version supported by the current CUDA driver.

    The PTX ISA number is derived from the CUDA release: each major CUDA
    series has a base PTX version to which the minor release is added.
    """
    assert isinstance(cuda_version, str)
    major, minor = map(int, cuda_version.split('.'))
    base_by_major = {12: 80, 11: 70, 10: 63}
    try:
        return base_by_major[major] + minor
    except KeyError:
        raise RuntimeError("Triton only support CUDA 10.0 or higher")
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
def path_to_ptxas():
    """Locate a usable ``ptxas`` binary.

    Candidates are tried in order: the ``TRITON_PTXAS_PATH`` environment
    variable, then the copy shipped under ``third_party/cuda/bin``.

    :return: tuple ``(path, release)`` where *release* is the CUDA release
        string (e.g. ``"11.8"``) parsed from ``ptxas --version``
    :raises RuntimeError: if no candidate exists or none reports a release
    """
    base_dir = os.path.dirname(__file__)
    candidates = [
        os.environ.get("TRITON_PTXAS_PATH", ""),
        os.path.join(base_dir, "third_party", "cuda", "bin", "ptxas"),
    ]

    for ptxas in candidates:
        # os.path.isfile() already implies existence; also skip the empty
        # string produced when TRITON_PTXAS_PATH is unset
        if ptxas and os.path.isfile(ptxas):
            # check_output raises on failure and never returns None,
            # so no extra null-check is needed on the result
            output = subprocess.check_output([ptxas, "--version"], stderr=subprocess.STDOUT)
            version = re.search(r".*release (\d+\.\d+).*", output.decode("utf-8"), flags=re.MULTILINE)
            if version is not None:
                return ptxas, version.group(1)
    raise RuntimeError("Cannot find ptxas")
|
| 1078 |
+
|
| 1079 |
+
|
| 1080 |
+
# Per-kernel argument specialization: which argument indices are known to be
# divisible by 16 and which are known to equal 1 (see make_fn_cache_key /
# make_hash, which sort both fields when building cache keys).
# NOTE(review): the two default ``set()`` objects are created once and shared
# by every instance that omits the fields — harmless only as long as no
# caller mutates them in place; confirm before relying on that.
instance_descriptor = namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"], defaults=[set(), set()])
|
| 1081 |
+
|
| 1082 |
+
|
| 1083 |
+
# ------------------------------------------------------------------------------
|
| 1084 |
+
# compiler
|
| 1085 |
+
# ------------------------------------------------------------------------------
|
| 1086 |
+
|
| 1087 |
+
|
| 1088 |
+
def ty_to_cpp(ty):
    """Map a Triton signature type string to the C type used in launcher code.

    Pointer types (leading ``*``) all become ``CUdeviceptr``; note that
    ``i1``, ``fp16`` and ``bf16`` are widened to ``int32_t``/``float``
    because they are passed to the kernel as host scalars.
    """
    if ty[0] == '*':
        return "CUdeviceptr"
    scalar_cpp_types = {
        "i1": "int32_t",
        "i8": "int8_t",
        "i16": "int16_t",
        "i32": "int32_t",
        "i64": "int64_t",
        "u32": "uint32_t",
        "u64": "uint64_t",
        "fp16": "float",
        "bf16": "float",
        "fp32": "float",
        "f32": "float",
        "fp64": "double",
    }
    return scalar_cpp_types[ty]
|
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
def generate_name_initializer(signature):
    # NOTE(review): this helper looks unfinished or dead — the loop body is a
    # bare ``src`` expression (a no-op statement), so ``src`` is never
    # extended, and the function implicitly returns None rather than ``src``.
    # Confirm it has no callers before removing or completing it.
    src = "int i = 0;\n"
    tys = signature.split(',')
    for i, ty in enumerate(tys):
        src
|
| 1112 |
+
|
| 1113 |
+
|
| 1114 |
+
def binary_name_to_header_name(name):
    """Derive a ``.h`` header filename from a kernel binary name.

    Names longer than 128 characters are replaced by a sha256 digest so the
    result stays well under the usual 255-character filename limit.
    """
    if len(name) > 128:
        # avoid filename too long errors (filename limit is 255)
        digest = hashlib.sha256(name.encode("utf-8")).hexdigest()
        name = f"kernel_{digest}"
    return f"{name}.h"
|
| 1119 |
+
|
| 1120 |
+
|
| 1121 |
+
def generate_launcher(constants, signature):
    """Generate the C source of a CPython extension module that launches a
    compiled kernel with this *signature*.

    The module exposes a single ``launch`` function which parses the Python
    arguments, resolves tensor-like objects to device pointers via their
    ``data_ptr()`` method, invokes ``cuLaunchKernel``, and runs the optional
    enter/exit hooks.  Arguments whose index appears in *constants* are baked
    into the kernel and excluded from the ``params`` array.
    """
    # C declaration list for the kernel-visible arguments
    arg_decls = ', '.join(f"{ty_to_cpp(ty)} arg{i}" for i, ty in signature.items())

    def _extracted_type(ty):
        # C type in which PyArg_ParseTuple deposits each Python-level argument
        # (pointers arrive as generic PyObject* and are resolved later)
        if ty[0] == '*':
            return "PyObject*"
        return {
            'i1': 'int32_t',
            'i32': 'int32_t',
            'i64': 'int64_t',
            'u32': 'uint32_t',
            'u64': 'uint64_t',
            'fp16': 'float',
            'bf16': 'float',
            'fp32': 'float',
            'f32': 'float',
            'fp64': 'double',
        }[ty]

    def format_of(ty):
        # PyArg_ParseTuple format character for a given C type
        return {
            "PyObject*": "O",
            "float": "f",
            "double": "d",
            "long": "l",
            "uint32_t": "I",
            "int32_t": "i",
            "uint64_t": "K",
            "int64_t": "L",
        }[ty]

    # fixed prefix: gridX, gridY, gridZ, num_warps, shared_memory, stream,
    # function, enter hook, exit hook, compiled kernel — then one char per arg
    format = "iiiiiKKOOO" + ''.join([format_of(_extracted_type(ty)) for ty in signature.values()])

    # generate glue code
    src = f"""
#include \"cuda.h\"
#include <stdbool.h>
#include <Python.h>

static inline void gpuAssert(CUresult code, const char *file, int line)
{{
   if (code != CUDA_SUCCESS)
   {{
      const char* prefix = "Triton Error [CUDA]: ";
      const char* str;
      cuGetErrorString(code, &str);
      char err[1024] = {{0}};
      strcat(err, prefix);
      strcat(err, str);
      PyErr_SetString(PyExc_RuntimeError, err);
   }}
}}

#define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }}

void _launch(int gridX, int gridY, int gridZ, int num_warps, int shared_memory, CUstream stream, CUfunction function, {arg_decls}) {{
  void *params[] = {{ {', '.join(f"&arg{i}" for i in signature.keys() if i not in constants)} }};
  if(gridX*gridY*gridZ > 0){{
    CUDA_CHECK(cuLaunchKernel(function, gridX, gridY, gridZ, 32*num_warps, 1, 1, shared_memory, stream, params, 0));
  }}
}}

typedef struct _DevicePtrInfo {{
    CUdeviceptr dev_ptr;
    bool valid;
}} DevicePtrInfo;

static inline DevicePtrInfo getPointer(PyObject *obj, int idx) {{
  DevicePtrInfo ptr_info;
  ptr_info.dev_ptr = 0;
  ptr_info.valid = true;
  if (PyLong_Check(obj)) {{
    ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(obj);
    return ptr_info;
  }}
  if (obj == Py_None) {{
    // valid nullptr
    return ptr_info;
  }}
  PyObject *ptr = PyObject_GetAttrString(obj, "data_ptr");
  if(ptr){{
    PyObject *empty_tuple = PyTuple_New(0);
    PyObject *ret = PyObject_Call(ptr, empty_tuple, NULL);
    Py_DECREF(empty_tuple);
    Py_DECREF(ptr);
    if (!PyLong_Check(ret)) {{
      PyErr_SetString(PyExc_TypeError, "data_ptr method of Pointer object must return 64-bit int");
      ptr_info.valid = false;
      return ptr_info;
    }}
    ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(ret);
    unsigned attr;
    CUresult status =
        cuPointerGetAttribute(&attr, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, ptr_info.dev_ptr);
    if (ptr_info.dev_ptr &&
        (!(attr == CU_MEMORYTYPE_DEVICE || attr == CU_MEMORYTYPE_UNIFIED) ||
         !(status == CUDA_SUCCESS))) {{
      PyErr_Format(PyExc_ValueError,
                   "Pointer argument (at %d) cannot be accessed from Triton (cpu tensor?)", idx);
      ptr_info.valid = false;
    }}
    Py_DECREF(ret);  // Thanks ChatGPT!
    return ptr_info;
  }}
  PyErr_SetString(PyExc_TypeError, "Pointer argument must be either uint64 or have data_ptr method");
  return ptr_info;
}}

static PyObject* launch(PyObject* self, PyObject* args) {{
  int gridX, gridY, gridZ;
  uint64_t _stream;
  uint64_t _function;
  int num_warps;
  int shared_memory;
  PyObject *launch_enter_hook = NULL;
  PyObject *launch_exit_hook = NULL;
  PyObject *compiled_kernel = NULL;
  PyObject *hook_ret = NULL;
  {' '.join([f"{_extracted_type(ty)} _arg{i}; " for i, ty in signature.items()])}
  if(!PyArg_ParseTuple(args, \"{format}\", &gridX, &gridY, &gridZ, &num_warps, &shared_memory, &_stream, &_function, &launch_enter_hook, &launch_exit_hook, &compiled_kernel, {', '.join(f"&_arg{i}" for i, ty in signature.items())})) {{
    return NULL;
  }}

  if (launch_enter_hook != Py_None) {{
    PyObject *new_args = PyTuple_Pack(1, compiled_kernel);
    hook_ret = PyObject_CallObject(launch_enter_hook, new_args);
    Py_DECREF(new_args);
  }}


  // raise exception asap
  {"; ".join([f"DevicePtrInfo ptr_info{i} = getPointer(_arg{i}, {i}); if (!ptr_info{i}.valid) return NULL;" if ty[0] == "*" else "" for i, ty in signature.items()])};
  _launch(gridX, gridY, gridZ, num_warps, shared_memory, (CUstream)_stream, (CUfunction)_function, {', '.join(f"ptr_info{i}.dev_ptr" if ty[0]=="*" else f"_arg{i}"for i, ty in signature.items())});

  if (launch_exit_hook != Py_None) {{
    PyObject *new_args = NULL;
    if (hook_ret) {{
      new_args = PyTuple_Pack(2, compiled_kernel, hook_ret);
    }} else {{
      new_args = PyTuple_Pack(1, compiled_kernel);
    }}
    hook_ret = PyObject_CallObject(launch_exit_hook, new_args);
    Py_DECREF(new_args);
  }}

  if (hook_ret) {{
    Py_DECREF(hook_ret);
  }}
  if(PyErr_Occurred()) {{
    return NULL;
  }}
  // return None
  Py_INCREF(Py_None);
  return Py_None;
}}

static PyMethodDef ModuleMethods[] = {{
  {{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"}},
  {{NULL, NULL, 0, NULL}} // sentinel
}};

static struct PyModuleDef ModuleDef = {{
  PyModuleDef_HEAD_INIT,
  \"__triton_launcher\",
  NULL, //documentation
  -1, //size
  ModuleMethods
}};

PyMODINIT_FUNC PyInit___triton_launcher(void) {{
  PyObject *m = PyModule_Create(&ModuleDef);
  if(m == NULL) {{
    return NULL;
  }}
  PyModule_AddFunctions(m, ModuleMethods);
  return m;
}}
"""

    return src
|
| 1301 |
+
|
| 1302 |
+
|
| 1303 |
+
def default_cache_dir():
    """Return the default Triton cache directory: ``~/.triton/cache``.

    Uses ``os.path.expanduser`` rather than ``os.environ["HOME"]`` so this
    does not raise ``KeyError`` on platforms/sessions where ``$HOME`` is
    unset (e.g. Windows, some daemon environments).
    """
    return os.path.join(os.path.expanduser("~"), ".triton", "cache")
|
| 1305 |
+
|
| 1306 |
+
|
| 1307 |
+
def default_cuda_dir():
    """CUDA installation root: ``$CUDA_HOME`` if set, else ``/usr/local/cuda``."""
    return os.getenv("CUDA_HOME", default="/usr/local/cuda")
|
| 1310 |
+
|
| 1311 |
+
|
| 1312 |
+
class CacheManager:
    """On-disk cache of compilation artifacts, one directory per cache key.

    The root is ``$TRITON_CACHE_DIR`` (falling back to ``default_cache_dir()``)
    and each instance owns the subdirectory named after its *key*.  Writes are
    serialized through a ``FileLock`` and performed via a temp-file + rename
    so interrupted programs never leave half-written artifacts.
    """

    def __init__(self, key):
        # key: cache-key string that names this manager's subdirectory
        self.key = key
        self.lock_path = None
        # create cache directory if it doesn't exist
        self.cache_dir = os.environ.get('TRITON_CACHE_DIR', default_cache_dir())
        if self.cache_dir:
            self.cache_dir = os.path.join(self.cache_dir, self.key)
            self.lock_path = os.path.join(self.cache_dir, "lock")
            os.makedirs(self.cache_dir, exist_ok=True)

    def _make_path(self, filename):
        # Absolute path of *filename* inside this manager's cache directory.
        return os.path.join(self.cache_dir, filename)

    def has_file(self, filename):
        # True iff caching is enabled and *filename* exists in the cache dir.
        if not self.cache_dir:
            return False
        return os.path.exists(self._make_path(filename))

    def put(self, data, filename, binary=True):
        # Atomically store *data* under *filename*; no-op when caching is off.
        # NOTE(review): the ``binary`` parameter is ignored — it is
        # unconditionally overwritten from ``isinstance(data, bytes)`` on the
        # next line, and some callers in this file even pass non-boolean
        # values for it.  Confirm before tightening the signature.
        if not self.cache_dir:
            return
        binary = isinstance(data, bytes)
        if not binary:
            data = str(data)
        assert self.lock_path is not None
        filepath = self._make_path(filename)
        with FileLock(self.lock_path):
            # use tempfile to be robust against program interruptions
            mode = "wb" if binary else "w"
            with open(filepath + ".tmp", mode) as f:
                f.write(data)
            os.rename(filepath + ".tmp", filepath)
|
| 1346 |
+
|
| 1347 |
+
|
| 1348 |
+
# Utilities for generating and compiling C wrappers
|
| 1349 |
+
|
| 1350 |
+
|
| 1351 |
+
@functools.lru_cache()
def libcuda_dirs():
    """Directories containing ``libcuda.so``, as reported by ``whereis``.

    The result is cached for the lifetime of the process.
    """
    output = subprocess.check_output(["whereis", "libcuda.so"]).decode()
    # the first whitespace-separated token is the query name itself
    locations = output.strip().split()[1:]
    return [os.path.dirname(location) for location in locations]
|
| 1355 |
+
|
| 1356 |
+
|
| 1357 |
+
@contextlib.contextmanager
def quiet():
    """Context manager that silences stdout/stderr for the duration of the block."""
    saved_streams = (sys.stdout, sys.stderr)
    sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
    try:
        yield
    finally:
        # always restore the real streams, even on exception
        sys.stdout, sys.stderr = saved_streams
|
| 1365 |
+
|
| 1366 |
+
|
| 1367 |
+
def _build(name, src, srcdir):
    """Compile the generated launcher C source *src* into a shared object.

    Tries a direct gcc/clang invocation first; a setuptools build exists as a
    fallback.  Returns the path of the resulting ``.so`` inside *srcdir*.
    """
    cuda_lib_dirs = libcuda_dirs()
    cuda_path = os.environ.get('CUDA_PATH', default_cuda_dir())
    cu_include_dir = os.path.join(cuda_path, "include")
    base_dir = os.path.dirname(__file__)
    triton_include_dir = os.path.join(base_dir, "third_party/cuda/include")
    cuda_header = os.path.join(cu_include_dir, "cuda.h")
    triton_cuda_header = os.path.join(triton_include_dir, "cuda.h")
    # fall back to the bundled cuda.h when the system CUDA install lacks one
    if not os.path.exists(cuda_header) and os.path.exists(triton_cuda_header):
        cu_include_dir = triton_include_dir
    suffix = sysconfig.get_config_var('EXT_SUFFIX')
    so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix))
    # try to avoid setuptools if possible
    cc = os.environ.get("CC")
    if cc is None:
        # TODO: support more things here.
        clang = shutil.which("clang")
        gcc = shutil.which("gcc")
        # prefer gcc when both compilers are available
        cc = gcc if gcc is not None else clang
        if cc is None:
            raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.")
    py_include_dir = get_paths()["include"]

    cc_cmd = [cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", "-o", so]
    # NOTE(review): ``dir`` shadows the builtin of the same name (local only).
    cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs]
    # NOTE(review): check_call raises CalledProcessError on a non-zero exit
    # and otherwise returns 0, so the setuptools fallback below is currently
    # unreachable; a failed compile propagates as an exception instead.
    ret = subprocess.check_call(cc_cmd)

    if ret == 0:
        return so
    # fallback on setuptools
    extra_compile_args = []
    library_dirs = cuda_lib_dirs
    include_dirs = [srcdir, cu_include_dir]
    libraries = ['cuda']
    # extra arguments
    extra_link_args = []
    # create extension module
    ext = setuptools.Extension(
        name=name,
        language='c',
        sources=[src],
        include_dirs=include_dirs,
        extra_compile_args=extra_compile_args + ['-O3'],
        extra_link_args=extra_link_args,
        library_dirs=library_dirs,
        libraries=libraries,
    )
    # build extension module
    args = ['build_ext']
    args.append('--build-temp=' + srcdir)
    args.append('--build-lib=' + srcdir)
    args.append('-q')
    args = dict(
        name=name,
        ext_modules=[ext],
        script_args=args,
    )
    # suppress setuptools' console chatter
    with quiet():
        setuptools.setup(**args)
    return so
|
| 1427 |
+
|
| 1428 |
+
|
| 1429 |
+
def make_so_cache_key(version_hash, signature, constants):
    """md5 key identifying a compiled launcher stub.

    All pointer types are collapsed to ``'ptr'`` first, since every pointer
    argument is marshalled identically by the launcher.
    """
    normalized = {k: ('ptr' if v[0] == '*' else v) for k, v in signature.items()}
    raw_key = f"{version_hash}-{''.join(normalized.values())}{constants}"
    return hashlib.md5(raw_key.encode("utf-8")).hexdigest()
|
| 1435 |
+
|
| 1436 |
+
|
| 1437 |
+
def make_fn_cache_key(fn_hash, signature, configs, constants, num_warps, num_stages):
    """md5 key identifying one compiled variant of a function.

    The key covers the function hash, the signature types, the specialization
    configs (their sorted ``divisible_by_16`` / ``equal_to_1`` sets), the
    constants, and the launch parameters.
    """
    def _conf_key(conf):
        # sort both attribute sets so the key is order-independent
        return (sorted(conf.divisible_by_16), sorted(conf.equal_to_1))

    configs_key = [_conf_key(conf) for conf in configs]
    raw_key = f"{fn_hash}-{''.join(signature.values())}-{configs_key}-{constants}-{num_warps}-{num_stages}"
    return hashlib.md5(raw_key.encode("utf-8")).hexdigest()
|
| 1444 |
+
|
| 1445 |
+
|
| 1446 |
+
def read_or_execute(cache_manager, force_compile, file_name, metadata,
                    run_if_found: Callable[[str], bytes] = None,
                    run_if_not_found: Callable = None):
    """Load an artifact from the cache or (re)build it.

    Returns ``(module, md5, has_changed, was_cached)``.  On a cache hit (and
    ``force_compile`` false) the artifact is loaded via *run_if_found* and its
    md5 compared against ``metadata["md5"][suffix]``; otherwise it is rebuilt
    via *run_if_not_found* and written back to the cache.
    NOTE(review): ``file_name.split(".")[1]`` assumes exactly one dot in the
    name — a dotted stem would pick the wrong suffix; confirm callers.
    """
    suffix = file_name.split(".")[1]
    if not force_compile and cache_manager.has_file(file_name):
        module = run_if_found(cache_manager._make_path(file_name))
        data = module if isinstance(module, bytes) else str(module).encode("utf-8")
        md5 = hashlib.md5(data).hexdigest()
        has_changed = metadata and md5 != metadata["md5"][suffix]
        return module, md5, has_changed, True
    module = run_if_not_found()
    data = module if isinstance(module, bytes) else str(module).encode("utf-8")
    md5 = hashlib.md5(data).hexdigest()
    # NOTE(review): the third argument is ``binary``; passing ``data`` when it
    # is not bytes looks accidental — harmless only because CacheManager.put
    # ignores that parameter entirely.
    cache_manager.put(data, file_name, True if isinstance(data, bytes) else data)
    return module, md5, True, False
|
| 1461 |
+
|
| 1462 |
+
#
|
| 1463 |
+
|
| 1464 |
+
|
| 1465 |
+
def make_stub(name, signature, constants):
    """Build (or fetch from cache) the launcher stub ``.so`` for *signature*.

    The stub is keyed on the triton version plus the signature/constants; on a
    cache miss the C source is generated, compiled in a temp directory, and
    the resulting shared object is stored in the cache.  Returns the cached
    ``.so`` path.
    """
    # name of files that are cached
    so_cache_key = make_so_cache_key(triton.runtime.jit.version_key(), signature, constants)
    so_cache_manager = CacheManager(so_cache_key)
    so_name = f"{name}.so"
    # retrieve stub from cache if it exists
    if not so_cache_manager.has_file(so_name):
        with tempfile.TemporaryDirectory() as tmpdir:
            src = generate_launcher(constants, signature)
            src_path = os.path.join(tmpdir, "main.c")
            with open(src_path, "w") as f:
                f.write(src)
            so = _build(name, src_path, tmpdir)
            with open(so, "rb") as f:
                so_cache_manager.put(f.read(), so_name, binary=True)
    return so_cache_manager._make_path(so_name)
|
| 1481 |
+
|
| 1482 |
+
|
| 1483 |
+
def convert_type_repr(x):
    """Convert an MLIR type string into Triton's compact signature form.

    Each ``!tt.ptr<...>`` wrapper becomes a leading ``*``, applied
    recursively, e.g. ``!tt.ptr<!tt.ptr<f32>>`` -> ``**f32``.
    """
    match = re.search(r'!tt\.ptr<(.*)>', x)
    return x if match is None else '*' + convert_type_repr(match.group(1))
|
| 1488 |
+
|
| 1489 |
+
|
| 1490 |
+
def make_hash(fn, **kwargs):
    """md5 cache key for a compilation of *fn*.

    For a JITFunction the key covers the function's cache key, signature,
    specialization configs, constants, and launch parameters.  For a path to
    an IR file, the key hashes the file contents together with the triton
    version key.
    """
    if not isinstance(fn, triton.runtime.JITFunction):
        assert isinstance(fn, str)
        payload = Path(fn).read_text() + triton.runtime.jit.version_key()
        return hashlib.md5(payload.encode("utf-8")).hexdigest()
    configs = kwargs["configs"]
    signature = kwargs["signature"]
    constants = kwargs.get("constants", dict())
    num_warps = kwargs.get("num_warps", 4)
    num_stages = kwargs.get("num_stages", 3)

    # Get unique key for the compiled code: sort both specialization sets so
    # the key is independent of set iteration order
    def _conf_key(conf):
        return (sorted(conf.divisible_by_16), sorted(conf.equal_to_1))

    configs_key = [_conf_key(conf) for conf in configs]
    raw_key = f"{fn.cache_key}-{''.join(signature.values())}-{configs_key}-{constants}-{num_warps}-{num_stages}"
    return hashlib.md5(raw_key.encode("utf-8")).hexdigest()
|
| 1504 |
+
|
| 1505 |
+
|
| 1506 |
+
# Regexes for recovering a kernel's name and argument list from textual IR.
# - ^\s*func\s+ : match the start of the string, any leading whitespace, the keyword func,
#   and any following whitespace
# - (public\s+)? : optionally match the keyword public and any following whitespace
# - (@\w+) : match an @ symbol followed by one or more word characters
#   (letters, digits, or underscores), and capture it as group 1 (the function name)
# - (\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\)) : match a pair of parentheses enclosing
#   zero or more arguments separated by commas, and capture it as group 2 (the argument list)
mlir_prototype_pattern = r'^\s*func\s+(?:public\s+)?(@\w+)(\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\))\s*\{\s*$'
# PTX: capture the entry/func name (group 1) and raw parameter list (group 2)
ptx_prototype_pattern = r"\.(?:visible|extern)\s+\.(?:entry|func)\s+(\w+)\s*\(([^)]*)\)"
# Which prototype regex applies to each IR dialect (ttir/ttgir share MLIR syntax).
prototype_pattern = {
    "ttir": mlir_prototype_pattern,
    "ttgir": mlir_prototype_pattern,
    "ptx": ptx_prototype_pattern,
}

# Extract individual argument types from a captured argument list.
mlir_arg_type_pattern = r'%\w+: ([^,^\)\s]+)(?: \{\S+ = \S+ : \S+\})?,?'
ptx_arg_type_pattern = r"\.param\s+\.(\w+)"
# Which argument-type regex applies to each IR dialect.
arg_type_pattern = {
    "ttir": mlir_arg_type_pattern,
    "ttgir": mlir_arg_type_pattern,
    "ptx": ptx_arg_type_pattern,
}
|
| 1528 |
+
|
| 1529 |
+
|
| 1530 |
+
# def compile(fn, signature: str, device: int = -1, constants=dict(), num_warps: int = 4, num_stages: int = 3, extern_libs=None, configs=None):
def compile(fn, **kwargs):
    """Compile *fn* (a JITFunction or a path to an IR file) into a CompiledKernel.

    The pipeline runs ast -> ttir -> ttgir -> llir -> ptx -> cubin, starting
    at whichever stage matches the input, with per-stage artifacts cached on
    disk (validated by their ctime recorded in the metadata JSON).
    """
    # compute capability: explicit ``cc`` kwarg, else query the current device
    capability = kwargs.get("cc", None)
    if capability is None:
        device = torch.cuda.current_device()
        capability = torch.cuda.get_device_capability(device)
        capability = capability[0] * 10 + capability[1]
    # we get the kernel, i.e. the first function generated in the module
    # if fn is not a JITFunction, then it
    # has to be a path to a file
    context = _triton.ir.context()
    asm = dict()
    constants = kwargs.get("constants", dict())
    num_warps = kwargs.get("num_warps", 4)
    # fewer stages by default on pre-Turing (cc < 75) hardware
    num_stages = kwargs.get("num_stages", 3 if capability >= 75 else 2)
    extern_libs = kwargs.get("extern_libs", dict())
    # build compilation stages: ir name -> (parse-from-cache-path, lower-from-previous)
    stages = {
        "ast": (lambda path: fn, None),
        "ttir": (lambda path: parse_mlir_module(path, context),
                 lambda src: ast_to_ttir(src, signature, configs[0], constants)),
        "ttgir": (lambda path: parse_mlir_module(path, context),
                  lambda src: ttir_to_ttgir(src, num_warps, num_stages, capability)),
        "llir": (lambda path: Path(path).read_text(),
                 lambda src: ttgir_to_llir(src, extern_libs, capability)),
        "ptx": (lambda path: Path(path).read_text(),
                lambda src: llir_to_ptx(src, capability)),
        "cubin": (lambda path: Path(path).read_bytes(),
                  lambda src: ptx_to_cubin(src, capability))
    }
    # find out the signature of the function
    if isinstance(fn, triton.runtime.JITFunction):
        configs = kwargs.get("configs", None)
        signature = kwargs["signature"]
        if configs is None:
            configs = [instance_descriptor()]
        assert len(configs) == 1
        kwargs["configs"] = configs
        name = fn.__name__
        first_stage = 0
        # a comma-separated signature string is normalized to {index: type}
        if isinstance(signature, str):
            signature = {k: v.strip() for k, v in enumerate(signature.split(","))}
            kwargs["signature"] = signature
    else:
        assert isinstance(fn, str)
        # the file extension names the IR dialect the pipeline starts from
        _, ir = os.path.basename(fn).split(".")
        src = Path(fn).read_text()
        import re
        match = re.search(prototype_pattern[ir], src, re.MULTILINE)
        name, signature = match.group(1), match.group(2)
        # print(name, signature)
        types = re.findall(arg_type_pattern[ir], signature)
        # print(types)
        param_tys = [convert_type_repr(ty) for ty in types]
        signature = {k: v for k, v in enumerate(param_tys)}
        first_stage = list(stages.keys()).index(ir)

    # cache manager
    so_path = make_stub(name, signature, constants)
    # create cache manager
    fn_cache_manager = CacheManager(make_hash(fn, **kwargs))
    # determine name and extension type of provided function
    if isinstance(fn, triton.runtime.JITFunction):
        name, ext = fn.__name__, "ast"
    else:
        name, ext = os.path.basename(fn).split(".")

    # load metadata if any
    metadata = None
    if fn_cache_manager.has_file(f'{name}.json'):
        with open(fn_cache_manager._make_path(f"{name}.json")) as f:
            metadata = json.load(f)
    else:
        metadata = {"num_warps": num_warps, "num_stages": num_stages, "ctime": dict()}
        if ext == "ptx":
            # shared-memory size cannot be derived from PTX alone
            assert "shared" in kwargs, "ptx compilation must provide shared memory size"
            metadata["shared"] = kwargs["shared"]

    # NOTE(review): this overwrites the ``first_stage`` computed above; for a
    # file input ``ext`` equals ``ir`` so the value is the same either way.
    first_stage = list(stages.keys()).index(ext)
    asm = dict()
    module = fn
    # run compilation pipeline and populate metadata
    # NOTE(review): the loop variable ``compile`` shadows this function inside
    # the loop body (intentional here, but easy to trip over).
    for ir, (parse, compile) in list(stages.items())[first_stage:]:
        path = fn_cache_manager._make_path(f"{name}.{ir}")
        if ir == ext:
            # input stage: parse the provided object/file directly
            next_module = parse(fn)
        elif os.path.exists(path) and\
                ir in metadata["ctime"] and\
                os.path.getctime(path) == metadata["ctime"][ir]:
            # cached artifact is still valid (ctime matches recorded value)
            next_module = parse(path)
        else:
            next_module = compile(module)
            fn_cache_manager.put(next_module, f"{name}.{ir}")
        if os.path.exists(path):
            metadata["ctime"][ir] = os.path.getctime(path)
        asm[ir] = next_module if ir == "cubin" else str(next_module)
        if ir == "llir" and "shared" not in metadata:
            metadata["shared"] = _triton.get_shared_memory_size(module)
        if ir == "ptx":
            metadata["name"] = ptx_get_kernel_name(next_module)
        module = next_module
    # write-back metadata
    fn_cache_manager.put(json.dumps(metadata), f"{name}.json", binary=False)
    # return handle to compiled kernel
    return CompiledKernel(so_path, metadata, asm)
|
| 1635 |
+
|
| 1636 |
+
|
| 1637 |
+
class CompiledKernel:
    """Handle to a compiled Triton kernel plus its generated launcher module.

    CUDA handles (module/function) are created lazily on first use; the
    launcher is loaded eagerly from the compiled stub ``.so``.
    """

    # Hooks for external tools to monitor the execution of triton kernels
    launch_enter_hook = None
    launch_exit_hook = None

    def __init__(self, so_path, metadata, asm):
        # initialize launcher: import the per-signature stub extension module
        import importlib.util
        spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        self.c_wrapper = getattr(mod, "launch")
        # initialize metadata
        self.shared = metadata["shared"]
        self.num_warps = metadata["num_warps"]
        self.num_stages = metadata["num_stages"]
        # initialize asm dict
        self.asm = asm
        # binaries are lazily initialized
        # because it involves doing runtime things
        # (e.g., checking amount of shared memory on current device)
        self.metadata = metadata
        self.cu_module = None
        self.cu_function = None

    def _init_handles(self):
        """Load the cubin into the driver on first use (idempotent)."""
        if self.cu_module is not None:
            return
        device = torch.cuda.current_device()
        global cuda_utils
        # cuda_utils / init_cuda_utils / OutOfResources are defined elsewhere
        # in this module
        init_cuda_utils()
        max_shared = cuda_utils.get_device_properties(device)["max_shared_mem"]
        if self.shared > max_shared:
            raise OutOfResources(self.shared, max_shared, "shared memory")
        mod, func, n_regs, n_spills = cuda_utils.load_binary(self.metadata["name"], self.asm["cubin"], self.shared, device)
        # print(self.shared, n_regs, n_spills)
        self.cu_module = mod
        self.cu_function = func

    def __getattribute__(self, name):
        # Intercept only 'c_wrapper' so the CUDA handles are materialized
        # before the first launch; all other attribute access is untouched.
        if name == 'c_wrapper':
            self._init_handles()
        return super().__getattribute__(name)

    def __getitem__(self, grid):
        """``kernel[grid](*args)`` launch syntax: return a runner bound to *grid*."""
        self._init_handles()

        def runner(*args, stream=None):
            if stream is None:
                stream = torch.cuda.current_stream().cuda_stream
            self.c_wrapper(grid[0], grid[1], grid[2], self.num_warps, self.shared, stream, self.cu_function,
                           CompiledKernel.launch_enter_hook, CompiledKernel.launch_exit_hook, self, *args)
        return runner

    def get_sass(self, fun=None):
        """Disassemble the cubin to SASS (cached in ``self.asm['sass']``)."""
        if 'sass' in self.asm:
            return self.asm['sass']
        # write the cubin to a temp file so the external extractor can read it
        fd, path = tempfile.mkstemp()
        try:
            with open(fd, 'wb') as cubin:
                cubin.write(self.asm['cubin'])
            self.sass = extract(path, fun)
        finally:
            os.remove(path)
        self.asm['sass'] = self.sass
        return self.sass
|
| 1704 |
+
|
| 1705 |
+
|
| 1706 |
+
class CudaUtils(object):
|
| 1707 |
+
|
| 1708 |
+
def __new__(cls):
|
| 1709 |
+
if not hasattr(cls, 'instance'):
|
| 1710 |
+
cls.instance = super(CudaUtils, cls).__new__(cls)
|
| 1711 |
+
return cls.instance
|
| 1712 |
+
|
| 1713 |
+
@staticmethod
|
| 1714 |
+
def _generate_src():
|
| 1715 |
+
return """
|
| 1716 |
+
#include <cuda.h>
|
| 1717 |
+
|
| 1718 |
+
#include \"cuda.h\"
|
| 1719 |
+
#define PY_SSIZE_T_CLEAN
|
| 1720 |
+
#include <Python.h>
|
| 1721 |
+
|
| 1722 |
+
static inline void gpuAssert(CUresult code, const char *file, int line)
|
| 1723 |
+
{
|
| 1724 |
+
if (code != CUDA_SUCCESS)
|
| 1725 |
+
{
|
| 1726 |
+
const char* prefix = "Triton Error [CUDA]: ";
|
| 1727 |
+
const char* str;
|
| 1728 |
+
cuGetErrorString(code, &str);
|
| 1729 |
+
char err[1024] = {0};
|
| 1730 |
+
strcat(err, prefix);
|
| 1731 |
+
strcat(err, str);
|
| 1732 |
+
PyErr_SetString(PyExc_RuntimeError, err);
|
| 1733 |
+
}
|
| 1734 |
+
}
|
| 1735 |
+
|
| 1736 |
+
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); if(PyErr_Occurred()) return NULL; }
|
| 1737 |
+
|
| 1738 |
+
static PyObject* getDeviceProperties(PyObject* self, PyObject* args){
|
| 1739 |
+
int device_id;
|
| 1740 |
+
if(!PyArg_ParseTuple(args, "i", &device_id))
|
| 1741 |
+
return NULL;
|
| 1742 |
+
// Get device handle
|
| 1743 |
+
CUdevice device;
|
| 1744 |
+
cuDeviceGet(&device, device_id);
|
| 1745 |
+
|
| 1746 |
+
// create a struct to hold device properties
|
| 1747 |
+
int max_shared_mem;
|
| 1748 |
+
int multiprocessor_count;
|
| 1749 |
+
int sm_clock_rate;
|
| 1750 |
+
int mem_clock_rate;
|
| 1751 |
+
int mem_bus_width;
|
| 1752 |
+
CUDA_CHECK(cuDeviceGetAttribute(&max_shared_mem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device));
|
| 1753 |
+
CUDA_CHECK(cuDeviceGetAttribute(&multiprocessor_count, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device));
|
| 1754 |
+
CUDA_CHECK(cuDeviceGetAttribute(&sm_clock_rate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device));
|
| 1755 |
+
CUDA_CHECK(cuDeviceGetAttribute(&mem_clock_rate, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, device));
|
| 1756 |
+
CUDA_CHECK(cuDeviceGetAttribute(&mem_bus_width, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, device));
|
| 1757 |
+
|
| 1758 |
+
|
| 1759 |
+
return Py_BuildValue("{s:i, s:i, s:i, s:i, s:i}", "max_shared_mem", max_shared_mem,
|
| 1760 |
+
"multiprocessor_count", multiprocessor_count,
|
| 1761 |
+
"sm_clock_rate", sm_clock_rate,
|
| 1762 |
+
"mem_clock_rate", mem_clock_rate,
|
| 1763 |
+
"mem_bus_width", mem_bus_width);
|
| 1764 |
+
}
|
| 1765 |
+
|
| 1766 |
+
// load_binary(name, data, shared, device) -> (module, function, n_regs, n_spills)
//
// Loads a compiled cubin image into the CUDA driver, resolves the kernel
// named `name`, optionally opts the function into extra dynamic shared
// memory, and returns the raw driver handles plus register statistics.
static PyObject* loadBinary(PyObject* self, PyObject* args) {
    const char* name;      // kernel symbol to resolve inside the image
    const char* data;      // cubin bytes
    Py_ssize_t data_size;
    int shared;            // requested dynamic shared memory, in bytes
    int device;            // CUDA device ordinal
    if(!PyArg_ParseTuple(args, "ss#ii", &name, &data, &data_size, &shared, &device)) {
        return NULL;
    }
    CUfunction fun;
    CUmodule mod;
    int32_t n_regs = 0;
    int32_t n_spills = 0;
    // create driver handles
    CUDA_CHECK(cuModuleLoadData(&mod, data));
    CUDA_CHECK(cuModuleGetFunction(&fun, mod, name));
    // get allocated registers and spilled registers from the function
    CUDA_CHECK(cuFuncGetAttribute(&n_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, fun));
    CUDA_CHECK(cuFuncGetAttribute(&n_spills, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, fun));
    // LOCAL_SIZE_BYTES is in bytes; dividing by 4 presumably converts to a
    // count of spilled 32-bit registers — TODO confirm against callers.
    n_spills /= 4;
    // set dynamic shared memory if necessary
    int shared_optin;
    // NOTE(review): `device` is an int passed where a CUdevice is expected;
    // this relies on CUdevice being an integer typedef in the driver API.
    CUDA_CHECK(cuDeviceGetAttribute(&shared_optin, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device));
    // 49152 bytes (48 KiB) appears to be the default per-block shared-memory
    // limit; requests beyond it require the opt-in attribute below.
    if (shared > 49152 && shared_optin > 49152) {
        CUDA_CHECK(cuFuncSetCacheConfig(fun, CU_FUNC_CACHE_PREFER_SHARED));
        int shared_total, shared_static;
        CUDA_CHECK(cuDeviceGetAttribute(&shared_total, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR, device));
        CUDA_CHECK(cuFuncGetAttribute(&shared_static, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, fun));
        // Grant the kernel everything the device allows minus its static use.
        CUDA_CHECK(cuFuncSetAttribute(fun, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_optin - shared_static));
    }

    if(PyErr_Occurred()) {
        return NULL;
    }
    // Handles are returned as opaque 64-bit integers for the Python side.
    return Py_BuildValue("(KKii)", (uint64_t)mod, (uint64_t)fun, n_regs, n_spills);
}
|
| 1802 |
+
|
| 1803 |
+
// Method table exported to Python: maps the Python-visible names to the C
// implementations defined above.
static PyMethodDef ModuleMethods[] = {
    {"load_binary", loadBinary, METH_VARARGS, "Load provided cubin into CUDA driver"},
    {"get_device_properties", getDeviceProperties, METH_VARARGS, "Get the properties for a given device"},
    {NULL, NULL, 0, NULL} // sentinel
};
|
| 1808 |
+
|
| 1809 |
+
// Definition of the `cuda_utils` extension module.
// NOTE(review): the backslash-escaped quotes below are artifacts of this C
// source living inside a Python triple-quoted string (it is written out to
// main.c by CudaUtils.__init__); they become plain quotes in the generated
// file.
static struct PyModuleDef ModuleDef = {
    PyModuleDef_HEAD_INIT,
    \"cuda_utils\",
    NULL, //documentation
    -1, //size
    ModuleMethods
};
|
| 1816 |
+
|
| 1817 |
+
PyMODINIT_FUNC PyInit_cuda_utils(void) {
|
| 1818 |
+
PyObject *m = PyModule_Create(&ModuleDef);
|
| 1819 |
+
if(m == NULL) {
|
| 1820 |
+
return NULL;
|
| 1821 |
+
}
|
| 1822 |
+
PyModule_AddFunctions(m, ModuleMethods);
|
| 1823 |
+
return m;
|
| 1824 |
+
}
|
| 1825 |
+
"""
|
| 1826 |
+
|
| 1827 |
+
def __init__(self):
    """Compile (or fetch from cache) the cuda_utils extension and bind its functions.

    The generated C source is hashed so that recompilation only happens
    when the source actually changes.
    """
    source = self._generate_src()
    digest = hashlib.md5(source.encode("utf-8")).hexdigest()
    cache = CacheManager(digest)
    so_name = "cuda_utils.so"
    if not cache.has_file(so_name):
        # Cache miss: compile the C source in a scratch directory and store
        # the resulting shared object under the content-derived key.
        with tempfile.TemporaryDirectory() as build_dir:
            c_path = os.path.join(build_dir, "main.c")
            with open(c_path, "w") as c_file:
                c_file.write(source)
            built_so = _build("cuda_utils", c_path, build_dir)
            with open(built_so, "rb") as so_file:
                cache.put(so_file.read(), so_name, binary=True)
    # Load the cached shared object as a Python extension module and expose
    # its entry points on this instance.
    import importlib.util
    spec = importlib.util.spec_from_file_location("cuda_utils", cache._make_path(so_name))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    self.load_binary = module.load_binary
    self.get_device_properties = module.get_device_properties
|
| 1846 |
+
|
| 1847 |
+
|
| 1848 |
+
def init_cuda_utils():
    """Create the module-level CudaUtils singleton on first use (idempotent)."""
    global cuda_utils
    if cuda_utils is not None:
        return
    cuda_utils = CudaUtils()
|
| 1852 |
+
|
| 1853 |
+
|
| 1854 |
+
# Module-level singleton; stays None until init_cuda_utils() populates it.
cuda_utils = None
|
deepseekvl2/lib/python3.10/site-packages/triton/impl/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Triton internal implementation details.
|
| 2 |
+
|
| 3 |
+
Client libraries should not import interfaces from the `triton.impl` module;
|
| 4 |
+
as the details are subject to change.
|
| 5 |
+
|
| 6 |
+
APIs defined in the `triton.impl` module which are public will be re-exported
|
| 7 |
+
in other relevant `triton` module namespaces.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from .base import builtin, extern, is_builtin
|
| 11 |
+
from triton._C.libtriton.triton import ir
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"builtin",
|
| 15 |
+
"extern",
|
| 16 |
+
"ir",
|
| 17 |
+
"is_builtin",
|
| 18 |
+
]
|
deepseekvl2/lib/python3.10/site-packages/triton/impl/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (642 Bytes). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/impl/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (1.23 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/impl/base.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations

from functools import wraps
from typing import TypeVar

T = TypeVar("T")

# Attribute name used to tag callables that are Triton builtins.
TRITON_BUILTIN = "__triton_builtin__"


def builtin(fn: T) -> T:
    """Mark a function as a builtin.

    The returned wrapper refuses to run unless a non-None ``_builder``
    keyword is supplied, which the Triton code generator threads through
    inside JIT-compiled functions.
    """
    assert callable(fn)

    @wraps(fn)
    def guarded(*args, **kwargs):
        # Absent or explicitly-None `_builder` means we are being called
        # from plain Python rather than from inside a JIT function.
        if kwargs.get("_builder") is None:
            raise ValueError(
                "Did you forget to add @triton.jit ? "
                "(`_builder` argument must be provided outside of JIT functions.)"
            )
        return fn(*args, **kwargs)

    setattr(guarded, TRITON_BUILTIN, True)
    return guarded


def is_builtin(fn) -> bool:
    """Return True iff *fn* was registered as a triton builtin."""
    return getattr(fn, TRITON_BUILTIN, False)


def extern(fn: T) -> T:
    """Decorator for external functions; externs share the builtin contract."""
    return builtin(fn)
|
deepseekvl2/lib/python3.10/site-packages/triton/language/__pycache__/extern.cpython-310.pyc
ADDED
|
Binary file (2.61 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/language/random.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import triton
|
| 2 |
+
from . import core as tl
|
| 3 |
+
|
| 4 |
+
# Philox 4x32 constants: the KEY_* values are added to the two key words
# after every round, and the ROUND_* values are the per-round multipliers
# (see philox_impl below, which uses them via tl.umulhi and `*`).
PHILOX_KEY_A: tl.constexpr = 0x9E3779B9
PHILOX_KEY_B: tl.constexpr = 0xBB67AE85
PHILOX_ROUND_A: tl.constexpr = 0xD2511F53
PHILOX_ROUND_B: tl.constexpr = 0xCD9E8D57
N_ROUNDS_DEFAULT = 10  # Default number of rounds for philox
|
| 9 |
+
|
| 10 |
+
# -------------------
|
| 11 |
+
# randint
|
| 12 |
+
# -------------------
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@triton.jit
def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1).
    """
    # n_rounds is a tl.constexpr, so the trip count is fixed at compile time.
    for _ in tl.static_range(n_rounds):
        # update random state: multiply two counter words by the round
        # constants (high 32 bits via umulhi, low 32 bits via `*`) and mix
        # with the remaining counters and the key words.
        A = PHILOX_ROUND_A
        B = PHILOX_ROUND_B
        _c0, _c2 = c0, c2  # snapshot pre-round values; all four updates read them
        c0 = tl.umulhi(B, _c2) ^ c1 ^ k0
        c2 = tl.umulhi(A, _c0) ^ c3 ^ k1
        c1 = B * _c2
        c3 = A * _c0
        # raise key
        k0 = k0 + PHILOX_KEY_A
        k1 = k1 + PHILOX_KEY_B
    return c0, c1, c2, c3
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@triton.jit
def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Run Philox on the counter block (c0, c1, c2, c3), deriving the two
    32-bit key words from the 64-bit `seed`.
    """
    seed = seed.to(tl.uint64)
    # Split the 64-bit seed into (lo, hi) 32-bit key words.
    seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32)
    seed_lo = (seed & 0xffffffff).to(tl.uint32)
    # Reinterpret counters as uint32 without changing their bits.
    c0 = c0.to(tl.uint32, bitcast=True)
    c1 = c1.to(tl.uint32, bitcast=True)
    c2 = c2.to(tl.uint32, bitcast=True)
    c3 = c3.to(tl.uint32, bitcast=True)
    return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@triton.jit
def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block, returns a single
    block of random :code:`int32`.

    If you need multiple streams of random numbers,
    using `randint4x` is likely to be faster than calling `randint` 4 times.

    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
    """
    # Philox always produces four blocks; keep the first and discard the rest.
    ret, _, _, _ = randint4x(seed, offset, n_rounds)
    return ret
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@triton.jit
def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block, returns four
    blocks of random :code:`int32`.

    This is the maximally efficient entry point
    to Triton's Philox pseudo-random number generator.

    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
    """
    # _0 = tl.zeros(offset.shape, offset.dtype)
    # Multiplying by zero yields a zero block with offset's shape and dtype.
    _0 = offset * 0
    return philox(seed, offset, _0, _0, _0, n_rounds)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# -------------------
|
| 81 |
+
# rand
|
| 82 |
+
# -------------------
|
| 83 |
+
|
| 84 |
+
# @triton.jit
|
| 85 |
+
# def uint32_to_uniform_float(x):
|
| 86 |
+
# """
|
| 87 |
+
# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
|
| 88 |
+
# """
|
| 89 |
+
# two_to_the_minus_32: tl.constexpr = 2.328306e-10
|
| 90 |
+
# return x * two_to_the_minus_32
|
| 91 |
+
|
| 92 |
+
@triton.jit
def uint32_to_uniform_float(x):
    """
    Numerically stable function to convert a random uint32 into a random
    float uniformly sampled in [0, 1).
    """
    x = x.to(tl.int32, bitcast=True)
    # maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
    scale = 4.6566127342e-10
    # Fold negative int32 values onto non-negative ones (-x - 1 is the
    # bitwise complement), so every input maps into [0, 2**31).
    x = tl.where(x < 0, -x - 1, x)
    return x * scale
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@triton.jit
def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns a block of random :code:`float32` in :math:`U(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
    """
    offset = offset.to(tl.uint32, bitcast=True)
    # Draw a block of random int32 and rescale it into [0, 1).
    source = randint(seed, offset, n_rounds)
    return uint32_to_uniform_float(source)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@triton.jit
def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offsets` block,
    returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    offsets = offsets.to(tl.uint32, bitcast=True)
    # One Philox call yields four int32 blocks; rescale each into [0, 1).
    i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds)
    u1 = uint32_to_uniform_float(i1)
    u2 = uint32_to_uniform_float(i2)
    u3 = uint32_to_uniform_float(i3)
    u4 = uint32_to_uniform_float(i4)
    return u1, u2, u3, u4
|
| 134 |
+
|
| 135 |
+
# -------------------
|
| 136 |
+
# randn
|
| 137 |
+
# -------------------
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@triton.jit
def pair_uniform_to_normal(u1, u2):
    """Box-Muller transform: map two U(0, 1) blocks to two N(0, 1) blocks."""
    # Clamp u1 away from zero so tl.log(u1) stays finite.
    u1 = tl.maximum(1.0e-7, u1)
    th = 6.283185307179586 * u2  # 2*pi * u2
    r = tl.sqrt(-2.0 * tl.log(u1))
    return r * tl.cos(th), r * tl.sin(th)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@triton.jit
def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
    """
    # Two uniform blocks feed one Box-Muller pair; the second normal output
    # and the two remaining Philox blocks are discarded.
    i1, i2, _, _ = randint4x(seed, offset, n_rounds)
    u1 = uint32_to_uniform_float(i1)
    u2 = uint32_to_uniform_float(i2)
    n1, _ = pair_uniform_to_normal(u1, u2)
    return n1
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@triton.jit
def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
    """
    # Four uniform blocks are paired up into four normal blocks.
    u1, u2, u3, u4 = rand4x(seed, offset, n_rounds)
    n1, n2 = pair_uniform_to_normal(u1, u2)
    n3, n4 = pair_uniform_to_normal(u3, u4)
    return n1, n2, n3, n4
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# from .conv import _conv, conv
|
| 2 |
+
from . import blocksparse
|
| 3 |
+
from .cross_entropy import _cross_entropy, cross_entropy
|
| 4 |
+
from .flash_attention import attention
|
| 5 |
+
from .matmul import _matmul, matmul
|
| 6 |
+
|
| 7 |
+
__all__ = [
|
| 8 |
+
"blocksparse",
|
| 9 |
+
"_cross_entropy",
|
| 10 |
+
"cross_entropy",
|
| 11 |
+
"_matmul",
|
| 12 |
+
"matmul",
|
| 13 |
+
"attention",
|
| 14 |
+
]
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (411 Bytes). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc
ADDED
|
Binary file (3.58 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc
ADDED
|
Binary file (6.44 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc
ADDED
|
Binary file (4.58 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc
ADDED
|
Binary file (4.33 kB). View file
|
|
|
deepseekvl2/lib/python3.10/site-packages/triton/ops/blocksparse/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .matmul import matmul
|
| 2 |
+
from .softmax import softmax
|
| 3 |
+
|
| 4 |
+
__all__ = [
|
| 5 |
+
"matmul",
|
| 6 |
+
"softmax",
|
| 7 |
+
]
|