Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- evalkit_cambrian/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/common/backend.py +183 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/common/build.py +140 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/compiler/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/compiler/__pycache__/utils.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/core.py +1884 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/math.py +1676 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/random.py +202 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/build_extern.py +376 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/compile.c +67 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/compile.h +14 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/compile.py +145 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/disasm.py +142 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/tools/link.py +322 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_ops.h +50 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long.h +30 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h +25 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h +24 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_cuda_dispatch.h +24 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size.h +30 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size.h +30 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_cuda_dispatch.h +24 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_cuda_dispatch.h +23 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_ops.h +50 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_ops.h +50 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h +87 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_native.h +21 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_ops.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_ops.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h +28 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_meta.h +27 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_cuda_dispatch.h +25 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter.h +91 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_ops.h +50 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_ops.h +83 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_cuda_dispatch.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_ops.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h +23 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cpu_dispatch.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h +23 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h +28 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h +23 -0
evalkit_cambrian/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc
ADDED
|
Binary file (6.13 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/common/backend.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import hashlib
|
| 3 |
+
import importlib
|
| 4 |
+
import importlib.util
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
import subprocess
|
| 8 |
+
import traceback
|
| 9 |
+
from typing import Dict
|
| 10 |
+
|
| 11 |
+
from ..runtime.driver import DriverBase
|
| 12 |
+
|
| 13 |
+
TRITON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 14 |
+
TRITON_VERSION = "2.2.0"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class BaseBackend:
|
| 18 |
+
|
| 19 |
+
def __init__(self, device_type: str) -> None:
|
| 20 |
+
self.device_type = device_type
|
| 21 |
+
|
| 22 |
+
def add_stages(self, arch, extern_libs, stages):
|
| 23 |
+
"""
|
| 24 |
+
Custom the arch, extern_libs and stages per backend specific requirement
|
| 25 |
+
"""
|
| 26 |
+
raise NotImplementedError
|
| 27 |
+
|
| 28 |
+
def add_meta_info(self, ir, cur_module, next_module, metadata, asm):
|
| 29 |
+
"""
|
| 30 |
+
Custom the ir, module, metadata and asm per backend specific requirement
|
| 31 |
+
"""
|
| 32 |
+
raise NotImplementedError
|
| 33 |
+
|
| 34 |
+
def get_load_binary_fn(self):
|
| 35 |
+
"""
|
| 36 |
+
Return a callable to load binary
|
| 37 |
+
"""
|
| 38 |
+
raise NotImplementedError
|
| 39 |
+
|
| 40 |
+
def get_driver(self) -> DriverBase:
|
| 41 |
+
"""
|
| 42 |
+
Get the backend driver. Please refer to "DriverBase" for more details
|
| 43 |
+
"""
|
| 44 |
+
raise NotImplementedError
|
| 45 |
+
|
| 46 |
+
def get_stream(self):
|
| 47 |
+
"""
|
| 48 |
+
Get stream for current device
|
| 49 |
+
"""
|
| 50 |
+
raise NotImplementedError
|
| 51 |
+
|
| 52 |
+
def get_device_properties(self, device):
|
| 53 |
+
raise NotImplementedError
|
| 54 |
+
|
| 55 |
+
def get_current_device(self):
|
| 56 |
+
"""
|
| 57 |
+
Get current device
|
| 58 |
+
"""
|
| 59 |
+
raise NotImplementedError
|
| 60 |
+
|
| 61 |
+
def set_current_device(self, device):
|
| 62 |
+
"""
|
| 63 |
+
Set current device as the given device
|
| 64 |
+
"""
|
| 65 |
+
raise NotImplementedError
|
| 66 |
+
|
| 67 |
+
def get_kernel_bin(self):
|
| 68 |
+
raise NotImplementedError
|
| 69 |
+
|
| 70 |
+
def make_launcher_stub(self, name, signature, constants):
|
| 71 |
+
"""
|
| 72 |
+
Generate the launcher stub to launch the kernel
|
| 73 |
+
"""
|
| 74 |
+
raise NotImplementedError
|
| 75 |
+
|
| 76 |
+
def get_architecture_descriptor(self, **kwargs):
|
| 77 |
+
"""
|
| 78 |
+
Get the architecture descriptor the backend
|
| 79 |
+
"""
|
| 80 |
+
raise NotImplementedError
|
| 81 |
+
|
| 82 |
+
@classmethod
|
| 83 |
+
def create_backend(cls, device_type: str):
|
| 84 |
+
return cls(device_type)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
_backends: Dict[str, BaseBackend] = {}
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def register_backend(device_type: str, backend_cls: type):
|
| 91 |
+
if device_type not in _backends:
|
| 92 |
+
_backends[device_type] = backend_cls.create_backend(device_type)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def get_backend(device_type: str):
|
| 96 |
+
if device_type not in _backends:
|
| 97 |
+
device_backend_package_name = f"...third_party.{device_type}"
|
| 98 |
+
if importlib.util.find_spec(device_backend_package_name, package=__spec__.name):
|
| 99 |
+
try:
|
| 100 |
+
importlib.import_module(device_backend_package_name, package=__spec__.name)
|
| 101 |
+
except Exception:
|
| 102 |
+
traceback.print_exc()
|
| 103 |
+
else:
|
| 104 |
+
return None
|
| 105 |
+
return _backends[device_type] if device_type in _backends else None
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _path_to_binary(binary: str):
|
| 109 |
+
base_dir = os.path.join(os.path.dirname(__file__), os.pardir)
|
| 110 |
+
paths = [
|
| 111 |
+
os.environ.get(f"TRITON_{binary.upper()}_PATH", ""),
|
| 112 |
+
os.path.join(base_dir, "third_party", "cuda", "bin", binary)
|
| 113 |
+
]
|
| 114 |
+
|
| 115 |
+
for p in paths:
|
| 116 |
+
bin = p.split(" ")[0]
|
| 117 |
+
if os.path.exists(bin) and os.path.isfile(bin):
|
| 118 |
+
result = subprocess.check_output([bin, "--version"], stderr=subprocess.STDOUT)
|
| 119 |
+
if result is not None:
|
| 120 |
+
version = re.search(r".*release (\d+\.\d+).*", result.decode("utf-8"), flags=re.MULTILINE)
|
| 121 |
+
if version is not None:
|
| 122 |
+
return p, version.group(1)
|
| 123 |
+
raise RuntimeError(f"Cannot find {binary}")
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@functools.lru_cache()
|
| 127 |
+
def path_to_ptxas():
|
| 128 |
+
return _path_to_binary("ptxas")
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
@functools.lru_cache()
|
| 132 |
+
def path_to_cuobjdump():
|
| 133 |
+
return _path_to_binary("cuobjdump")
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@functools.lru_cache()
|
| 137 |
+
def path_to_nvdisasm():
|
| 138 |
+
return _path_to_binary("nvdisasm")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@functools.lru_cache()
|
| 142 |
+
def compute_core_version_key():
|
| 143 |
+
import pkgutil
|
| 144 |
+
contents = []
|
| 145 |
+
# frontend
|
| 146 |
+
with open(__file__, "rb") as f:
|
| 147 |
+
contents += [hashlib.sha1(f.read()).hexdigest()]
|
| 148 |
+
# compiler
|
| 149 |
+
compiler_path = os.path.join(TRITON_PATH, 'compiler')
|
| 150 |
+
for lib in pkgutil.iter_modules([compiler_path]):
|
| 151 |
+
with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
|
| 152 |
+
contents += [hashlib.sha1(f.read()).hexdigest()]
|
| 153 |
+
# backend
|
| 154 |
+
libtriton_hash = hashlib.sha1()
|
| 155 |
+
with open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f:
|
| 156 |
+
while True:
|
| 157 |
+
chunk = f.read(1024**2)
|
| 158 |
+
if not chunk:
|
| 159 |
+
break
|
| 160 |
+
libtriton_hash.update(chunk)
|
| 161 |
+
contents.append(libtriton_hash.hexdigest())
|
| 162 |
+
# language
|
| 163 |
+
language_path = os.path.join(TRITON_PATH, 'language')
|
| 164 |
+
for lib in pkgutil.iter_modules([language_path]):
|
| 165 |
+
with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
|
| 166 |
+
contents += [hashlib.sha1(f.read()).hexdigest()]
|
| 167 |
+
return '-'.join(TRITON_VERSION) + '-'.join(contents)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
_cached_cuda_version_key = None
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def get_cuda_version_key():
|
| 174 |
+
global _cached_cuda_version_key
|
| 175 |
+
if _cached_cuda_version_key is None:
|
| 176 |
+
key = compute_core_version_key()
|
| 177 |
+
try:
|
| 178 |
+
ptxas = path_to_ptxas()[0]
|
| 179 |
+
ptxas_version = subprocess.check_output([ptxas, "--version"])
|
| 180 |
+
except RuntimeError:
|
| 181 |
+
ptxas_version = b"NO_PTXAS"
|
| 182 |
+
_cached_cuda_version_key = key + '-' + hashlib.sha1(ptxas_version).hexdigest()
|
| 183 |
+
return _cached_cuda_version_key
|
evalkit_cambrian/lib/python3.10/site-packages/triton/common/build.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import functools
|
| 3 |
+
import io
|
| 4 |
+
import os
|
| 5 |
+
import shutil
|
| 6 |
+
import subprocess
|
| 7 |
+
import sys
|
| 8 |
+
import sysconfig
|
| 9 |
+
|
| 10 |
+
import setuptools
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# TODO: is_hip shouldn't be here
|
| 14 |
+
def is_hip():
|
| 15 |
+
import torch
|
| 16 |
+
return torch.version.hip is not None
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@functools.lru_cache()
|
| 20 |
+
def libcuda_dirs():
|
| 21 |
+
env_libcuda_path = os.getenv("TRITON_LIBCUDA_PATH")
|
| 22 |
+
if env_libcuda_path:
|
| 23 |
+
return [env_libcuda_path]
|
| 24 |
+
|
| 25 |
+
libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode()
|
| 26 |
+
# each line looks like the following:
|
| 27 |
+
# libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1
|
| 28 |
+
locs = [line.split()[-1] for line in libs.splitlines() if "libcuda.so" in line]
|
| 29 |
+
dirs = [os.path.dirname(loc) for loc in locs]
|
| 30 |
+
env_ld_library_path = os.getenv("LD_LIBRARY_PATH")
|
| 31 |
+
if env_ld_library_path and not dirs:
|
| 32 |
+
dirs = [dir for dir in env_ld_library_path.split(":") if os.path.exists(os.path.join(dir, "libcuda.so"))]
|
| 33 |
+
msg = 'libcuda.so cannot found!\n'
|
| 34 |
+
if locs:
|
| 35 |
+
msg += 'Possible files are located at %s.' % str(locs)
|
| 36 |
+
msg += 'Please create a symlink of libcuda.so to any of the file.'
|
| 37 |
+
else:
|
| 38 |
+
msg += 'Please make sure GPU is setup and then run "/sbin/ldconfig"'
|
| 39 |
+
msg += ' (requires sudo) to refresh the linker cache.'
|
| 40 |
+
assert any(os.path.exists(os.path.join(path, 'libcuda.so')) for path in dirs), msg
|
| 41 |
+
return dirs
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@functools.lru_cache()
|
| 45 |
+
def rocm_path_dir():
|
| 46 |
+
return os.getenv("ROCM_PATH", default="/opt/rocm")
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@contextlib.contextmanager
|
| 50 |
+
def quiet():
|
| 51 |
+
old_stdout, old_stderr = sys.stdout, sys.stderr
|
| 52 |
+
sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
|
| 53 |
+
try:
|
| 54 |
+
yield
|
| 55 |
+
finally:
|
| 56 |
+
sys.stdout, sys.stderr = old_stdout, old_stderr
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@functools.lru_cache()
|
| 60 |
+
def cuda_include_dir():
|
| 61 |
+
base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir)
|
| 62 |
+
cuda_path = os.path.join(base_dir, "third_party", "cuda")
|
| 63 |
+
return os.path.join(cuda_path, "include")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _build(name, src, srcdir):
|
| 67 |
+
if is_hip():
|
| 68 |
+
hip_lib_dir = os.path.join(rocm_path_dir(), "lib")
|
| 69 |
+
hip_include_dir = os.path.join(rocm_path_dir(), "include")
|
| 70 |
+
else:
|
| 71 |
+
cuda_lib_dirs = libcuda_dirs()
|
| 72 |
+
cu_include_dir = cuda_include_dir()
|
| 73 |
+
suffix = sysconfig.get_config_var('EXT_SUFFIX')
|
| 74 |
+
so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix))
|
| 75 |
+
# try to avoid setuptools if possible
|
| 76 |
+
cc = os.environ.get("CC")
|
| 77 |
+
if cc is None:
|
| 78 |
+
# TODO: support more things here.
|
| 79 |
+
clang = shutil.which("clang")
|
| 80 |
+
gcc = shutil.which("gcc")
|
| 81 |
+
cc = gcc if gcc is not None else clang
|
| 82 |
+
if cc is None:
|
| 83 |
+
raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.")
|
| 84 |
+
# This function was renamed and made public in Python 3.10
|
| 85 |
+
if hasattr(sysconfig, 'get_default_scheme'):
|
| 86 |
+
scheme = sysconfig.get_default_scheme()
|
| 87 |
+
else:
|
| 88 |
+
scheme = sysconfig._get_default_scheme()
|
| 89 |
+
# 'posix_local' is a custom scheme on Debian. However, starting Python 3.10, the default install
|
| 90 |
+
# path changes to include 'local'. This change is required to use triton with system-wide python.
|
| 91 |
+
if scheme == 'posix_local':
|
| 92 |
+
scheme = 'posix_prefix'
|
| 93 |
+
py_include_dir = sysconfig.get_paths(scheme=scheme)["include"]
|
| 94 |
+
|
| 95 |
+
if is_hip():
|
| 96 |
+
ret = subprocess.check_call([
|
| 97 |
+
cc, src, f"-I{hip_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC",
|
| 98 |
+
f"-L{hip_lib_dir}", "-lamdhip64", "-o", so
|
| 99 |
+
])
|
| 100 |
+
else:
|
| 101 |
+
cc_cmd = [
|
| 102 |
+
cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda",
|
| 103 |
+
"-o", so
|
| 104 |
+
]
|
| 105 |
+
cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs]
|
| 106 |
+
ret = subprocess.check_call(cc_cmd)
|
| 107 |
+
|
| 108 |
+
if ret == 0:
|
| 109 |
+
return so
|
| 110 |
+
# fallback on setuptools
|
| 111 |
+
extra_compile_args = []
|
| 112 |
+
library_dirs = cuda_lib_dirs
|
| 113 |
+
include_dirs = [srcdir, cu_include_dir]
|
| 114 |
+
libraries = ['cuda']
|
| 115 |
+
# extra arguments
|
| 116 |
+
extra_link_args = []
|
| 117 |
+
# create extension module
|
| 118 |
+
ext = setuptools.Extension(
|
| 119 |
+
name=name,
|
| 120 |
+
language='c',
|
| 121 |
+
sources=[src],
|
| 122 |
+
include_dirs=include_dirs,
|
| 123 |
+
extra_compile_args=extra_compile_args + ['-O3'],
|
| 124 |
+
extra_link_args=extra_link_args,
|
| 125 |
+
library_dirs=library_dirs,
|
| 126 |
+
libraries=libraries,
|
| 127 |
+
)
|
| 128 |
+
# build extension module
|
| 129 |
+
args = ['build_ext']
|
| 130 |
+
args.append('--build-temp=' + srcdir)
|
| 131 |
+
args.append('--build-lib=' + srcdir)
|
| 132 |
+
args.append('-q')
|
| 133 |
+
args = dict(
|
| 134 |
+
name=name,
|
| 135 |
+
ext_modules=[ext],
|
| 136 |
+
script_args=args,
|
| 137 |
+
)
|
| 138 |
+
with quiet():
|
| 139 |
+
setuptools.setup(**args)
|
| 140 |
+
return so
|
evalkit_cambrian/lib/python3.10/site-packages/triton/compiler/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (445 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/compiler/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (9.97 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/core.py
ADDED
|
@@ -0,0 +1,1884 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from functools import partial, wraps
|
| 6 |
+
from typing import Callable, List, Sequence, TypeVar
|
| 7 |
+
|
| 8 |
+
from .._C.libtriton.triton import ir
|
| 9 |
+
from . import semantic
|
| 10 |
+
|
| 11 |
+
T = TypeVar('T')
|
| 12 |
+
|
| 13 |
+
TRITON_MAX_TENSOR_NUMEL = 1048576
|
| 14 |
+
|
| 15 |
+
TRITON_BUILTIN = "__triton_builtin__"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def builtin(fn: T) -> T:
|
| 19 |
+
"""Mark a function as a builtin."""
|
| 20 |
+
assert callable(fn)
|
| 21 |
+
|
| 22 |
+
@wraps(fn)
|
| 23 |
+
def wrapper(*args, **kwargs):
|
| 24 |
+
if "_builder" not in kwargs or kwargs["_builder"] is None:
|
| 25 |
+
raise ValueError("Did you forget to add @triton.jit ? "
|
| 26 |
+
"(`_builder` argument must be provided outside of JIT functions.)")
|
| 27 |
+
return fn(*args, **kwargs)
|
| 28 |
+
|
| 29 |
+
setattr(wrapper, TRITON_BUILTIN, True)
|
| 30 |
+
|
| 31 |
+
return wrapper
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def is_builtin(fn) -> bool:
|
| 35 |
+
"""Is this a registered triton builtin function?"""
|
| 36 |
+
return getattr(fn, TRITON_BUILTIN, False)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _to_tensor(x, builder):
|
| 40 |
+
if isinstance(x, bool):
|
| 41 |
+
return tensor(builder.get_int1(x), int1)
|
| 42 |
+
# Note: compile-time const integers are represented by unsigned values
|
| 43 |
+
elif isinstance(x, int):
|
| 44 |
+
if -2**31 <= x < 2**31:
|
| 45 |
+
return tensor(builder.get_int32(x), int32)
|
| 46 |
+
elif 2**31 <= x < 2**32:
|
| 47 |
+
return tensor(builder.get_uint32(x), uint32)
|
| 48 |
+
elif -2**63 <= x < 2**63:
|
| 49 |
+
return tensor(builder.get_int64(x), int64)
|
| 50 |
+
elif 2**63 <= x < 2**64:
|
| 51 |
+
return tensor(builder.get_uint64(x), uint64)
|
| 52 |
+
else:
|
| 53 |
+
raise RuntimeError(f'Nonrepresentable integer {x}.')
|
| 54 |
+
elif isinstance(x, float):
|
| 55 |
+
min_float32 = 2**-126
|
| 56 |
+
max_float32 = (2 - 2**-23) * 2**127
|
| 57 |
+
abs_x = __builtins__['abs'](x)
|
| 58 |
+
if abs_x == float("inf") or\
|
| 59 |
+
abs_x == 0.0 or \
|
| 60 |
+
x != x or \
|
| 61 |
+
min_float32 <= abs_x <= max_float32:
|
| 62 |
+
return tensor(builder.get_fp32(x), float32)
|
| 63 |
+
else:
|
| 64 |
+
return tensor(builder.get_fp64(x), float64)
|
| 65 |
+
|
| 66 |
+
elif isinstance(x, constexpr):
|
| 67 |
+
return _to_tensor(x.value, builder)
|
| 68 |
+
elif isinstance(x, tensor):
|
| 69 |
+
return x
|
| 70 |
+
assert False, f"cannot convert {x} of type {type(x)} to tensor"
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class dtype:
|
| 74 |
+
SINT_TYPES = ['int8', 'int16', 'int32', 'int64']
|
| 75 |
+
UINT_TYPES = ['int1', 'uint8', 'uint16', 'uint32', 'uint64']
|
| 76 |
+
FP_TYPES = ['fp8e4b15', 'fp8e4b15x4', 'fp8e4nv', 'fp8e5', 'fp16', 'bf16', 'fp32', 'fp64']
|
| 77 |
+
STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64']
|
| 78 |
+
OTHER_TYPES = ['void']
|
| 79 |
+
|
| 80 |
+
class SIGNEDNESS(Enum):
|
| 81 |
+
SIGNED = 0
|
| 82 |
+
UNSIGNED = 1
|
| 83 |
+
|
| 84 |
+
def __init__(self, name):
|
| 85 |
+
self.name = name
|
| 86 |
+
assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name
|
| 87 |
+
if name in dtype.SINT_TYPES:
|
| 88 |
+
self.int_signedness = dtype.SIGNEDNESS.SIGNED
|
| 89 |
+
self.int_bitwidth = int(name.split('int')[-1])
|
| 90 |
+
self.primitive_bitwidth = self.int_bitwidth
|
| 91 |
+
elif name in dtype.UINT_TYPES:
|
| 92 |
+
self.int_signedness = dtype.SIGNEDNESS.UNSIGNED
|
| 93 |
+
self.int_bitwidth = int(name.split('int')[-1])
|
| 94 |
+
self.primitive_bitwidth = self.int_bitwidth
|
| 95 |
+
elif name in dtype.FP_TYPES:
|
| 96 |
+
if name == 'fp8e4b15':
|
| 97 |
+
self.fp_mantissa_width = 3
|
| 98 |
+
self.primitive_bitwidth = 8
|
| 99 |
+
self.exponent_bias = 15
|
| 100 |
+
elif name == 'fp8e4b15x4':
|
| 101 |
+
self.fp_mantissa_width = 3
|
| 102 |
+
self.primitive_bitwidth = 8
|
| 103 |
+
self.exponent_bias = 15
|
| 104 |
+
elif name == 'fp8e4nv':
|
| 105 |
+
self.fp_mantissa_width = 3
|
| 106 |
+
self.primitive_bitwidth = 8
|
| 107 |
+
self.exponent_bias = 7
|
| 108 |
+
elif name == 'fp8e5':
|
| 109 |
+
self.fp_mantissa_width = 2
|
| 110 |
+
self.primitive_bitwidth = 8
|
| 111 |
+
self.exponent_bias = 15
|
| 112 |
+
elif name == 'fp16':
|
| 113 |
+
self.fp_mantissa_width = 10
|
| 114 |
+
self.primitive_bitwidth = 16
|
| 115 |
+
self.exponent_bias = 15
|
| 116 |
+
elif name == 'bf16':
|
| 117 |
+
self.fp_mantissa_width = 7
|
| 118 |
+
self.primitive_bitwidth = 16
|
| 119 |
+
self.exponent_bias = 127
|
| 120 |
+
elif name == 'fp32':
|
| 121 |
+
self.fp_mantissa_width = 23
|
| 122 |
+
self.primitive_bitwidth = 32
|
| 123 |
+
self.exponent_bias = 127
|
| 124 |
+
elif name == 'fp64':
|
| 125 |
+
self.fp_mantissa_width = 53
|
| 126 |
+
self.primitive_bitwidth = 64
|
| 127 |
+
self.exponent_bias = 1023
|
| 128 |
+
else:
|
| 129 |
+
raise RuntimeError(f'Unsupported floating-point type {name}')
|
| 130 |
+
elif name == 'void':
|
| 131 |
+
self.primitive_bitwidth = 0
|
| 132 |
+
|
| 133 |
+
def is_fp8(self):
|
| 134 |
+
return 'fp8' in self.name
|
| 135 |
+
|
| 136 |
+
def is_fp8e4nv(self):
|
| 137 |
+
return self.name == 'fp8e4nv'
|
| 138 |
+
|
| 139 |
+
def is_fp8e4b15(self):
|
| 140 |
+
return self.name == 'fp8e4b15'
|
| 141 |
+
|
| 142 |
+
def is_fp8e4b15x4(self):
|
| 143 |
+
return self.name == 'fp8e4b15x4'
|
| 144 |
+
|
| 145 |
+
def is_fp8e5(self):
|
| 146 |
+
return self.name == 'fp8e5'
|
| 147 |
+
|
| 148 |
+
def is_fp16(self):
|
| 149 |
+
return self.name == 'fp16'
|
| 150 |
+
|
| 151 |
+
def is_bf16(self):
|
| 152 |
+
return self.name == 'bf16'
|
| 153 |
+
|
| 154 |
+
def is_fp32(self):
|
| 155 |
+
return self.name == 'fp32'
|
| 156 |
+
|
| 157 |
+
def is_fp64(self):
|
| 158 |
+
return self.name == 'fp64'
|
| 159 |
+
|
| 160 |
+
def is_int1(self):
|
| 161 |
+
return self.name == 'int1'
|
| 162 |
+
|
| 163 |
+
def is_int8(self):
|
| 164 |
+
return self.name == 'int8'
|
| 165 |
+
|
| 166 |
+
def is_int16(self):
|
| 167 |
+
return self.name == 'int16'
|
| 168 |
+
|
| 169 |
+
def is_int32(self):
|
| 170 |
+
return self.name == 'int32'
|
| 171 |
+
|
| 172 |
+
def is_int64(self):
|
| 173 |
+
return self.name == 'int64'
|
| 174 |
+
|
| 175 |
+
def is_uint8(self):
|
| 176 |
+
return self.name == 'uint8'
|
| 177 |
+
|
| 178 |
+
def is_uint16(self):
|
| 179 |
+
return self.name == 'uint16'
|
| 180 |
+
|
| 181 |
+
def is_uint32(self):
|
| 182 |
+
return self.name == 'uint32'
|
| 183 |
+
|
| 184 |
+
def is_uint64(self):
|
| 185 |
+
return self.name == 'uint64'
|
| 186 |
+
|
| 187 |
+
def is_floating(self):
|
| 188 |
+
return self.name in dtype.FP_TYPES
|
| 189 |
+
|
| 190 |
+
def is_standard_floating(self):
|
| 191 |
+
return self.name in dtype.STANDARD_FP_TYPES
|
| 192 |
+
|
| 193 |
+
def is_int_signed(self):
|
| 194 |
+
return self.name in dtype.SINT_TYPES
|
| 195 |
+
|
| 196 |
+
def is_int_unsigned(self):
|
| 197 |
+
return self.name in dtype.UINT_TYPES
|
| 198 |
+
|
| 199 |
+
def is_int(self):
|
| 200 |
+
return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES
|
| 201 |
+
|
| 202 |
+
def is_bool(self):
|
| 203 |
+
return self.is_int1()
|
| 204 |
+
|
| 205 |
+
@staticmethod
|
| 206 |
+
def is_dtype(type_str):
|
| 207 |
+
return type_str in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES
|
| 208 |
+
|
| 209 |
+
@staticmethod
|
| 210 |
+
def is_void():
|
| 211 |
+
raise RuntimeError("Not implemented")
|
| 212 |
+
|
| 213 |
+
@staticmethod
|
| 214 |
+
def is_block():
|
| 215 |
+
return False
|
| 216 |
+
|
| 217 |
+
@staticmethod
|
| 218 |
+
def is_ptr():
|
| 219 |
+
return False
|
| 220 |
+
|
| 221 |
+
def __eq__(self, other: dtype):
|
| 222 |
+
if not isinstance(other, dtype):
|
| 223 |
+
return False
|
| 224 |
+
return self.name == other.name
|
| 225 |
+
|
| 226 |
+
def __ne__(self, other: dtype):
|
| 227 |
+
return not self.__eq__(other)
|
| 228 |
+
|
| 229 |
+
def __hash__(self):
|
| 230 |
+
return hash((self.name, ))
|
| 231 |
+
|
| 232 |
+
@property
|
| 233 |
+
def scalar(self):
|
| 234 |
+
return self
|
| 235 |
+
|
| 236 |
+
def to_ir(self, builder: ir.builder) -> ir.type:
|
| 237 |
+
if self.name == 'void':
|
| 238 |
+
return builder.get_void_ty()
|
| 239 |
+
elif self.name == 'int1':
|
| 240 |
+
return builder.get_int1_ty()
|
| 241 |
+
elif self.name in ('int8', 'uint8'):
|
| 242 |
+
return builder.get_int8_ty()
|
| 243 |
+
elif self.name in ('int16', 'uint16'):
|
| 244 |
+
return builder.get_int16_ty()
|
| 245 |
+
elif self.name in ('int32', 'uint32'):
|
| 246 |
+
return builder.get_int32_ty()
|
| 247 |
+
elif self.name in ('int64', 'uint64'):
|
| 248 |
+
return builder.get_int64_ty()
|
| 249 |
+
elif self.name == 'fp8e5':
|
| 250 |
+
return builder.get_fp8e5_ty()
|
| 251 |
+
elif self.name == 'fp8e4nv':
|
| 252 |
+
return builder.get_fp8e4nv_ty()
|
| 253 |
+
elif self.name == 'fp8e4b15':
|
| 254 |
+
return builder.get_fp8e4b15_ty()
|
| 255 |
+
elif self.name == 'fp8e4b15x4':
|
| 256 |
+
return builder.get_fp8e4b15x4_ty()
|
| 257 |
+
elif self.name == 'fp16':
|
| 258 |
+
return builder.get_half_ty()
|
| 259 |
+
elif self.name == 'bf16':
|
| 260 |
+
return builder.get_bf16_ty()
|
| 261 |
+
elif self.name == 'fp32':
|
| 262 |
+
return builder.get_float_ty()
|
| 263 |
+
elif self.name == 'fp64':
|
| 264 |
+
return builder.get_double_ty()
|
| 265 |
+
raise ValueError(f'fail to convert {self} to ir type')
|
| 266 |
+
|
| 267 |
+
def __str__(self):
|
| 268 |
+
return self.name
|
| 269 |
+
|
| 270 |
+
@property
|
| 271 |
+
def cache_key_part(self) -> str:
|
| 272 |
+
"""See cache_key_part() in triton.cc."""
|
| 273 |
+
return self.name
|
| 274 |
+
|
| 275 |
+
def __repr__(self):
|
| 276 |
+
return f'triton.language.{str(self)}'
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class pointer_type(dtype):
|
| 280 |
+
|
| 281 |
+
def __init__(self, element_ty: dtype, address_space: int = 1):
|
| 282 |
+
if not isinstance(element_ty, dtype):
|
| 283 |
+
raise TypeError('element_ty is a {type(element_ty).__name__}.')
|
| 284 |
+
self.element_ty = element_ty
|
| 285 |
+
self.address_space = address_space
|
| 286 |
+
|
| 287 |
+
self.name = self.__str__()
|
| 288 |
+
|
| 289 |
+
def to_ir(self, builder: ir.builder) -> ir.pointer_type:
|
| 290 |
+
return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1)
|
| 291 |
+
|
| 292 |
+
def __str__(self):
|
| 293 |
+
return f'pointer<{self.element_ty}>'
|
| 294 |
+
|
| 295 |
+
def __repr__(self):
|
| 296 |
+
return self.__str__()
|
| 297 |
+
|
| 298 |
+
def is_ptr(self):
|
| 299 |
+
return True
|
| 300 |
+
|
| 301 |
+
def __eq__(self, other: pointer_type) -> bool:
|
| 302 |
+
if not isinstance(other, pointer_type):
|
| 303 |
+
return False
|
| 304 |
+
return self.element_ty == other.element_ty and self.address_space == other.address_space
|
| 305 |
+
|
| 306 |
+
def __ne__(self, other: pointer_type) -> bool:
|
| 307 |
+
return not self.__eq__(other)
|
| 308 |
+
|
| 309 |
+
@property
|
| 310 |
+
def scalar(self):
|
| 311 |
+
return self
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
class block_type(dtype):
|
| 315 |
+
|
| 316 |
+
def __init__(self, element_ty: dtype, shape: List):
|
| 317 |
+
self.element_ty = element_ty
|
| 318 |
+
|
| 319 |
+
# Note that block_type's shape is a list of int
|
| 320 |
+
# while tensor's shape is a list of constexpr.
|
| 321 |
+
|
| 322 |
+
# shape can be empty ([]) when an input is a 0D tensor.
|
| 323 |
+
if not shape:
|
| 324 |
+
raise TypeError('0d block_type is forbidden')
|
| 325 |
+
if isinstance(shape[0], constexpr):
|
| 326 |
+
shape = [s.value for s in shape]
|
| 327 |
+
|
| 328 |
+
self.shape = shape
|
| 329 |
+
self.numel = 1
|
| 330 |
+
for s in self.shape:
|
| 331 |
+
self.numel *= s
|
| 332 |
+
if self.numel > TRITON_MAX_TENSOR_NUMEL:
|
| 333 |
+
raise ValueError(f"numel ({self.numel}) exceeds triton maximum tensor numel ({TRITON_MAX_TENSOR_NUMEL})")
|
| 334 |
+
|
| 335 |
+
self.name = self.__str__()
|
| 336 |
+
|
| 337 |
+
def to_ir(self, builder: ir.builder) -> ir.block_type:
|
| 338 |
+
return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape)
|
| 339 |
+
|
| 340 |
+
def __str__(self):
|
| 341 |
+
return f'<{self.shape}, {self.element_ty}>'
|
| 342 |
+
|
| 343 |
+
def __repr__(self):
|
| 344 |
+
return self.__str__()
|
| 345 |
+
|
| 346 |
+
def is_block(self):
|
| 347 |
+
return True
|
| 348 |
+
|
| 349 |
+
def get_block_shapes(self) -> List[int]:
|
| 350 |
+
return self.shape
|
| 351 |
+
|
| 352 |
+
def __eq__(self, other: block_type) -> bool:
|
| 353 |
+
if not isinstance(other, block_type):
|
| 354 |
+
return False
|
| 355 |
+
return self.element_ty == other.element_ty and self.shape == other.shape
|
| 356 |
+
|
| 357 |
+
def __ne__(self, other: block_type) -> bool:
|
| 358 |
+
return not self.__eq__(other)
|
| 359 |
+
|
| 360 |
+
@property
|
| 361 |
+
def scalar(self):
|
| 362 |
+
return self.element_ty
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class function_type(dtype):
|
| 366 |
+
|
| 367 |
+
def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None:
|
| 368 |
+
self.ret_types = ret_types
|
| 369 |
+
self.param_types = param_types
|
| 370 |
+
|
| 371 |
+
def __str__(self):
|
| 372 |
+
return f'fn ({self.param_types}) -> {self.ret_types}'
|
| 373 |
+
|
| 374 |
+
def to_ir(self, builder: ir.builder):
|
| 375 |
+
ir_param_types = [ty.to_ir(builder) for ty in self.param_types]
|
| 376 |
+
ret_types = [ret_type.to_ir(builder) for ret_type in self.ret_types]
|
| 377 |
+
return builder.get_function_ty(ir_param_types, ret_types)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
# scalar types
|
| 381 |
+
void = dtype('void')
|
| 382 |
+
int1 = dtype('int1')
|
| 383 |
+
int8 = dtype('int8')
|
| 384 |
+
int16 = dtype('int16')
|
| 385 |
+
int32 = dtype('int32')
|
| 386 |
+
int64 = dtype('int64')
|
| 387 |
+
uint8 = dtype('uint8')
|
| 388 |
+
uint16 = dtype('uint16')
|
| 389 |
+
uint32 = dtype('uint32')
|
| 390 |
+
uint64 = dtype('uint64')
|
| 391 |
+
float8e5 = dtype('fp8e5')
|
| 392 |
+
float8e4nv = dtype('fp8e4nv')
|
| 393 |
+
float8e4b15 = dtype('fp8e4b15')
|
| 394 |
+
float8e4b15x4 = dtype('fp8e4b15x4')
|
| 395 |
+
float16 = dtype('fp16')
|
| 396 |
+
bfloat16 = dtype('bf16')
|
| 397 |
+
float32 = dtype('fp32')
|
| 398 |
+
float64 = dtype('fp64')
|
| 399 |
+
# pointer types
|
| 400 |
+
pi32_t = pointer_type(int32)
|
| 401 |
+
|
| 402 |
+
# -----------------------
|
| 403 |
+
# constexpr
|
| 404 |
+
# -----------------------
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
class constexpr:
|
| 408 |
+
"""
|
| 409 |
+
This class is used to store a value that is known at compile-time.
|
| 410 |
+
"""
|
| 411 |
+
|
| 412 |
+
def __init__(self, value):
|
| 413 |
+
if isinstance(value, constexpr):
|
| 414 |
+
self.value = value.value
|
| 415 |
+
else:
|
| 416 |
+
self.value = value
|
| 417 |
+
|
| 418 |
+
def __repr__(self) -> str:
|
| 419 |
+
return f"constexpr[{self.value}]"
|
| 420 |
+
|
| 421 |
+
def __index__(self):
|
| 422 |
+
return self.value
|
| 423 |
+
|
| 424 |
+
def __add__(self, other):
|
| 425 |
+
return constexpr(self.value + other.value)
|
| 426 |
+
|
| 427 |
+
def __radd__(self, other):
|
| 428 |
+
return constexpr(other.value + self.value)
|
| 429 |
+
|
| 430 |
+
def __sub__(self, other):
|
| 431 |
+
return constexpr(self.value - other.value)
|
| 432 |
+
|
| 433 |
+
def __rsub__(self, other):
|
| 434 |
+
return constexpr(other.value - self.value)
|
| 435 |
+
|
| 436 |
+
def __mul__(self, other):
|
| 437 |
+
return constexpr(self.value * other.value)
|
| 438 |
+
|
| 439 |
+
def __mod__(self, other):
|
| 440 |
+
return constexpr(self.value % other.value)
|
| 441 |
+
|
| 442 |
+
def __rmul__(self, other):
|
| 443 |
+
return constexpr(other.value * self.value)
|
| 444 |
+
|
| 445 |
+
def __truediv__(self, other):
|
| 446 |
+
return constexpr(self.value / other.value)
|
| 447 |
+
|
| 448 |
+
def __rtruediv__(self, other):
|
| 449 |
+
return constexpr(other.value / self.value)
|
| 450 |
+
|
| 451 |
+
def __floordiv__(self, other):
|
| 452 |
+
return constexpr(self.value // other.value)
|
| 453 |
+
|
| 454 |
+
def __rfloordiv__(self, other):
|
| 455 |
+
return constexpr(other.value // self.value)
|
| 456 |
+
|
| 457 |
+
def __gt__(self, other):
|
| 458 |
+
return constexpr(self.value > other.value)
|
| 459 |
+
|
| 460 |
+
def __rgt__(self, other):
|
| 461 |
+
return constexpr(other.value > self.value)
|
| 462 |
+
|
| 463 |
+
def __ge__(self, other):
|
| 464 |
+
return constexpr(self.value >= other.value)
|
| 465 |
+
|
| 466 |
+
def __rge__(self, other):
|
| 467 |
+
return constexpr(other.value >= self.value)
|
| 468 |
+
|
| 469 |
+
def __lt__(self, other):
|
| 470 |
+
return constexpr(self.value < other.value)
|
| 471 |
+
|
| 472 |
+
def __rlt__(self, other):
|
| 473 |
+
return constexpr(other.value < self.value)
|
| 474 |
+
|
| 475 |
+
def __le__(self, other):
|
| 476 |
+
return constexpr(self.value <= other.value)
|
| 477 |
+
|
| 478 |
+
def __rle__(self, other):
|
| 479 |
+
return constexpr(other.value <= self.value)
|
| 480 |
+
|
| 481 |
+
def __eq__(self, other):
|
| 482 |
+
return constexpr(self.value == other.value)
|
| 483 |
+
|
| 484 |
+
def __ne__(self, other):
|
| 485 |
+
return constexpr(self.value != other.value)
|
| 486 |
+
|
| 487 |
+
def __bool__(self):
|
| 488 |
+
return bool(self.value)
|
| 489 |
+
|
| 490 |
+
def __neg__(self):
|
| 491 |
+
return constexpr(-self.value)
|
| 492 |
+
|
| 493 |
+
def __and__(self, other):
|
| 494 |
+
return constexpr(self.value & other.value)
|
| 495 |
+
|
| 496 |
+
def logical_and(self, other):
|
| 497 |
+
return constexpr(self.value and other.value)
|
| 498 |
+
|
| 499 |
+
def __or__(self, other):
|
| 500 |
+
return constexpr(self.value | other.value)
|
| 501 |
+
|
| 502 |
+
def __xor__(self, other):
|
| 503 |
+
return constexpr(self.value ^ other.value)
|
| 504 |
+
|
| 505 |
+
def logical_or(self, other):
|
| 506 |
+
return constexpr(self.value or other.value)
|
| 507 |
+
|
| 508 |
+
def __pos__(self):
|
| 509 |
+
return constexpr(+self.value)
|
| 510 |
+
|
| 511 |
+
def __invert__(self):
|
| 512 |
+
return constexpr(~self.value)
|
| 513 |
+
|
| 514 |
+
def __pow__(self, other):
|
| 515 |
+
return constexpr(self.value**other.value)
|
| 516 |
+
|
| 517 |
+
def __rshift__(self, other):
|
| 518 |
+
return constexpr(self.value >> other.value)
|
| 519 |
+
|
| 520 |
+
def __lshift__(self, other):
|
| 521 |
+
return constexpr(self.value << other.value)
|
| 522 |
+
|
| 523 |
+
def __not__(self):
|
| 524 |
+
return constexpr(not self.value)
|
| 525 |
+
|
| 526 |
+
def __call__(self, *args, **kwds):
|
| 527 |
+
return self.value(*args, **kwds)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
class tensor:
|
| 531 |
+
|
| 532 |
+
def __init__(self, handle, type: dtype):
|
| 533 |
+
# IR handle
|
| 534 |
+
self.handle = handle
|
| 535 |
+
# Block shape
|
| 536 |
+
self.shape = type.shape if type.is_block() else ()
|
| 537 |
+
self.numel = 1
|
| 538 |
+
for s in self.shape:
|
| 539 |
+
self.numel *= s
|
| 540 |
+
self.numel = constexpr(self.numel)
|
| 541 |
+
self.type = type # Tensor type (can be block_type)
|
| 542 |
+
# Following the practice in pytorch, dtype is scalar type
|
| 543 |
+
self.dtype = type.scalar
|
| 544 |
+
self.shape = [constexpr(s) for s in self.shape]
|
| 545 |
+
|
| 546 |
+
def __str__(self) -> str:
|
| 547 |
+
# ex. "float32[16, 32]"
|
| 548 |
+
return str(self.dtype) + '[' + ', '.join(str(s) for s in self.shape) + ']'
|
| 549 |
+
|
| 550 |
+
@builtin
|
| 551 |
+
def __add__(self, other, _builder=None):
|
| 552 |
+
other = _to_tensor(other, _builder)
|
| 553 |
+
return semantic.add(self, other, _builder)
|
| 554 |
+
|
| 555 |
+
@builtin
|
| 556 |
+
def __radd__(self, other, _builder=None):
|
| 557 |
+
return self.__add__(other, _builder=_builder)
|
| 558 |
+
|
| 559 |
+
@builtin
|
| 560 |
+
def __sub__(self, other, _builder=None):
|
| 561 |
+
other = _to_tensor(other, _builder)
|
| 562 |
+
return semantic.sub(self, other, _builder)
|
| 563 |
+
|
| 564 |
+
@builtin
|
| 565 |
+
def __rsub__(self, other, _builder=None):
|
| 566 |
+
other = _to_tensor(other, _builder)
|
| 567 |
+
return semantic.sub(other, self, _builder)
|
| 568 |
+
|
| 569 |
+
@builtin
|
| 570 |
+
def __mul__(self, other, _builder=None):
|
| 571 |
+
other = _to_tensor(other, _builder)
|
| 572 |
+
return semantic.mul(self, other, _builder)
|
| 573 |
+
|
| 574 |
+
@builtin
|
| 575 |
+
def __rmul__(self, other, _builder=None):
|
| 576 |
+
return self.__mul__(other, _builder=_builder)
|
| 577 |
+
|
| 578 |
+
@builtin
|
| 579 |
+
def __truediv__(self, other, _builder=None):
|
| 580 |
+
other = _to_tensor(other, _builder)
|
| 581 |
+
return semantic.truediv(self, other, _builder)
|
| 582 |
+
|
| 583 |
+
@builtin
|
| 584 |
+
def __rtruediv__(self, other, _builder=None):
|
| 585 |
+
other = _to_tensor(other, _builder)
|
| 586 |
+
return semantic.truediv(other, self, _builder)
|
| 587 |
+
|
| 588 |
+
@builtin
|
| 589 |
+
def __floordiv__(self, other, _builder=None):
|
| 590 |
+
other = _to_tensor(other, _builder)
|
| 591 |
+
return semantic.floordiv(self, other, _builder)
|
| 592 |
+
|
| 593 |
+
@builtin
|
| 594 |
+
def __rfloordiv__(self, other, _builder=None):
|
| 595 |
+
other = _to_tensor(other, _builder)
|
| 596 |
+
return semantic.floordiv(other, self, _builder)
|
| 597 |
+
|
| 598 |
+
@builtin
|
| 599 |
+
def __mod__(self, other, _builder=None):
|
| 600 |
+
other = _to_tensor(other, _builder)
|
| 601 |
+
return semantic.mod(self, other, _builder)
|
| 602 |
+
|
| 603 |
+
@builtin
|
| 604 |
+
def __rmod__(self, other, _builder=None):
|
| 605 |
+
other = _to_tensor(other, _builder)
|
| 606 |
+
return semantic.mod(other, self, _builder)
|
| 607 |
+
|
| 608 |
+
# unary operators
|
| 609 |
+
@builtin
|
| 610 |
+
def __neg__(self, _builder=None):
|
| 611 |
+
return semantic.minus(self, _builder)
|
| 612 |
+
|
| 613 |
+
@builtin
|
| 614 |
+
def __invert__(self, _builder=None):
|
| 615 |
+
return semantic.invert(self, _builder)
|
| 616 |
+
|
| 617 |
+
# bitwise operators
|
| 618 |
+
|
| 619 |
+
@builtin
|
| 620 |
+
def __and__(self, other, _builder=None):
|
| 621 |
+
other = _to_tensor(other, _builder)
|
| 622 |
+
return semantic.and_(self, other, _builder)
|
| 623 |
+
|
| 624 |
+
@builtin
|
| 625 |
+
def __rand__(self, other, _builder=None):
|
| 626 |
+
other = _to_tensor(other, _builder)
|
| 627 |
+
return semantic.and_(other, self, _builder)
|
| 628 |
+
|
| 629 |
+
@builtin
|
| 630 |
+
def __or__(self, other, _builder=None):
|
| 631 |
+
other = _to_tensor(other, _builder)
|
| 632 |
+
return semantic.or_(self, other, _builder)
|
| 633 |
+
|
| 634 |
+
@builtin
|
| 635 |
+
def __ror__(self, other, _builder=None):
|
| 636 |
+
other = _to_tensor(other, _builder)
|
| 637 |
+
return semantic.or_(other, self, _builder)
|
| 638 |
+
|
| 639 |
+
@builtin
|
| 640 |
+
def __xor__(self, other, _builder=None):
|
| 641 |
+
other = _to_tensor(other, _builder)
|
| 642 |
+
return semantic.xor_(self, other, _builder)
|
| 643 |
+
|
| 644 |
+
@builtin
|
| 645 |
+
def __rxor__(self, other, _builder=None):
|
| 646 |
+
other = _to_tensor(other, _builder)
|
| 647 |
+
return semantic.xor_(other, self, _builder)
|
| 648 |
+
|
| 649 |
+
@builtin
|
| 650 |
+
def __lshift__(self, other, _builder=None):
|
| 651 |
+
other = _to_tensor(other, _builder)
|
| 652 |
+
return semantic.shl(self, other, _builder)
|
| 653 |
+
|
| 654 |
+
@builtin
|
| 655 |
+
def __rlshift__(self, other, _builder=None):
|
| 656 |
+
other = _to_tensor(other, _builder)
|
| 657 |
+
return semantic.shl(other, self, _builder)
|
| 658 |
+
|
| 659 |
+
@builtin
|
| 660 |
+
def __rshift__(self, other, _builder=None):
|
| 661 |
+
other = _to_tensor(other, _builder)
|
| 662 |
+
if self.dtype.is_int_signed():
|
| 663 |
+
return semantic.ashr(self, other, _builder)
|
| 664 |
+
else:
|
| 665 |
+
return semantic.lshr(self, other, _builder)
|
| 666 |
+
|
| 667 |
+
@builtin
|
| 668 |
+
def __rrshift__(self, other, _builder=None):
|
| 669 |
+
other = _to_tensor(other, _builder)
|
| 670 |
+
if self.dtype.is_int_signed():
|
| 671 |
+
return semantic.ashr(other, self, _builder)
|
| 672 |
+
else:
|
| 673 |
+
return semantic.lshr(other, self, _builder)
|
| 674 |
+
|
| 675 |
+
# >
|
| 676 |
+
@builtin
|
| 677 |
+
def __gt__(self, other, _builder=None):
|
| 678 |
+
other = _to_tensor(other, _builder)
|
| 679 |
+
return semantic.greater_than(self, other, _builder)
|
| 680 |
+
|
| 681 |
+
@builtin
|
| 682 |
+
def __rgt__(self, other, _builder=None):
|
| 683 |
+
other = _to_tensor(other, _builder)
|
| 684 |
+
return semantic.greater_than(other, self, _builder)
|
| 685 |
+
|
| 686 |
+
# >=
|
| 687 |
+
@builtin
|
| 688 |
+
def __ge__(self, other, _builder=None):
|
| 689 |
+
other = _to_tensor(other, _builder)
|
| 690 |
+
return semantic.greater_equal(self, other, _builder)
|
| 691 |
+
|
| 692 |
+
@builtin
|
| 693 |
+
def __rge__(self, other, _builder=None):
|
| 694 |
+
other = _to_tensor(other, _builder)
|
| 695 |
+
return semantic.greater_equal(other, self, _builder)
|
| 696 |
+
|
| 697 |
+
# <
|
| 698 |
+
@builtin
|
| 699 |
+
def __lt__(self, other, _builder=None):
|
| 700 |
+
other = _to_tensor(other, _builder)
|
| 701 |
+
return semantic.less_than(self, other, _builder)
|
| 702 |
+
|
| 703 |
+
@builtin
|
| 704 |
+
def __rlt__(self, other, _builder=None):
|
| 705 |
+
other = _to_tensor(other, _builder)
|
| 706 |
+
return semantic.less_than(other, self, _builder)
|
| 707 |
+
|
| 708 |
+
# <=
|
| 709 |
+
@builtin
|
| 710 |
+
def __le__(self, other, _builder=None):
|
| 711 |
+
other = _to_tensor(other, _builder)
|
| 712 |
+
return semantic.less_equal(self, other, _builder)
|
| 713 |
+
|
| 714 |
+
@builtin
|
| 715 |
+
def __rle__(self, other, _builder=None):
|
| 716 |
+
other = _to_tensor(other, _builder)
|
| 717 |
+
return semantic.less_equal(other, self, _builder)
|
| 718 |
+
|
| 719 |
+
# ==
|
| 720 |
+
@builtin
|
| 721 |
+
def __eq__(self, other, _builder=None):
|
| 722 |
+
other = _to_tensor(other, _builder)
|
| 723 |
+
return semantic.equal(self, other, _builder)
|
| 724 |
+
|
| 725 |
+
@builtin
|
| 726 |
+
def __req__(self, other, _builder=None):
|
| 727 |
+
other = _to_tensor(other, _builder)
|
| 728 |
+
return semantic.equal(other, self, _builder)
|
| 729 |
+
|
| 730 |
+
@builtin
|
| 731 |
+
def __ne__(self, other, _builder=None):
|
| 732 |
+
other = _to_tensor(other, _builder)
|
| 733 |
+
return semantic.not_equal(self, other, _builder)
|
| 734 |
+
|
| 735 |
+
@builtin
|
| 736 |
+
def __rne__(self, other, _builder=None):
|
| 737 |
+
other = _to_tensor(other, _builder)
|
| 738 |
+
return semantic.not_equal(other, self, _builder)
|
| 739 |
+
|
| 740 |
+
@builtin
|
| 741 |
+
def logical_and(self, other, _builder=None):
|
| 742 |
+
other = _to_tensor(other, _builder)
|
| 743 |
+
return semantic.logical_and(self, other, _builder)
|
| 744 |
+
|
| 745 |
+
@builtin
|
| 746 |
+
def logical_or(self, other, _builder=None):
|
| 747 |
+
other = _to_tensor(other, _builder)
|
| 748 |
+
return semantic.logical_or(self, other, _builder)
|
| 749 |
+
|
| 750 |
+
# note: __not__ isn't actually a magic method in python
|
| 751 |
+
# but it's ok because our ASTVisitor handles it
|
| 752 |
+
@builtin
|
| 753 |
+
def __not__(self, _builder=None):
|
| 754 |
+
return semantic.not_(self, _builder)
|
| 755 |
+
|
| 756 |
+
@builtin
|
| 757 |
+
def __getitem__(self, slices, _builder=None):
|
| 758 |
+
if isinstance(slices, (slice, constexpr)):
|
| 759 |
+
slices = [slices]
|
| 760 |
+
ret = self
|
| 761 |
+
for dim, sl in enumerate(slices):
|
| 762 |
+
if sl is None or isinstance(sl, constexpr) and sl.value is None:
|
| 763 |
+
ret = semantic.expand_dims(ret, dim, _builder)
|
| 764 |
+
elif isinstance(sl, slice) and sl.start is None and sl.stop is None and sl.step is None:
|
| 765 |
+
pass
|
| 766 |
+
else:
|
| 767 |
+
assert False, f"unsupported tensor index: {sl}"
|
| 768 |
+
return ret
|
| 769 |
+
|
| 770 |
+
@property
|
| 771 |
+
def T(self):
|
| 772 |
+
assert False, "Transposition must be created by the AST Visitor"
|
| 773 |
+
|
| 774 |
+
@builtin
|
| 775 |
+
def to(self, dtype, bitcast=False, _builder=None):
|
| 776 |
+
if isinstance(bitcast, constexpr):
|
| 777 |
+
bitcast = bitcast.value
|
| 778 |
+
if bitcast:
|
| 779 |
+
return semantic.bitcast(self, dtype, _builder)
|
| 780 |
+
return semantic.cast(self, dtype, _builder)
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
# -----------------------
|
| 784 |
+
# SPMD Programming Model
|
| 785 |
+
# -----------------------
|
| 786 |
+
def _constexpr_to_value(v):
|
| 787 |
+
if isinstance(v, constexpr):
|
| 788 |
+
return v.value
|
| 789 |
+
return v
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
@builtin
|
| 793 |
+
def program_id(axis, _builder=None):
|
| 794 |
+
"""
|
| 795 |
+
Returns the id of the current program instance along the given :code:`axis`.
|
| 796 |
+
|
| 797 |
+
:param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2.
|
| 798 |
+
:type axis: int
|
| 799 |
+
"""
|
| 800 |
+
# if axis == -1:
|
| 801 |
+
# pid0 = program_id(0, _builder)
|
| 802 |
+
# pid1 = program_id(1, _builder)
|
| 803 |
+
# pid2 = program_id(2, _builder)
|
| 804 |
+
# npg0 = num_programs(0, _builder)
|
| 805 |
+
# npg1 = num_programs(0, _builder)
|
| 806 |
+
# return pid0 + pid1*npg0 + pid2*npg0*npg1
|
| 807 |
+
axis = _constexpr_to_value(axis)
|
| 808 |
+
return semantic.program_id(axis, _builder)
|
| 809 |
+
|
| 810 |
+
|
| 811 |
+
@builtin
|
| 812 |
+
def num_programs(axis, _builder=None):
|
| 813 |
+
"""
|
| 814 |
+
Returns the number of program instances launched along the given :code:`axis`.
|
| 815 |
+
|
| 816 |
+
:param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2.
|
| 817 |
+
:type axis: int
|
| 818 |
+
"""
|
| 819 |
+
axis = _constexpr_to_value(axis)
|
| 820 |
+
return semantic.num_programs(axis, _builder)
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
# -----------------------
|
| 824 |
+
# Block Initialization
|
| 825 |
+
# -----------------------
|
| 826 |
+
|
| 827 |
+
|
| 828 |
+
@builtin
|
| 829 |
+
def arange(start, end, _builder=None):
|
| 830 |
+
"""
|
| 831 |
+
Returns contiguous values within the left-closed and right-open interval [:code:`start`, :code:`end`). \
|
| 832 |
+
End - Start must be less than or equal to TRITON_MAX_TENSOR_NUMEL = 131072
|
| 833 |
+
|
| 834 |
+
:param start: Start of the interval. Must be a power of two.
|
| 835 |
+
:type start: int32
|
| 836 |
+
:param end: End of the interval. Must be a power of two > start.
|
| 837 |
+
:type end: int32
|
| 838 |
+
"""
|
| 839 |
+
start = _constexpr_to_value(start)
|
| 840 |
+
end = _constexpr_to_value(end)
|
| 841 |
+
return semantic.arange(start, end, _builder)
|
| 842 |
+
|
| 843 |
+
|
| 844 |
+
def _shape_check_impl(shape):
|
| 845 |
+
shape = _constexpr_to_value(shape)
|
| 846 |
+
for i, d in enumerate(shape):
|
| 847 |
+
if isinstance(d, int):
|
| 848 |
+
d = constexpr(d)
|
| 849 |
+
if not isinstance(d, constexpr):
|
| 850 |
+
raise TypeError(f"Shape element {i} must have type `constexpr`")
|
| 851 |
+
if not isinstance(d.value, int):
|
| 852 |
+
raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]")
|
| 853 |
+
return [_constexpr_to_value(x) for x in shape]
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
@builtin
|
| 857 |
+
def full(shape, value, dtype, _builder=None):
|
| 858 |
+
"""
|
| 859 |
+
Returns a tensor filled with the scalar value for the given :code:`shape` and :code:`dtype`.
|
| 860 |
+
|
| 861 |
+
:param shape: Shape of the new array, e.g., (8, 16) or (8, )
|
| 862 |
+
:value value: A scalar value to fill the array with
|
| 863 |
+
:type shape: tuple of ints
|
| 864 |
+
:param dtype: Data-type of the new array, e.g., :code:`tl.float16`
|
| 865 |
+
:type dtype: DType
|
| 866 |
+
"""
|
| 867 |
+
shape = _shape_check_impl(shape)
|
| 868 |
+
value = _constexpr_to_value(value)
|
| 869 |
+
dtype = _constexpr_to_value(dtype)
|
| 870 |
+
return semantic.full(shape, value, dtype, _builder)
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
# -----------------------
|
| 874 |
+
# Shape Manipulation
|
| 875 |
+
# -----------------------
|
| 876 |
+
|
| 877 |
+
|
| 878 |
+
@builtin
|
| 879 |
+
def broadcast(input, other, _builder=None):
|
| 880 |
+
"""
|
| 881 |
+
Tries to broadcast the two given blocks to a common compatible shape.
|
| 882 |
+
|
| 883 |
+
:param input: The first input tensor.
|
| 884 |
+
:type input: Block
|
| 885 |
+
:param other: The second input tensor.
|
| 886 |
+
:type other: Block
|
| 887 |
+
"""
|
| 888 |
+
return semantic.broadcast_impl_value(input, other, _builder)
|
| 889 |
+
|
| 890 |
+
|
| 891 |
+
@builtin
def broadcast_to(input, shape, _builder=None):
    """
    Broadcast the given tensor to the requested :code:`shape`.

    :param input: The input tensor.
    :type input: Block
    :param shape: The desired shape.
    :type shape: Tuple[int]
    """
    # Validate the target shape before handing off to the semantic layer.
    checked_shape = _shape_check_impl(shape)
    return semantic.broadcast_impl_shape(input, checked_shape, _builder)
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
@builtin
def trans(input, _builder=None):
    """
    Returns a transposed tensor.

    :param input: The input tensor.
    :type input: Block
    """
    return semantic.trans(input, _builder)
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
@builtin
def cat(input, other, can_reorder=False, _builder=None):
    """
    Concatenate the given blocks

    :param input: The first input tensor.
    :type input: Block
    :param other: The second input tensor.
    :type other: Block
    :param can_reorder: Compiler hint. If true, the compiler is
        allowed to reorder elements while concatenating inputs. Only use if the
        order does not matter (e.g., result is only used in reduction ops)
    """
    return semantic.cat(input, other, can_reorder, _builder)
|
| 930 |
+
|
| 931 |
+
|
| 932 |
+
@builtin
def view(input, shape, _builder=None):
    """
    Returns a tensor with the same elements as `input` but a different shape.
    The order of the elements may not be preserved.

    :param input: The input tensor.
    :param shape: The desired shape.
    :type shape: Tuple[int]
    """
    target_shape = _shape_check_impl(shape)
    return semantic.view(input, target_shape, _builder)
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
@builtin
def reshape(input, shape, _builder=None):
    """
    Returns a tensor holding the same number of elements as `input`, laid out
    with the provided shape.

    :param input: The input tensor.
    :param shape: The new shape.
    :type shape: Tuple[int]
    """
    target_shape = _shape_check_impl(shape)
    return semantic.reshape(input, target_shape, _builder)
|
| 961 |
+
|
| 962 |
+
|
| 963 |
+
def _wrap_axis(axis, ndim):
|
| 964 |
+
if not (-ndim <= axis < ndim):
|
| 965 |
+
raise ValueError(f"invalid axis {axis}. Expected {-ndim} <= axis < {ndim}")
|
| 966 |
+
|
| 967 |
+
return axis if axis >= 0 else axis + ndim
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
@builtin
def expand_dims(input, axis, _builder=None):
    """
    Expand the shape of a tensor, by inserting new length-1 dimensions.

    Axis indices are with respect to the resulting tensor, so
    ``result.shape[axis]`` will be 1 for each axis.

    :param input: The input tensor.
    :type input: tl.tensor
    :param axis: The indices to add new axes
    :type axis: int | Sequence[int]

    """
    axis = _constexpr_to_value(axis)
    axes = list(axis) if isinstance(axis, Sequence) else [axis]
    # Axes are interpreted relative to the *result*, whose rank is
    # input rank + number of inserted axes.
    new_ndim = len(input.shape) + len(axes)
    axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes]

    if len(set(axes)) != len(axes):
        # Fixed typo in the error message: "recieved" -> "received".
        raise ValueError(f"expand_dims received duplicate axes, normalized axes = {axes}")

    # Insert in ascending order so earlier insertions don't shift the
    # positions of the later ones.
    ret = input
    for a in sorted(axes):
        ret = semantic.expand_dims(ret, a, _builder)
    return ret
|
| 996 |
+
|
| 997 |
+
|
| 998 |
+
# -----------------------
|
| 999 |
+
# Linear Algebra
|
| 1000 |
+
# -----------------------
|
| 1001 |
+
|
| 1002 |
+
|
| 1003 |
+
@builtin
def dot(input, other, acc=None, allow_tf32=True, max_num_imprecise_acc=None, out_dtype=float32, _builder=None):
    """
    Returns the matrix product of two blocks.

    The two blocks must be two-dimensional and have compatible inner dimensions.

    :param input: The first tensor to be multiplied.
    :type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`}
    :param other: The second tensor to be multiplied.
    :type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`}
    :param acc: optional accumulator tensor added into the product
        (presumably the shape of the result — confirm in `semantic.dot`)
    :param allow_tf32: whether TF32 precision is allowed for the computation
    :param out_dtype: data type of the output tensor
    """
    # Unwrap compile-time options; `input`/`other`/`acc` pass through as tensors.
    allow_tf32 = _constexpr_to_value(allow_tf32)
    out_dtype = _constexpr_to_value(out_dtype)
    max_num_imprecise_acc = _constexpr_to_value(max_num_imprecise_acc)
    return semantic.dot(input, other, acc, allow_tf32, max_num_imprecise_acc, out_dtype, _builder)
|
| 1019 |
+
|
| 1020 |
+
|
| 1021 |
+
# -----------------------
|
| 1022 |
+
# Non-Atomic Memory Operations
|
| 1023 |
+
# -----------------------
|
| 1024 |
+
|
| 1025 |
+
|
| 1026 |
+
@builtin
def load(pointer, mask=None, other=None, boundary_check=tuple(), padding_option="", cache_modifier="",
         eviction_policy="", volatile=False, _builder=None):
    """
    Return a tensor of data whose values are loaded from memory at location defined by `pointer`:
        (1) `pointer` could be a single element pointer, then a scalar will be loaded

            - `mask` and `other` must be scalar too
            - `other` is implicitly typecast to `pointer.dtype.element_ty`
            - `boundary_check` and `padding_option` must be empty

        (2) `pointer` could be element-wise tensor of pointers, in which case:

            - `mask` and `other` are implicitly broadcast to `pointer.shape`
            - `other` is implicitly typecast to `pointer.dtype.element_ty`
            - `boundary_check` and `padding_option` must be empty

        (3) `pointer` could be a block pointer defined by `make_block_ptr`, in which case:

            - `mask` and `other` must be None
            - `boundary_check` and `padding_option` can be specified to control the behavior of out-of-bound access

    :param pointer: Pointer to the data to be loaded
    :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType`
    :param mask: if `mask[idx]` is false, do not load the data at address `pointer[idx]`
        (must be `None` with block pointers)
    :type mask: Block of `triton.int1`, optional
    :param other: if `mask[idx]` is false, return `other[idx]`
    :type other: Block, optional
    :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check
    :type boundary_check: tuple of ints, optional
    :param padding_option: should be one of {"", "zero", "nan"}, do padding while out of bound
    :param cache_modifier: changes cache option in NVIDIA PTX
    :type cache_modifier: str, optional
    :param eviction_policy: changes eviction policy in NVIDIA PTX
    :type eviction_policy: str, optional
    :param volatile: changes volatile option in NVIDIA PTX
    :type volatile: bool, optional
    """
    # `mask` and `other` can be constexpr
    if _constexpr_to_value(mask) is not None:
        mask = _to_tensor(mask, _builder)
    if _constexpr_to_value(other) is not None:
        other = _to_tensor(other, _builder)
    # Unwrap remaining compile-time options before lowering.
    # NOTE(review): `boundary_check` is forwarded without unwrapping —
    # presumably handled inside `semantic.load`; confirm.
    padding_option = _constexpr_to_value(padding_option)
    cache_modifier = _constexpr_to_value(cache_modifier)
    eviction_policy = _constexpr_to_value(eviction_policy)
    volatile = _constexpr_to_value(volatile)
    return semantic.load(pointer, mask, other, boundary_check, padding_option, cache_modifier, eviction_policy,
                         volatile, _builder)
|
| 1076 |
+
|
| 1077 |
+
|
| 1078 |
+
@builtin
def store(pointer, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="", _builder=None):
    """
    Store a tensor of data into memory locations defined by `pointer`:
        (1) `pointer` could be a single element pointer, then a scalar will be stored

            - `mask` must be scalar too
            - `boundary_check` and `padding_option` must be empty

        (2) `pointer` could be element-wise tensor of pointers, in which case:

            - `mask` is implicitly broadcast to `pointer.shape`
            - `boundary_check` must be empty

        (3) or `pointer` could be a block pointer defined by `make_block_ptr`, in which case:

            - `mask` must be None
            - `boundary_check` can be specified to control the behavior of out-of-bound access

    `value` is implicitly broadcast to `pointer.shape` and typecast to `pointer.dtype.element_ty`.

    :param pointer: The memory location where the elements of `value` are stored
    :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType`
    :param value: The tensor of elements to be stored
    :type value: Block
    :param mask: If `mask[idx]` is false, do not store `value[idx]` at `pointer[idx]`
    :type mask: Block of triton.int1, optional
    :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check
    :type boundary_check: tuple of ints, optional
    :param cache_modifier: changes cache option in NVIDIA PTX
    :type cache_modifier: str, optional
    :param eviction_policy: changes eviction policy in NVIDIA PTX
    :type eviction_policy: str, optional
    """
    # `value` can be constexpr
    value = _to_tensor(value, _builder)
    if _constexpr_to_value(mask) is not None:
        mask = _to_tensor(mask, _builder)
    # Unwrap compile-time string options before lowering.
    cache_modifier = _constexpr_to_value(cache_modifier)
    eviction_policy = _constexpr_to_value(eviction_policy)
    return semantic.store(pointer, value, mask, boundary_check, cache_modifier, eviction_policy, _builder)
|
| 1119 |
+
|
| 1120 |
+
|
| 1121 |
+
@builtin
def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None):
    """
    Returns a pointer to a block in a parent tensor

    :param base: The base pointer to the parent tensor
    :param shape: The shape of the parent tensor
    :param strides: The strides of the parent tensor
    :param offsets: The offsets to the block
    :param block_shape: The shape of the block
    :param order: The order of the original data format
    """
    # All arguments are forwarded unchanged to the semantic layer.
    return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder)
|
| 1134 |
+
|
| 1135 |
+
|
| 1136 |
+
@builtin
def advance(base: tensor, offsets, _builder=None):
    """
    Advance a block pointer

    :param base: the block pointer to advance
    :param offsets: the offsets to advance, a tuple by dimension
    """
    return semantic.advance(base, offsets, _builder)
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
# -----------------------
|
| 1148 |
+
# Atomic Memory Operations
|
| 1149 |
+
# -----------------------
|
| 1150 |
+
|
| 1151 |
+
|
| 1152 |
+
def _add_atomic_docstr(name: str, has_cmp: bool = False) -> Callable[[T], T]:
    # Decorator factory: builds a standardized docstring for the atomic
    # builtins below and attaches it to the decorated function.
    # `has_cmp` adds the `cmp` parameter section (only atomic_cas uses it).

    def _decorator(func: T) -> T:
        docstr = f"""
    Performs an atomic {name} at the memory location specified by :code:`pointer`.

    Return the data stored at :code:`pointer` before the atomic operation.

    :param pointer: The memory locations to operate on
    :type pointer: Block of dtype=triton.PointerDType"""
        if has_cmp:
            docstr += """
    :param cmp: The values expected to be found in the atomic object
    :type cmp: Block of dtype=pointer.dtype.element_ty"""
        docstr += """
    :param val: The values with which to perform the atomic operation
    :type val: Block of dtype=pointer.dtype.element_ty
    :param sem: Memory semantics to use ("ACQUIRE_RELEASE" (default),
        "ACQUIRE", "RELEASE", or "RELAXED")
    :type sem: str
    :param scope: Scope of threads that observe synchronizing effect of the
        atomic operation ("GPU" (default), "CTA", or "SYSTEM")
    :type scope: str
    """
        func.__doc__ = docstr
        return func

    return _decorator
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
@builtin
@_add_atomic_docstr("compare-and-swap", has_cmp=True)
def atomic_cas(pointer, cmp, val, sem=None, scope=None, _builder=None):
    # Materialize operands as tensors, unwrap compile-time options, lower.
    cmp, val = _to_tensor(cmp, _builder), _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_cas(pointer, cmp, val, sem, scope, _builder)
|
| 1190 |
+
|
| 1191 |
+
|
| 1192 |
+
@builtin
@_add_atomic_docstr("exchange")
def atomic_xchg(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_xchg(pointer, val, mask, sem, scope, _builder)
|
| 1199 |
+
|
| 1200 |
+
|
| 1201 |
+
@builtin
@_add_atomic_docstr("add")
def atomic_add(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_add(pointer, val, mask, sem, scope, _builder)
|
| 1208 |
+
|
| 1209 |
+
|
| 1210 |
+
@builtin
@_add_atomic_docstr("max")
def atomic_max(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_max(pointer, val, mask, sem, scope, _builder)
|
| 1217 |
+
|
| 1218 |
+
|
| 1219 |
+
@builtin
@_add_atomic_docstr("min")
def atomic_min(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_min(pointer, val, mask, sem, scope, _builder)
|
| 1226 |
+
|
| 1227 |
+
|
| 1228 |
+
@builtin
@_add_atomic_docstr("logical and")
def atomic_and(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_and(pointer, val, mask, sem, scope, _builder)
|
| 1235 |
+
|
| 1236 |
+
|
| 1237 |
+
@builtin
@_add_atomic_docstr("logical or")
def atomic_or(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_or(pointer, val, mask, sem, scope, _builder)
|
| 1244 |
+
|
| 1245 |
+
|
| 1246 |
+
@builtin
@_add_atomic_docstr("logical xor")
def atomic_xor(pointer, val, mask=None, sem=None, scope=None, _builder=None):
    # Materialize the operand, unwrap compile-time options, lower.
    val = _to_tensor(val, _builder)
    sem, scope = _constexpr_to_value(sem), _constexpr_to_value(scope)
    return semantic.atomic_xor(pointer, val, mask, sem, scope, _builder)
|
| 1253 |
+
|
| 1254 |
+
|
| 1255 |
+
# -----------------------
|
| 1256 |
+
# Conditioning
|
| 1257 |
+
# -----------------------
|
| 1258 |
+
|
| 1259 |
+
|
| 1260 |
+
@builtin
def where(condition, x, y, _builder=None):
    """
    Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`.

    Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`.

    If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead.

    The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`.
    :code:`x` and :code:`y` must have the same data type.

    :param condition: When True (nonzero), yield x, otherwise yield y.
    :type condition: Block of triton.bool
    :param x: values selected at indices where condition is True.
    :param y: values selected at indices where condition is False.
    """
    # All three operands may be constexpr / scalars; coerce to tensors first.
    condition = _to_tensor(condition, _builder)
    x = _to_tensor(x, _builder)
    y = _to_tensor(y, _builder)
    return semantic.where(condition, x, y, _builder)
|
| 1281 |
+
|
| 1282 |
+
|
| 1283 |
+
# -----------------------
|
| 1284 |
+
# Math
|
| 1285 |
+
# -----------------------
|
| 1286 |
+
|
| 1287 |
+
|
| 1288 |
+
@builtin
def umulhi(x, y, _builder=None):
    """
    Returns the most significant 32 bits of the product of x and y.

    :param x: the input tensor
    :type x: int32
    :param y: the input tensor
    :type y: int32
    """
    x, y = _to_tensor(x, _builder), _to_tensor(y, _builder)
    return semantic.umulhi(x, y, _builder)
|
| 1301 |
+
|
| 1302 |
+
|
| 1303 |
+
@builtin
def fdiv(x, y, ieee_rounding=False, _builder=None):
    """
    Returns a floating-point resultant tensor of dividing x by y.

    :param x: the input numerator value.
    :param y: the input denominator value.
    :param ieee_rounding: To follow IEEE-754 floating point number
        rounding mechanism
    :type ieee_rounding: bool
    """
    x, y = _to_tensor(x, _builder), _to_tensor(y, _builder)
    ieee_rounding = _constexpr_to_value(ieee_rounding)
    return semantic.fdiv(x, y, ieee_rounding, _builder)
|
| 1318 |
+
|
| 1319 |
+
|
| 1320 |
+
def _add_math_1arg_docstr(name: str) -> Callable[[T], T]:
    # Decorator factory: attaches a standardized one-argument math docstring
    # (parameterized by the operation name) to the decorated function.

    def _decorator(func: T) -> T:
        docstr = """
    Computes the element-wise {name} of :code:`x`.

    :param x: the input values
    :type x: Block
    """
        func.__doc__ = docstr.format(name=name)
        return func

    return _decorator
|
| 1333 |
+
|
| 1334 |
+
|
| 1335 |
+
@builtin
@_add_math_1arg_docstr("exponential")
def exp(x, _builder=None):
    # Coerce to tensor, then lower via the semantic layer.
    return semantic.exp(_to_tensor(x, _builder), _builder)
|
| 1340 |
+
|
| 1341 |
+
|
| 1342 |
+
@builtin
@_add_math_1arg_docstr("natural logarithm")
def log(x, _builder=None):
    # Coerce to tensor, then lower via the semantic layer.
    return semantic.log(_to_tensor(x, _builder), _builder)
|
| 1347 |
+
|
| 1348 |
+
|
| 1349 |
+
@builtin
@_add_math_1arg_docstr("cosine")
def cos(x, _builder=None):
    # Coerce to tensor, then lower via the semantic layer.
    return semantic.cos(_to_tensor(x, _builder), _builder)
|
| 1354 |
+
|
| 1355 |
+
|
| 1356 |
+
@builtin
@_add_math_1arg_docstr("sine")
def sin(x, _builder=None):
    # Coerce to tensor, then lower via the semantic layer.
    return semantic.sin(_to_tensor(x, _builder), _builder)
|
| 1361 |
+
|
| 1362 |
+
|
| 1363 |
+
@builtin
@_add_math_1arg_docstr("square root")
def sqrt(x, _builder=None):
    # Coerce to tensor, then lower via the semantic layer.
    return semantic.sqrt(_to_tensor(x, _builder), _builder)
|
| 1368 |
+
|
| 1369 |
+
|
| 1370 |
+
@builtin
@_add_math_1arg_docstr("absolute value")
def abs(x, _builder=None):
    # Coerce to tensor, then lower via the semantic layer.
    return semantic.abs(_to_tensor(x, _builder), _builder)
|
| 1375 |
+
|
| 1376 |
+
|
| 1377 |
+
# -----------------------
|
| 1378 |
+
# Reductions
|
| 1379 |
+
# -----------------------
|
| 1380 |
+
|
| 1381 |
+
|
| 1382 |
+
def _add_reduction_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]:
    # Decorator factory: attaches a standardized reduction docstring.
    # Optional argument names, when given, get their own ":param:" entries.

    def _decorator(func: T) -> T:
        docstr = """
    Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis`

    :param input: the input values
    :param axis: the dimension along which the reduction should be done"""
        if return_indices_arg is not None:
            docstr += f"""
    :param {return_indices_arg}: if true, return index corresponding to the {name} value"""
        if tie_break_arg is not None:
            docstr += f"""
    :param {tie_break_arg}: if true, return the left-most indices in case of ties for values that aren't NaN"""

        func.__doc__ = docstr.format(name=name)
        return func

    return _decorator
|
| 1401 |
+
|
| 1402 |
+
|
| 1403 |
+
@contextmanager
|
| 1404 |
+
def _insertion_guard(builder):
|
| 1405 |
+
ip = builder.get_insertion_point()
|
| 1406 |
+
yield
|
| 1407 |
+
builder.restore_insertion_point(ip)
|
| 1408 |
+
|
| 1409 |
+
|
| 1410 |
+
@builtin
def reduce(input, axis, combine_fn, _builder=None, _generator=None):
    """Applies the combine_fn to all elements in :code:`input` tensors along the provided :code:`axis`

    :param input: the input tensor, or tuple of tensors
    :param axis: the dimension along which the reduction should be done
    :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit)

    """
    # Single-tensor input: wrap in a 1-tuple, recurse, unwrap the single result.
    if isinstance(input, tensor):
        return reduce((input, ), axis, combine_fn, _builder=_builder, _generator=_generator)[0]

    def make_combine_region(reduce_op):
        # Build the combine region of the reduce op: a block taking two groups
        # of scalar arguments and returning the combined scalars.
        in_scalar_tys = [t.type.scalar for t in input]
        prototype = function_type(in_scalar_tys, in_scalar_tys * 2)

        region = reduce_op.get_region(0)
        with _insertion_guard(_builder):
            param_types = [ty.to_ir(_builder) for ty in prototype.param_types]
            block = _builder.create_block_with_parent(region, param_types)
            args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)]
            # Inline the user's @triton.jit combine function into the region.
            results = _generator.call_JitFunction(combine_fn, args, kwargs={})
            if isinstance(results, tensor):
                handles = [results.handle]
            else:
                handles = [r.handle for r in results]
            _builder.create_reduce_ret(*handles)

    # axis=None means reduce over all dimensions; only unwrap when given.
    if axis is not None:
        axis = _constexpr_to_value(axis)
    return semantic.reduction(input, axis, make_combine_region, _builder)
|
| 1441 |
+
|
| 1442 |
+
|
| 1443 |
+
@builtin
def _promote_reduction_input(t, _builder=None):
    # Promote reduction inputs to a type the hardware can reduce directly.
    scalar_ty = t.type.scalar

    # hardware doesn't support FMAX, FMIN, CMP for bfloat16
    if scalar_ty is bfloat16:
        return t.to(float32, _builder=_builder)

    return t
|
| 1452 |
+
|
| 1453 |
+
|
| 1454 |
+
@builtin
def _reduce_with_indices(input, axis, combine_fn, _builder=None, _generator=None):
    # Reduce `input` along `axis`, also returning the index of the selected
    # element (used by argmin/argmax-style reductions).
    axis = _constexpr_to_value(axis)
    n = input.shape[axis]
    # Build an index tensor [0, n) along the reduced axis.
    index = arange(0, n, _builder=_builder)

    if len(input.shape) > 1:
        # Broadcast index across the non-reduced axes
        axes_to_expand = [constexpr(d) for d in range(len(input.shape))]
        del axes_to_expand[axis]
        index = expand_dims(index, axes_to_expand, _builder=_builder)
        index = broadcast_to(index, input.shape, _builder=_builder)

    rvalue, rindices = reduce((input, index), axis, combine_fn, _builder=_builder, _generator=_generator)
    return rvalue, rindices
|
| 1469 |
+
|
| 1470 |
+
|
| 1471 |
+
# -----------------------
|
| 1472 |
+
# Scans
|
| 1473 |
+
# -----------------------
|
| 1474 |
+
|
| 1475 |
+
|
| 1476 |
+
def _add_scan_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]:
    # Decorator factory: attaches a standardized scan docstring.
    # Fixed: `return_indices_arg` and `tie_break_arg` were accepted but
    # silently ignored; they are now documented the same way
    # `_add_reduction_docstr` documents them (no change when omitted).

    def _decorator(func: T) -> T:
        docstr = """
    Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis`

    :param input: the input values
    :param axis: the dimension along which the scan should be done"""
        if return_indices_arg is not None:
            docstr += f"""
    :param {return_indices_arg}: if true, return index corresponding to the {name} value"""
        if tie_break_arg is not None:
            docstr += f"""
    :param {tie_break_arg}: if true, return the left-most indices in case of ties for values that aren't NaN"""
        func.__doc__ = docstr.format(name=name)
        return func

    return _decorator
|
| 1488 |
+
|
| 1489 |
+
|
| 1490 |
+
@builtin
def associative_scan(input, axis, combine_fn, _builder=None, _generator=None):
    """Applies the combine_fn to each elements with a carry in :code:`input` tensors along the provided :code:`axis` and update the carry

    :param input: the input tensor, or tuple of tensors
    :param axis: the dimension along which the reduction should be done
    :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit)

    """
    # Single-tensor input: wrap in a 1-tuple, recurse, unwrap the single result.
    if isinstance(input, tensor):
        return associative_scan((input, ), axis, combine_fn, _builder=_builder, _generator=_generator)[0]

    def make_combine_region(scan_op):
        # Build the combine region of the scan op, mirroring `reduce` above.
        in_scalar_tys = [t.type.scalar for t in input]
        prototype = function_type(in_scalar_tys, in_scalar_tys * 2)

        region = scan_op.get_region(0)
        with _insertion_guard(_builder):
            param_types = [ty.to_ir(_builder) for ty in prototype.param_types]
            block = _builder.create_block_with_parent(region, param_types)
            args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)]
            # Inline the user's @triton.jit combine function into the region.
            results = _generator.call_JitFunction(combine_fn, args, kwargs={})
            if isinstance(results, tensor):
                handles = [results.handle]
            else:
                handles = [r.handle for r in results]
            _builder.create_scan_ret(*handles)

    axis = _constexpr_to_value(axis)
    return semantic.associative_scan(input, axis, make_combine_region, _builder)
|
| 1520 |
+
|
| 1521 |
+
|
| 1522 |
+
# -----------------------
|
| 1523 |
+
# Compiler Hint Ops
|
| 1524 |
+
# -----------------------
|
| 1525 |
+
|
| 1526 |
+
|
| 1527 |
+
@builtin
def debug_barrier(_builder=None):
    '''
    Insert a barrier to synchronize all threads in a block.
    '''
    return semantic.debug_barrier(_builder)
|
| 1533 |
+
|
| 1534 |
+
|
| 1535 |
+
@builtin
def multiple_of(input, values, _builder=None):
    """
    Let the compiler know that the values in :code:`input` are all multiples of :code:`value`.
    """
    # Accept a single constexpr or a sequence of them.
    if isinstance(values, constexpr):
        values = [values]
    for i, d in enumerate(values):
        if not isinstance(d, constexpr):
            raise TypeError(f"values element {i} must have type `constexpr`")
        if not isinstance(d.value, int):
            # Fixed: the closing backtick was missing from this message.
            raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
    values = [x.value for x in values]
    return semantic.multiple_of(input, values)
|
| 1549 |
+
|
| 1550 |
+
|
| 1551 |
+
@builtin
def max_contiguous(input, values, _builder=None):
    """
    Let the compiler know that the `value` first values in :code:`input` are contiguous.
    """
    # Accept a single constexpr or a sequence of them.
    if isinstance(values, constexpr):
        values = [values]
    for i, d in enumerate(values):
        if not isinstance(d, constexpr):
            raise TypeError(f"values element {i} must have type `constexpr`")
        if not isinstance(d.value, int):
            # Fixed: the closing backtick was missing from this message.
            raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
    values = [x.value for x in values]
    return semantic.max_contiguous(input, values)
|
| 1565 |
+
|
| 1566 |
+
|
| 1567 |
+
@builtin
def max_constancy(input, values, _builder=None):
    """
    Let the compiler know that the `value` first values in :code:`input` are constant.

    e.g. if :code:`values` is [4], then each group of 4 values in :code:`input` should all be equal,
    for example [0, 0, 0, 0, 1, 1, 1, 1].
    """
    # Accept a single constexpr or a sequence of them.
    if isinstance(values, constexpr):
        values = [values]
    for i, d in enumerate(values):
        if not isinstance(d, constexpr):
            raise TypeError(f"values element {i} must have type `constexpr`")
        if not isinstance(d.value, int):
            # Fixed: the closing backtick was missing from this message.
            raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]`")
    values = [x.value for x in values]
    return semantic.max_constancy(input, values)
|
| 1584 |
+
|
| 1585 |
+
|
| 1586 |
+
# -----------------------
|
| 1587 |
+
# Debugging functions
|
| 1588 |
+
# -----------------------
|
| 1589 |
+
|
| 1590 |
+
|
| 1591 |
+
@builtin
def static_print(*values, sep: str = " ", end: str = "\n", file=None, flush=False, _builder=None):
    '''
    Print the values at compile time. The parameters are the same as the builtin :code:`print`.

    NOTE: Calling the Python builtin :code:`print` is not the same as calling this, it instead maps to :code:`device_print`,
    which has special requirements for the arguments.

    .. highlight:: python
    .. code-block:: python

        tl.static_print(f"{BLOCK_SIZE=}")
    '''
    # No-op body: presumably intercepted by the compiler front-end rather
    # than executed — confirm in the code generator.
    pass
|
| 1605 |
+
|
| 1606 |
+
|
| 1607 |
+
@builtin
def static_assert(cond, msg="", _builder=None):
    '''
    Assert the condition at compile time. Does not require that the :code:`TRITON_DEBUG` environment variable
    is set.

    .. highlight:: python
    .. code-block:: python

        tl.static_assert(BLOCK_SIZE == 1024)
    '''
    # No-op body: presumably intercepted by the compiler front-end rather
    # than executed — confirm in the code generator.
    pass
|
| 1619 |
+
|
| 1620 |
+
|
| 1621 |
+
@builtin
def device_print(prefix, *args, _builder=None):
    '''
    Print the values at runtime from the device. String formatting does not work for runtime values, so you should
    provide the values you want to print as arguments. The first value must be a string, all following values must
    be scalars or tensors.

    Calling the Python builtin :code:`print` is the same as calling this function, and the requirements for the arguments will match
    this function (not the normal requirements for :code:`print`).

    .. highlight:: python
    .. code-block:: python

        tl.device_print("pid", pid)
        print("pid", pid)

    :param prefix: a prefix to print before the values. This is required to be a string literal.
    :param args: the values to print. They can be any tensor or scalar.
    '''
    import string
    prefix = _constexpr_to_value(prefix)
    assert isinstance(prefix, str), f"{prefix} is not string"
    # Reject prefixes containing non-printable characters.
    b_ascii = True
    for ch in prefix:
        if ch not in string.printable:
            b_ascii = False
            break
    assert b_ascii, f"{prefix} is not an ascii string"
    # Coerce every remaining argument to a tensor before lowering.
    new_args = []
    for arg in args:
        new_args.append(_to_tensor(arg, _builder))
    return semantic.device_print(prefix, new_args, _builder)
|
| 1653 |
+
|
| 1654 |
+
|
| 1655 |
+
@builtin
def device_assert(cond, msg="", _builder=None):
    '''
    Assert the condition at runtime from the device. Requires that the environment variable :code:`TRITON_DEBUG`
    is set to a value besides :code:`0` in order for this to have any effect.

    Using the Python :code:`assert` statement is the same as calling this function, except that the second argument
    must be provided and must be a string, e.g. :code:`assert pid == 0, "pid != 0"`. The environment variable must
    be set for this :code:`assert` statement to have any effect.

    .. highlight:: python
    .. code-block:: python

        tl.device_assert(pid == 0)
        assert pid == 0, f"pid != 0"

    :param cond: the condition to assert. This is required to be a boolean tensor.
    :param msg: the message to print if the assertion fails. This is required to be a string literal.
    '''
    msg = _constexpr_to_value(msg)
    import inspect
    # Walk up the call stack to find the user's calling frame, so that the
    # assertion can report a meaningful file/function/line location.
    frame = inspect.currentframe()
    module = inspect.getmodule(frame)
    # The triton function module doesn't have the name attribute.
    # We use this trick to find the caller.
    while hasattr(module, "__name__"):
        frame = frame.f_back
        module = inspect.getmodule(frame)
    # Fallback values used when the caller's frame cannot be resolved.
    lineno = 0
    func_name = 'unknown'
    file_name = 'unknown'
    if frame is not None and frame.f_back is not None:
        func_name = frame.f_code.co_name
        file_name = frame.f_back.f_code.co_filename
        # TODO: The line number currently indicates the line
        # where the triton function is called but not where the
        # device_assert is called. Need to enhance this.
        lineno = frame.f_back.f_lineno
    return semantic.device_assert(_to_tensor(cond, _builder), msg, file_name, func_name, lineno, _builder)
|
| 1694 |
+
|
| 1695 |
+
|
| 1696 |
+
@builtin
def inline_asm_elementwise(asm: str, constraints: str, args: list, dtype, is_pure: bool, pack: int, _builder=None):
    '''
    Run an inline-assembly snippet elementwise over packs of tensor elements.

    :param asm: assembly to be inlined, it has to match the target assembly format
    :param constraints: string representing the mapping of operands to register
    :param args: the arguments of the operation
    :param dtype: the element type of the returned variable
    :param is_pure: whether the operation is pure
    :param pack: the number of elements to be processed by one instance of inline assembly
    :param _builder: the builder
    :return: the return value of the function
    '''
    asm = _constexpr_to_value(asm)
    constraints = _constexpr_to_value(constraints)
    pack = _constexpr_to_value(pack)
    is_pure = _constexpr_to_value(is_pure)
    result_type = dtype
    operands = [_to_tensor(arg, _builder) for arg in args]
    if operands:
        check = partial(
            semantic.binary_op_type_checking_impl,
            builder=_builder,
            arithmetic_check=False,
            allow_lhs_ptr=True,
            allow_rhs_ptr=True,
        )
        # Fold over all operands to determine the common broadcast shape.
        common = operands[0]
        for op in operands:
            _, common = check(op, common)
        if common.shape:
            # Broadcast every operand to the common shape and block-type the result.
            operands = [check(op, common)[0] for op in operands]
            result_type = block_type(dtype, common.shape)
    handles = [op.handle for op in operands]
    call = _builder.create_inline_asm(asm, constraints, handles, result_type.to_ir(_builder), is_pure, pack)
    return tensor(call, result_type)
|
| 1735 |
+
|
| 1736 |
+
|
| 1737 |
+
# -----------------------
|
| 1738 |
+
# Iterators
|
| 1739 |
+
# -----------------------
|
| 1740 |
+
|
| 1741 |
+
|
| 1742 |
+
class static_range:
    """
    Iterator that counts upward forever.

    .. highlight:: python
    .. code-block:: python

        @triton.jit
        def kernel(...):
            for i in tl.static_range(10):
                ...
    :note: This is a special iterator used to implement similar semantics to Python's :code:`range` in the context of
        :code:`triton.jit` functions. In addition, it also guides the compiler to unroll the loop aggressively.
    :param arg1: the start value.
    :param arg2: the end value.
    :param step: the step value.
    """

    def __init__(self, arg1, arg2=None, step=None):
        # All bounds must be compile-time constants (constexpr).
        assert isinstance(arg1, constexpr)
        if step is None:
            self.step = constexpr(1)
        else:
            assert isinstance(step, constexpr)
            self.step = step
        # One-argument form is range(end); two-argument form is range(start, end).
        if arg2 is None:
            self.start, self.end = constexpr(0), arg1
        else:
            assert isinstance(arg2, constexpr)
            self.start, self.end = arg1, arg2

    def __iter__(self):
        raise RuntimeError("static_range can only be used in @triton.jit'd functions")

    def __next__(self):
        raise RuntimeError("static_range can only be used in @triton.jit'd functions")
|
| 1780 |
+
|
| 1781 |
+
|
| 1782 |
+
# -----------------------
|
| 1783 |
+
# Extern functions
|
| 1784 |
+
# -----------------------
|
| 1785 |
+
|
| 1786 |
+
|
| 1787 |
+
def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple,
             is_pure: bool, _builder=None):
    '''
    Dispatch a function to a library
    :param func: the function to dispatch
    :param lib_name: the name of the library
    :param lib_path: the path of the library
    :param args: the arguments of the function
    :param arg_type_symbol_dict: the type of the arguments
    :param ret_shape: the shape of the return value
    :param is_pure: whether the function is pure
    :param _builder: the builder
    :return: the return value of the function
    '''
    if len(arg_type_symbol_dict) == 0:
        raise ValueError("arg_type_symbol_dict is empty")

    # Every signature in the table has the same arity; inspect the first one.
    num_args = len(list(arg_type_symbol_dict.keys())[0])
    if len(args) != num_args:
        # Bug fix: the expected/actual counts were swapped in the original message.
        raise ValueError(f"length of input args does not match."
                         f"Expect {num_args}, got {len(args)}")

    # Collect the dtype of each tensor argument (or the Python type of a
    # non-tensor argument) to form the lookup key, plus the raw handles/values.
    arg_types = []
    arg_list = []
    for arg in args:
        if isinstance(arg, tensor):
            arg_types.append(arg.dtype)
            arg_list.append(arg.handle)
        else:
            arg_types.append(type(arg))
            arg_list.append(arg)
    arg_types = tuple(arg_types)

    if arg_types not in arg_type_symbol_dict:
        raise ValueError(f"input arg type does not match."
                         f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}")
    symbol, ret_type = arg_type_symbol_dict[arg_types]
    # A non-empty ret_shape means the result is a block tensor of that shape.
    if ret_shape:
        ret_type = block_type(ret_type, ret_shape)
    return tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder), is_pure), ret_type)
|
| 1828 |
+
|
| 1829 |
+
|
| 1830 |
+
def extern_elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, is_pure: bool,
                       _builder=None):
    '''
    Dispatch an elementwise function to a library
    :param lib_name: the name of the library
    :param lib_path: the path of the library
    :param args: the arguments of the function
    :param arg_type_symbol_dict: the type of the arguments
    :param is_pure: whether the function is pure
    :param _builder: the builder
    :return: the return value of the function
    '''
    # Promote every argument to a tensor and record its element dtype.
    operands = [_to_tensor(arg, _builder) for arg in args]
    arg_types = tuple(op.dtype for op in operands)
    # Scalar result only when no argument is a block tensor.
    all_scalar = all(not op.type.is_block() for op in operands)
    ret_shape = None
    if len(arg_types) > 0:
        # If the exact type tuple is supported by the library, skip the
        # arithmetic check during broadcasting.
        arithmetic_check = arg_types not in arg_type_symbol_dict
        # Fold over all operands to find the common broadcast shape.
        broadcast_arg = operands[0]
        for op in operands:
            _, broadcast_arg = semantic.binary_op_type_checking_impl(op, broadcast_arg, _builder,
                                                                     arithmetic_check=arithmetic_check)
        # Broadcast every operand to the common shape.
        for i in range(len(operands)):
            operands[i], _ = semantic.binary_op_type_checking_impl(operands[i], broadcast_arg, _builder,
                                                                   arithmetic_check=arithmetic_check)
        if not all_scalar:
            ret_shape = broadcast_arg.shape
    func = _builder.create_extern_elementwise
    return dispatch(func, lib_name, lib_path, operands, arg_type_symbol_dict, ret_shape, is_pure, _builder)
|
| 1870 |
+
|
| 1871 |
+
|
| 1872 |
+
def binary_op_type_legalization(lhs, rhs, builder):
    '''
    Convert both operands to a single common type
    :param lhs: the left operand
    :param rhs: the right operand
    :param builder: the builder
    :return: the two operands promoted to a common type
    '''
    # Thin public wrapper around the semantic-layer implementation.
    return semantic.binary_op_type_checking_impl(lhs, rhs, builder)
|
| 1880 |
+
|
| 1881 |
+
|
| 1882 |
+
def extern(fn):
    """A decorator for external functions."""
    # Simply delegates to ``builtin``: extern functions are registered through
    # the same compile-time dispatch mechanism.
    return builtin(fn)
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/math.py
ADDED
|
@@ -0,0 +1,1676 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from ..common.build import is_hip
|
| 5 |
+
from . import core
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@functools.lru_cache()
def libdevice_path():
    """Return the path to the device bitcode library, resolved once and cached.

    The ``TRITON_LIBDEVICE_PATH`` environment variable overrides the bundled
    default (HIP: ``cuda2gcn.bc``; CUDA: ``libdevice.10.bc``).
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    third_party_dir = os.path.join(base_dir, "..", "third_party")
    if is_hip():
        bundled = os.path.join(third_party_dir, "hip", "lib", "bitcode", "cuda2gcn.bc")
    else:
        bundled = os.path.join(third_party_dir, "cuda", "lib", "libdevice.10.bc")
    return os.getenv("TRITON_LIBDEVICE_PATH", bundled)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@core.extern
def clz(arg0, _builder=None):
    # Count leading zeros, via libdevice __nv_clz / __nv_clzll.
    # Result dtype is int32 for both int32 and int64 inputs.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("int32"), ): ("__nv_clz", core.dtype("int32")),
            (core.dtype("int64"), ): ("__nv_clzll", core.dtype("int32")),
        }, is_pure=True, _builder=_builder)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@core.extern
def popc(arg0, _builder=None):
    # Population count (number of set bits), via __nv_popc / __nv_popcll.
    # Result dtype is int32 for both int32 and int64 inputs.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("int32"), ): ("__nv_popc", core.dtype("int32")),
            (core.dtype("int64"), ): ("__nv_popcll", core.dtype("int32")),
        }, is_pure=True, _builder=_builder)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@core.extern
def byte_perm(arg0, arg1, arg2, _builder=None):
    # Byte permutation of two 32-bit values selected by arg2, via __nv_byte_perm.
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2], {
        (core.dtype("int32"), core.dtype("int32"), core.dtype("int32")): ("__nv_byte_perm", core.dtype("int32")),
    }, is_pure=True, _builder=_builder)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@core.extern
def min(arg0, arg1, _builder=None):
    """Elementwise minimum of two values.

    Both inputs are converted to tensors and promoted to a common type, then
    the builder op is chosen by element dtype: float -> create_minf,
    signed int -> create_minsi, unsigned int -> create_minui.
    """
    arg0 = core._to_tensor(arg0, _builder)
    arg1 = core._to_tensor(arg1, _builder)
    arg0, arg1 = core.binary_op_type_legalization(arg0, arg1, _builder)
    dtype = arg0.dtype
    if dtype.is_floating():
        return core.tensor(_builder.create_minf(arg0.handle, arg1.handle), arg0.type)
    elif dtype.is_int_signed():
        return core.tensor(_builder.create_minsi(arg0.handle, arg1.handle), arg0.type)
    elif dtype.is_int_unsigned():
        # Bug fix: was `arg0.dtype`, which is the scalar element type and drops
        # the block shape for block tensors; use `arg0.type` for consistency
        # with the float/signed branches.
        return core.tensor(_builder.create_minui(arg0.handle, arg1.handle), arg0.type)
    else:
        assert False, f"Unexpected dtype {dtype}"
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@core.extern
def max(arg0, arg1, _builder=None):
    """Elementwise maximum of two values.

    Both inputs are converted to tensors and promoted to a common type, then
    the builder op is chosen by element dtype: float -> create_maxf,
    signed int -> create_maxsi, unsigned int -> create_maxui.
    """
    arg0 = core._to_tensor(arg0, _builder)
    arg1 = core._to_tensor(arg1, _builder)
    arg0, arg1 = core.binary_op_type_legalization(arg0, arg1, _builder)
    dtype = arg0.dtype
    if dtype.is_floating():
        return core.tensor(_builder.create_maxf(arg0.handle, arg1.handle), arg0.type)
    elif dtype.is_int_signed():
        return core.tensor(_builder.create_maxsi(arg0.handle, arg1.handle), arg0.type)
    elif dtype.is_int_unsigned():
        # Bug fix: was `arg0.dtype`, which is the scalar element type and drops
        # the block shape for block tensors; use `arg0.type` for consistency
        # with the float/signed branches.
        return core.tensor(_builder.create_maxui(arg0.handle, arg1.handle), arg0.type)
    else:
        assert False, f"Unexpected dtype {dtype}"
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@core.extern
def mulhi(arg0, arg1, _builder=None):
    # High bits of the full-width integer product, via __nv_mulhi/__nv_umulhi
    # (32-bit) and __nv_mul64hi/__nv_umul64hi (64-bit).
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], {
            (core.dtype("int32"), core.dtype("int32")): ("__nv_mulhi", core.dtype("int32")),
            (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umulhi", core.dtype("uint32")),
            (core.dtype("int64"), core.dtype("int64")): ("__nv_mul64hi", core.dtype("int64")),
            (core.dtype("uint64"), core.dtype("uint64")): ("__nv_umul64hi", core.dtype("uint64")),
        }, is_pure=True, _builder=_builder)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@core.extern
def mul24(arg0, arg1, _builder=None):
    # 24-bit integer multiply, via __nv_mul24 / __nv_umul24.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], {
            (core.dtype("int32"), core.dtype("int32")): ("__nv_mul24", core.dtype("int32")),
            (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umul24", core.dtype("uint32")),
        }, is_pure=True, _builder=_builder)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@core.extern
def brev(arg0, _builder=None):
    # Bit reversal, via __nv_brev (32-bit) / __nv_brevll (64-bit).
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("int32"), ): ("__nv_brev", core.dtype("int32")),
            (core.dtype("int64"), ): ("__nv_brevll", core.dtype("int64")),
        }, is_pure=True, _builder=_builder)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@core.extern
def sad(arg0, arg1, arg2, _builder=None):
    # Sum of absolute differences |arg0 - arg1| accumulated into arg2,
    # via __nv_sad / __nv_usad.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], {
            (core.dtype("int32"), core.dtype("int32"), core.dtype("uint32")): ("__nv_sad", core.dtype("int32")),
            (core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32")): ("__nv_usad", core.dtype("uint32")),
        }, is_pure=True, _builder=_builder)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@core.extern
def abs(arg0, _builder=None):
    # Absolute value for integer and floating-point inputs, via
    # __nv_abs / __nv_llabs / __nv_fabsf / __nv_fabs.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("int32"), ): ("__nv_abs", core.dtype("int32")),
            (core.dtype("int64"), ): ("__nv_llabs", core.dtype("int64")),
            (core.dtype("fp32"), ): ("__nv_fabsf", core.dtype("fp32")),
            (core.dtype("fp64"), ): ("__nv_fabs", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@core.extern
def floor(arg0, _builder=None):
    # Round toward negative infinity, via __nv_floorf / __nv_floor.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("fp32"), ): ("__nv_floorf", core.dtype("fp32")),
            (core.dtype("fp64"), ): ("__nv_floor", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@core.extern
def rcp64h(arg0, _builder=None):
    # Reciprocal approximation on the upper half of a double, via __nv_rcp64h.
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], {
        (core.dtype("fp64"), ): ("__nv_rcp64h", core.dtype("fp64")),
    }, is_pure=True, _builder=_builder)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@core.extern
def rsqrt(arg0, _builder=None):
    # Reciprocal square root, via __nv_rsqrtf / __nv_rsqrt.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("fp32"), ): ("__nv_rsqrtf", core.dtype("fp32")),
            (core.dtype("fp64"), ): ("__nv_rsqrt", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
@core.extern
def ceil(arg0, _builder=None):
    # Round toward positive infinity, via __nv_ceil / __nv_ceilf.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("fp64"), ): ("__nv_ceil", core.dtype("fp64")),
            (core.dtype("fp32"), ): ("__nv_ceilf", core.dtype("fp32")),
        }, is_pure=True, _builder=_builder)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
@core.extern
def trunc(arg0, _builder=None):
    # Round toward zero, via __nv_trunc / __nv_truncf.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("fp64"), ): ("__nv_trunc", core.dtype("fp64")),
            (core.dtype("fp32"), ): ("__nv_truncf", core.dtype("fp32")),
        }, is_pure=True, _builder=_builder)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@core.extern
def exp2(arg0, _builder=None):
    # Base-2 exponential, via __nv_exp2f / __nv_exp2.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], {
            (core.dtype("fp32"), ): ("__nv_exp2f", core.dtype("fp32")),
            (core.dtype("fp64"), ): ("__nv_exp2", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@core.extern
def saturatef(arg0, _builder=None):
    # Clamp a float to [0, 1], via __nv_saturatef (fp32 only).
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], {
        (core.dtype("fp32"), ): ("__nv_saturatef", core.dtype("fp32")),
    }, is_pure=True, _builder=_builder)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@core.extern
def fma_rn(arg0, arg1, arg2, _builder=None):
    # Fused multiply-add, round-to-nearest-even, via __nv_fmaf_rn / __nv_fma_rn.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], {
            (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rn", core.dtype("fp32")),
            (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rn", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@core.extern
def fma_rz(arg0, arg1, arg2, _builder=None):
    # Fused multiply-add, round-toward-zero, via __nv_fmaf_rz / __nv_fma_rz.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], {
            (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rz", core.dtype("fp32")),
            (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rz", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
@core.extern
def fma_rd(arg0, arg1, arg2, _builder=None):
    # Fused multiply-add, round-toward-negative-infinity, via __nv_fmaf_rd / __nv_fma_rd.
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], {
            (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rd", core.dtype("fp32")),
            (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rd", core.dtype("fp64")),
        }, is_pure=True, _builder=_builder)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
@core.extern
|
| 212 |
+
def fma_ru(arg0, arg1, arg2, _builder=None):
|
| 213 |
+
return core.extern_elementwise(
|
| 214 |
+
"libdevice", libdevice_path(), [arg0, arg1, arg2], {
|
| 215 |
+
(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_ru", core.dtype("fp32")),
|
| 216 |
+
(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_ru", core.dtype("fp64")),
|
| 217 |
+
}, is_pure=True, _builder=_builder)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@core.extern
def fast_dividef(arg0, arg1, _builder=None):
    """Fast approximate fp32 division via libdevice ``__nv_fast_fdividef``."""
    table = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_fdividef", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def div_rn(arg0, arg1, _builder=None):
    """Division, round-to-nearest-even (``__nv_fdiv_rn`` / ``__nv_ddiv_rn``)."""
    table = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rn", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def div_rz(arg0, arg1, _builder=None):
    """Division, round-toward-zero (``__nv_fdiv_rz`` / ``__nv_ddiv_rz``)."""
    table = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rz", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def div_rd(arg0, arg1, _builder=None):
    """Division, round-downward (``__nv_fdiv_rd`` / ``__nv_ddiv_rd``)."""
    table = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rd", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def div_ru(arg0, arg1, _builder=None):
    """Division, round-upward (``__nv_fdiv_ru`` / ``__nv_ddiv_ru``)."""
    table = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_ru", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def rcp_rn(arg0, _builder=None):
    """Reciprocal, round-to-nearest-even (``__nv_frcp_rn`` / ``__nv_drcp_rn``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_frcp_rn", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_drcp_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def rcp_rz(arg0, _builder=None):
    """Reciprocal, round-toward-zero (``__nv_frcp_rz`` / ``__nv_drcp_rz``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_frcp_rz", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_drcp_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def rcp_rd(arg0, _builder=None):
    """Reciprocal, round-downward (``__nv_frcp_rd`` / ``__nv_drcp_rd``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_frcp_rd", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_drcp_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def rcp_ru(arg0, _builder=None):
    """Reciprocal, round-upward (``__nv_frcp_ru`` / ``__nv_drcp_ru``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_frcp_ru", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_drcp_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def sqrt_rn(arg0, _builder=None):
    """Square root, round-to-nearest-even (``__nv_fsqrt_rn`` / ``__nv_dsqrt_rn``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_fsqrt_rn", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_dsqrt_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def sqrt_rz(arg0, _builder=None):
    """Square root, round-toward-zero (``__nv_fsqrt_rz`` / ``__nv_dsqrt_rz``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_fsqrt_rz", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_dsqrt_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def sqrt_rd(arg0, _builder=None):
    """Square root, round-downward (``__nv_fsqrt_rd`` / ``__nv_dsqrt_rd``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_fsqrt_rd", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_dsqrt_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def sqrt_ru(arg0, _builder=None):
    """Square root, round-upward (``__nv_fsqrt_ru`` / ``__nv_dsqrt_ru``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_fsqrt_ru", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_dsqrt_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def sqrt(arg0, _builder=None):
    """Square root with the default rounding (``__nv_sqrtf`` / ``__nv_sqrt``)."""
    table = {
        (core.dtype("fp32"), ): ("__nv_sqrtf", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_sqrt", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def add_rn(arg0, arg1, _builder=None):
    """Addition, round-to-nearest-even (``__nv_dadd_rn`` / ``__nv_fadd_rn``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rn", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def add_rz(arg0, arg1, _builder=None):
    """Addition, round-toward-zero (``__nv_dadd_rz`` / ``__nv_fadd_rz``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rz", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def add_rd(arg0, arg1, _builder=None):
    """Addition, round-downward (``__nv_dadd_rd`` / ``__nv_fadd_rd``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rd", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def add_ru(arg0, arg1, _builder=None):
    """Addition, round-upward (``__nv_dadd_ru`` / ``__nv_fadd_ru``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_ru", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def mul_rn(arg0, arg1, _builder=None):
    """Multiplication, round-to-nearest-even (``__nv_dmul_rn`` / ``__nv_fmul_rn``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rn", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def mul_rz(arg0, arg1, _builder=None):
    """Multiplication, round-toward-zero (``__nv_dmul_rz`` / ``__nv_fmul_rz``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rz", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def mul_rd(arg0, arg1, _builder=None):
    """Multiplication, round-downward (``__nv_dmul_rd`` / ``__nv_fmul_rd``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rd", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def mul_ru(arg0, arg1, _builder=None):
    """Multiplication, round-upward (``__nv_dmul_ru`` / ``__nv_fmul_ru``)."""
    table = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_ru", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def double2float_rn(arg0, _builder=None):
    """fp64 -> fp32 conversion, round-to-nearest-even (``__nv_double2float_rn``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2float_rn", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2float_rz(arg0, _builder=None):
    """fp64 -> fp32 conversion, round-toward-zero (``__nv_double2float_rz``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2float_rz", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2float_rd(arg0, _builder=None):
    """fp64 -> fp32 conversion, round-downward (``__nv_double2float_rd``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2float_rd", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2float_ru(arg0, _builder=None):
    """fp64 -> fp32 conversion, round-upward (``__nv_double2float_ru``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2float_ru", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def double2int_rn(arg0, _builder=None):
    """fp64 -> int32 conversion, round-to-nearest-even (``__nv_double2int_rn``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2int_rn", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2int_rz(arg0, _builder=None):
    """fp64 -> int32 conversion, round-toward-zero (``__nv_double2int_rz``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2int_rz", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2int_rd(arg0, _builder=None):
    """fp64 -> int32 conversion, round-downward (``__nv_double2int_rd``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2int_rd", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2int_ru(arg0, _builder=None):
    """fp64 -> int32 conversion, round-upward (``__nv_double2int_ru``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2int_ru", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def double2uint_rn(arg0, _builder=None):
    """fp64 -> unsigned-int conversion, round-to-nearest-even (``__nv_double2uint_rn``).

    NOTE(review): the result dtype is int32 here (matching the original table),
    even though the libdevice intrinsic is unsigned — the bits are the unsigned value.
    """
    table = {(core.dtype("fp64"), ): ("__nv_double2uint_rn", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2uint_rz(arg0, _builder=None):
    """fp64 -> unsigned-int conversion, round-toward-zero (``__nv_double2uint_rz``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2uint_rz", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2uint_rd(arg0, _builder=None):
    """fp64 -> unsigned-int conversion, round-downward (``__nv_double2uint_rd``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2uint_rd", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2uint_ru(arg0, _builder=None):
    """fp64 -> unsigned-int conversion, round-upward (``__nv_double2uint_ru``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2uint_ru", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def int2double_rn(arg0, _builder=None):
    """int32 -> fp64 conversion, round-to-nearest-even (``__nv_int2double_rn``)."""
    table = {(core.dtype("int32"), ): ("__nv_int2double_rn", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def uint2double_rn(arg0, _builder=None):
    """uint32 -> fp64 conversion, round-to-nearest-even (``__nv_uint2double_rn``)."""
    table = {(core.dtype("uint32"), ): ("__nv_uint2double_rn", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def float2int_rn(arg0, _builder=None):
    """fp32 -> int32 conversion, round-to-nearest-even (``__nv_float2int_rn``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2int_rn", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2int_rz(arg0, _builder=None):
    """fp32 -> int32 conversion, round-toward-zero (``__nv_float2int_rz``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2int_rz", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2int_rd(arg0, _builder=None):
    """fp32 -> int32 conversion, round-downward (``__nv_float2int_rd``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2int_rd", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2int_ru(arg0, _builder=None):
    """fp32 -> int32 conversion, round-upward (``__nv_float2int_ru``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2int_ru", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def float2uint_rn(arg0, _builder=None):
    """fp32 -> unsigned-int conversion, round-to-nearest-even (``__nv_float2uint_rn``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2uint_rn", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2uint_rz(arg0, _builder=None):
    """fp32 -> unsigned-int conversion, round-toward-zero (``__nv_float2uint_rz``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2uint_rz", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2uint_rd(arg0, _builder=None):
    """fp32 -> unsigned-int conversion, round-downward (``__nv_float2uint_rd``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2uint_rd", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2uint_ru(arg0, _builder=None):
    """fp32 -> unsigned-int conversion, round-upward (``__nv_float2uint_ru``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2uint_ru", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def int2float_rn(arg0, _builder=None):
    """int32 -> fp32 conversion, round-to-nearest-even (``__nv_int2float_rn``)."""
    table = {(core.dtype("int32"), ): ("__nv_int2float_rn", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def int2float_rz(arg0, _builder=None):
    """int32 -> fp32 conversion, round-toward-zero (``__nv_int2float_rz``)."""
    table = {(core.dtype("int32"), ): ("__nv_int2float_rz", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def int2float_rd(arg0, _builder=None):
    """int32 -> fp32 conversion, round-downward (``__nv_int2float_rd``)."""
    table = {(core.dtype("int32"), ): ("__nv_int2float_rd", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def int2float_ru(arg0, _builder=None):
    """int32 -> fp32 conversion, round-upward (``__nv_int2float_ru``)."""
    table = {(core.dtype("int32"), ): ("__nv_int2float_ru", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def uint2float_rn(arg0, _builder=None):
    """uint32 -> fp32 conversion, round-to-nearest-even (``__nv_uint2float_rn``)."""
    table = {(core.dtype("uint32"), ): ("__nv_uint2float_rn", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def uint2float_rz(arg0, _builder=None):
    """uint32 -> fp32 conversion, round-toward-zero (``__nv_uint2float_rz``)."""
    table = {(core.dtype("uint32"), ): ("__nv_uint2float_rz", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def uint2float_rd(arg0, _builder=None):
    """uint32 -> fp32 conversion, round-downward (``__nv_uint2float_rd``)."""
    table = {(core.dtype("uint32"), ): ("__nv_uint2float_rd", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def uint2float_ru(arg0, _builder=None):
    """uint32 -> fp32 conversion, round-upward (``__nv_uint2float_ru``)."""
    table = {(core.dtype("uint32"), ): ("__nv_uint2float_ru", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def hiloint2double(arg0, arg1, _builder=None):
    """Assemble an fp64 from high/low int32 halves (``__nv_hiloint2double``)."""
    table = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_hiloint2double", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2loint(arg0, _builder=None):
    """Extract the low 32 bits of an fp64 as int32 (``__nv_double2loint``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2loint", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2hiint(arg0, _builder=None):
    """Extract the high 32 bits of an fp64 as int32 (``__nv_double2hiint``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2hiint", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def float2ll_rn(arg0, _builder=None):
    """fp32 -> int64 conversion, round-to-nearest-even (``__nv_float2ll_rn``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ll_rn", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2ll_rz(arg0, _builder=None):
    """fp32 -> int64 conversion, round-toward-zero (``__nv_float2ll_rz``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ll_rz", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2ll_rd(arg0, _builder=None):
    """fp32 -> int64 conversion, round-downward (``__nv_float2ll_rd``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ll_rd", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2ll_ru(arg0, _builder=None):
    """fp32 -> int64 conversion, round-upward (``__nv_float2ll_ru``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ll_ru", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def float2ull_rn(arg0, _builder=None):
    """fp32 -> unsigned 64-bit conversion, round-to-nearest-even (``__nv_float2ull_rn``).

    NOTE(review): the result dtype is int64 (matching the original table); the bit
    pattern is that of the unsigned value.
    """
    table = {(core.dtype("fp32"), ): ("__nv_float2ull_rn", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2ull_rz(arg0, _builder=None):
    """fp32 -> unsigned 64-bit conversion, round-toward-zero (``__nv_float2ull_rz``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ull_rz", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2ull_rd(arg0, _builder=None):
    """fp32 -> unsigned 64-bit conversion, round-downward (``__nv_float2ull_rd``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ull_rd", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float2ull_ru(arg0, _builder=None):
    """fp32 -> unsigned 64-bit conversion, round-upward (``__nv_float2ull_ru``)."""
    table = {(core.dtype("fp32"), ): ("__nv_float2ull_ru", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def double2ll_rn(arg0, _builder=None):
    """fp64 -> int64 conversion, round-to-nearest-even (``__nv_double2ll_rn``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ll_rn", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2ll_rz(arg0, _builder=None):
    """fp64 -> int64 conversion, round-toward-zero (``__nv_double2ll_rz``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ll_rz", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2ll_rd(arg0, _builder=None):
    """fp64 -> int64 conversion, round-downward (``__nv_double2ll_rd``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ll_rd", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2ll_ru(arg0, _builder=None):
    """fp64 -> int64 conversion, round-upward (``__nv_double2ll_ru``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ll_ru", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def double2ull_rn(arg0, _builder=None):
    """fp64 -> unsigned 64-bit conversion, round-to-nearest-even (``__nv_double2ull_rn``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ull_rn", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2ull_rz(arg0, _builder=None):
    """fp64 -> unsigned 64-bit conversion, round-toward-zero (``__nv_double2ull_rz``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ull_rz", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2ull_rd(arg0, _builder=None):
    """fp64 -> unsigned 64-bit conversion, round-downward (``__nv_double2ull_rd``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ull_rd", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double2ull_ru(arg0, _builder=None):
    """fp64 -> unsigned 64-bit conversion, round-upward (``__nv_double2ull_ru``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double2ull_ru", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def ll2float_rn(arg0, _builder=None):
    """int64 -> fp32 conversion, round-to-nearest-even (``__nv_ll2float_rn``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2float_rn", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ll2float_rz(arg0, _builder=None):
    """int64 -> fp32 conversion, round-toward-zero (``__nv_ll2float_rz``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2float_rz", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ll2float_rd(arg0, _builder=None):
    """int64 -> fp32 conversion, round-downward (``__nv_ll2float_rd``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2float_rd", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ll2float_ru(arg0, _builder=None):
    """int64 -> fp32 conversion, round-upward (``__nv_ll2float_ru``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2float_ru", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def ull2float_rn(arg0, _builder=None):
    """uint64 -> fp32 conversion, round-to-nearest-even (``__nv_ull2float_rn``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2float_rn", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ull2float_rz(arg0, _builder=None):
    """uint64 -> fp32 conversion, round-toward-zero (``__nv_ull2float_rz``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2float_rz", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ull2float_rd(arg0, _builder=None):
    """uint64 -> fp32 conversion, round-downward (``__nv_ull2float_rd``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2float_rd", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ull2float_ru(arg0, _builder=None):
    """uint64 -> fp32 conversion, round-upward (``__nv_ull2float_ru``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2float_ru", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def ll2double_rn(arg0, _builder=None):
    """int64 -> fp64 conversion, round-to-nearest-even (``__nv_ll2double_rn``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2double_rn", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ll2double_rz(arg0, _builder=None):
    """int64 -> fp64 conversion, round-toward-zero (``__nv_ll2double_rz``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2double_rz", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ll2double_rd(arg0, _builder=None):
    """int64 -> fp64 conversion, round-downward (``__nv_ll2double_rd``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2double_rd", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ll2double_ru(arg0, _builder=None):
    """int64 -> fp64 conversion, round-upward (``__nv_ll2double_ru``)."""
    table = {(core.dtype("int64"), ): ("__nv_ll2double_ru", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def ull2double_rn(arg0, _builder=None):
    """uint64 -> fp64 conversion, round-to-nearest-even (``__nv_ull2double_rn``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2double_rn", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ull2double_rz(arg0, _builder=None):
    """uint64 -> fp64 conversion, round-toward-zero (``__nv_ull2double_rz``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2double_rz", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ull2double_rd(arg0, _builder=None):
    """uint64 -> fp64 conversion, round-downward (``__nv_ull2double_rd``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2double_rd", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def ull2double_ru(arg0, _builder=None):
    """uint64 -> fp64 conversion, round-upward (``__nv_ull2double_ru``)."""
    table = {(core.dtype("uint64"), ): ("__nv_ull2double_ru", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def int_as_float(arg0, _builder=None):
    """Reinterpret int32 bits as fp32 (``__nv_int_as_float``); no value conversion."""
    table = {(core.dtype("int32"), ): ("__nv_int_as_float", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float_as_int(arg0, _builder=None):
    """Reinterpret fp32 bits as int32 (``__nv_float_as_int``); no value conversion."""
    table = {(core.dtype("fp32"), ): ("__nv_float_as_int", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def uint_as_float(arg0, _builder=None):
    """Reinterpret uint32 bits as fp32 (``__nv_uint_as_float``); no value conversion."""
    table = {(core.dtype("uint32"), ): ("__nv_uint_as_float", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def float_as_uint(arg0, _builder=None):
    """Reinterpret fp32 bits as an unsigned 32-bit value (``__nv_float_as_uint``).

    NOTE(review): result dtype is int32 here, matching the original table.
    """
    table = {(core.dtype("fp32"), ): ("__nv_float_as_uint", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def longlong_as_double(arg0, _builder=None):
    """Reinterpret int64 bits as fp64 (``__nv_longlong_as_double``)."""
    table = {(core.dtype("int64"), ): ("__nv_longlong_as_double", core.dtype("fp64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def double_as_longlong(arg0, _builder=None):
    """Reinterpret fp64 bits as int64 (``__nv_double_as_longlong``)."""
    table = {(core.dtype("fp64"), ): ("__nv_double_as_longlong", core.dtype("int64"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
@core.extern
def fast_sinf(arg0, _builder=None):
    """Fast approximate fp32 sine via libdevice ``__nv_fast_sinf``."""
    table = {(core.dtype("fp32"), ): ("__nv_fast_sinf", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def fast_cosf(arg0, _builder=None):
    """Fast approximate fp32 cosine via libdevice ``__nv_fast_cosf``."""
    table = {(core.dtype("fp32"), ): ("__nv_fast_cosf", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def fast_log2f(arg0, _builder=None):
    """Fast approximate fp32 base-2 logarithm via libdevice ``__nv_fast_log2f``."""
    table = {(core.dtype("fp32"), ): ("__nv_fast_log2f", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def fast_logf(arg0, _builder=None):
    """Fast approximate fp32 natural logarithm via libdevice ``__nv_fast_logf``."""
    table = {(core.dtype("fp32"), ): ("__nv_fast_logf", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def fast_expf(arg0, _builder=None):
    """Fast approximate fp32 exponential via libdevice ``__nv_fast_expf``."""
    table = {(core.dtype("fp32"), ): ("__nv_fast_expf", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)


@core.extern
def fast_tanf(arg0, _builder=None):
    """Fast approximate fp32 tangent via libdevice ``__nv_fast_tanf``."""
    table = {(core.dtype("fp32"), ): ("__nv_fast_tanf", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], table,
                                   is_pure=True, _builder=_builder)
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
@core.extern
|
| 965 |
+
def fast_exp10f(arg0, _builder=None):
|
| 966 |
+
return core.extern_elementwise("libdevice", libdevice_path(), [arg0], {
|
| 967 |
+
(core.dtype("fp32"), ): ("__nv_fast_exp10f", core.dtype("fp32")),
|
| 968 |
+
}, is_pure=True, _builder=_builder)
|
| 969 |
+
|
| 970 |
+
|
| 971 |
+
@core.extern
|
| 972 |
+
def fast_log10f(arg0, _builder=None):
|
| 973 |
+
return core.extern_elementwise("libdevice", libdevice_path(), [arg0], {
|
| 974 |
+
(core.dtype("fp32"), ): ("__nv_fast_log10f", core.dtype("fp32")),
|
| 975 |
+
}, is_pure=True, _builder=_builder)
|
| 976 |
+
|
| 977 |
+
|
| 978 |
+
@core.extern
|
| 979 |
+
def fast_powf(arg0, arg1, _builder=None):
|
| 980 |
+
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], {
|
| 981 |
+
(core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_powf", core.dtype("fp32")),
|
| 982 |
+
}, is_pure=True, _builder=_builder)
|
| 983 |
+
|
| 984 |
+
|
| 985 |
+
@core.extern
def hadd(arg0, arg1, _builder=None):
    """32-bit half-add intrinsic: __nv_hadd (signed) / __nv_uhadd (unsigned)."""
    variants = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_hadd", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_uhadd", core.dtype("uint32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def rhadd(arg0, arg1, _builder=None):
    """32-bit rounded half-add intrinsic: __nv_rhadd / __nv_urhadd."""
    variants = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_rhadd", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_urhadd", core.dtype("uint32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def sub_rn(arg0, arg1, _builder=None):
    """FP subtraction via the _rn libdevice entries (__nv_fsub_rn / __nv_dsub_rn)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rn", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def sub_rz(arg0, arg1, _builder=None):
    """FP subtraction via the _rz libdevice entries (__nv_fsub_rz / __nv_dsub_rz)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rz", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def sub_rd(arg0, arg1, _builder=None):
    """FP subtraction via the _rd libdevice entries (__nv_fsub_rd / __nv_dsub_rd)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rd", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def sub_ru(arg0, arg1, _builder=None):
    """FP subtraction via the _ru libdevice entries (__nv_fsub_ru / __nv_dsub_ru)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_ru", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def rsqrt_rn(arg0, _builder=None):
    """fp32 reciprocal square root via __nv_frsqrt_rn."""
    variants = {(core.dtype("fp32"),): ("__nv_frsqrt_rn", core.dtype("fp32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def ffs(arg0, _builder=None):
    """Find-first-set bit (__nv_ffs / __nv_ffsll); result is always int32."""
    variants = {
        (core.dtype("int32"),): ("__nv_ffs", core.dtype("int32")),
        (core.dtype("int64"),): ("__nv_ffsll", core.dtype("int32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def rint(arg0, _builder=None):
    """Round to nearest integral FP value (__nv_rintf / __nv_rint)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_rintf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_rint", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def llrint(arg0, _builder=None):
    """Round to nearest and convert to int64 (__nv_llrintf / __nv_llrint)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_llrintf", core.dtype("int64")),
        (core.dtype("fp64"),): ("__nv_llrint", core.dtype("int64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def nearbyint(arg0, _builder=None):
    """Round to nearby integral FP value (__nv_nearbyintf / __nv_nearbyint)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_nearbyintf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_nearbyint", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def isnan(arg0, _builder=None):
    """NaN test (__nv_isnanf / __nv_isnand); returns an int32 flag."""
    variants = {
        (core.dtype("fp32"),): ("__nv_isnanf", core.dtype("int32")),
        (core.dtype("fp64"),): ("__nv_isnand", core.dtype("int32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def signbit(arg0, _builder=None):
    """Sign-bit test (__nv_signbitf / __nv_signbitd); returns an int32 flag."""
    variants = {
        (core.dtype("fp32"),): ("__nv_signbitf", core.dtype("int32")),
        (core.dtype("fp64"),): ("__nv_signbitd", core.dtype("int32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def copysign(arg0, arg1, _builder=None):
    """Magnitude of arg0 with sign of arg1 (__nv_copysignf / __nv_copysign)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_copysignf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_copysign", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def finitef(arg0, _builder=None):
    """Finiteness test for fp32 only (__nv_finitef); returns an int32 flag."""
    variants = {(core.dtype("fp32"),): ("__nv_finitef", core.dtype("int32"))}
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def isinf(arg0, _builder=None):
    """Infinity test (__nv_isinff / __nv_isinfd); returns an int32 flag."""
    variants = {
        (core.dtype("fp32"),): ("__nv_isinff", core.dtype("int32")),
        (core.dtype("fp64"),): ("__nv_isinfd", core.dtype("int32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def nextafter(arg0, arg1, _builder=None):
    """Next representable value after arg0 toward arg1 (__nv_nextafterf / __nv_nextafter)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_nextafterf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_nextafter", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def sin(arg0, _builder=None):
    """Elementwise sine (__nv_sinf / __nv_sin)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_sinf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_sin", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def cos(arg0, _builder=None):
    """Elementwise cosine (__nv_cosf / __nv_cos)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_cosf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_cos", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def sinpi(arg0, _builder=None):
    """Elementwise sin(pi*x) (__nv_sinpif / __nv_sinpi)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_sinpif", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_sinpi", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def cospi(arg0, _builder=None):
    """Elementwise cos(pi*x) (__nv_cospif / __nv_cospi)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_cospif", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_cospi", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def tan(arg0, _builder=None):
    """Elementwise tangent (__nv_tanf / __nv_tan)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_tanf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_tan", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def log2(arg0, _builder=None):
    """Elementwise base-2 logarithm (__nv_log2f / __nv_log2)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_log2f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_log2", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def exp(arg0, _builder=None):
    """Elementwise natural exponential (__nv_expf / __nv_exp)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_expf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_exp", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def exp10(arg0, _builder=None):
    """Elementwise base-10 exponential (__nv_exp10f / __nv_exp10)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_exp10f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_exp10", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def cosh(arg0, _builder=None):
    """Elementwise hyperbolic cosine (__nv_coshf / __nv_cosh)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_coshf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_cosh", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def sinh(arg0, _builder=None):
    """Elementwise hyperbolic sine (__nv_sinhf / __nv_sinh)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_sinhf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_sinh", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def tanh(arg0, _builder=None):
    """Elementwise hyperbolic tangent (__nv_tanhf / __nv_tanh)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_tanhf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_tanh", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def atan2(arg0, arg1, _builder=None):
    """Elementwise two-argument arctangent (__nv_atan2f / __nv_atan2)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_atan2f", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_atan2", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def atan(arg0, _builder=None):
    """Elementwise arctangent (__nv_atanf / __nv_atan)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_atanf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_atan", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def asin(arg0, _builder=None):
    """Elementwise arcsine (__nv_asinf / __nv_asin)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_asinf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_asin", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def acos(arg0, _builder=None):
    """Elementwise arccosine (__nv_acosf / __nv_acos)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_acosf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_acos", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def log(arg0, _builder=None):
    """Elementwise natural logarithm (__nv_logf / __nv_log)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_logf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_log", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def log10(arg0, _builder=None):
    """Elementwise base-10 logarithm (__nv_log10f / __nv_log10)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_log10f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_log10", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def log1p(arg0, _builder=None):
    """Elementwise log(1 + x) (__nv_log1pf / __nv_log1p)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_log1pf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_log1p", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def acosh(arg0, _builder=None):
    """Elementwise inverse hyperbolic cosine (__nv_acoshf / __nv_acosh)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_acoshf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_acosh", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def asinh(arg0, _builder=None):
    """Elementwise inverse hyperbolic sine (__nv_asinhf / __nv_asinh)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_asinhf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_asinh", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def atanh(arg0, _builder=None):
    """Elementwise inverse hyperbolic tangent (__nv_atanhf / __nv_atanh)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_atanhf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_atanh", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def expm1(arg0, _builder=None):
    """Elementwise exp(x) - 1 (__nv_expm1f / __nv_expm1)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_expm1f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_expm1", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def hypot(arg0, arg1, _builder=None):
    """Elementwise 2D Euclidean norm (__nv_hypotf / __nv_hypot)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_hypotf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_hypot", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def rhypot(arg0, arg1, _builder=None):
    """Elementwise reciprocal 2D Euclidean norm (__nv_rhypotf / __nv_rhypot)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_rhypotf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_rhypot", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def norm3d(arg0, arg1, arg2, _builder=None):
    """Elementwise 3D Euclidean norm (__nv_norm3df / __nv_norm3d)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_norm3df", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_norm3d", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def rnorm3d(arg0, arg1, arg2, _builder=None):
    """Elementwise reciprocal 3D Euclidean norm (__nv_rnorm3df / __nv_rnorm3d)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_rnorm3df", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_rnorm3d", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def norm4d(arg0, arg1, arg2, arg3, _builder=None):
    """Elementwise 4D Euclidean norm (__nv_norm4df / __nv_norm4d)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")):
        ("__nv_norm4df", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")):
        ("__nv_norm4d", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, arg3],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def rnorm4d(arg0, arg1, arg2, arg3, _builder=None):
    """Elementwise reciprocal 4D Euclidean norm (__nv_rnorm4df / __nv_rnorm4d)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")):
        ("__nv_rnorm4df", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")):
        ("__nv_rnorm4d", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, arg3],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def cbrt(arg0, _builder=None):
    """Elementwise cube root (__nv_cbrtf / __nv_cbrt)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_cbrtf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_cbrt", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def rcbrt(arg0, _builder=None):
    """Elementwise reciprocal cube root (__nv_rcbrtf / __nv_rcbrt)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_rcbrtf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_rcbrt", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def j0(arg0, _builder=None):
    """Bessel function of the first kind, order 0 (__nv_j0f / __nv_j0)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_j0f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_j0", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def j1(arg0, _builder=None):
    """Bessel function of the first kind, order 1 (__nv_j1f / __nv_j1)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_j1f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_j1", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def y0(arg0, _builder=None):
    """Bessel function of the second kind, order 0 (__nv_y0f / __nv_y0)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_y0f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_y0", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def y1(arg0, _builder=None):
    """Bessel function of the second kind, order 1 (__nv_y1f / __nv_y1)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_y1f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_y1", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def yn(arg0, arg1, _builder=None):
    """Bessel function of the second kind, order arg0 (int32), of arg1 (__nv_ynf / __nv_yn)."""
    variants = {
        (core.dtype("int32"), core.dtype("fp32")): ("__nv_ynf", core.dtype("fp32")),
        (core.dtype("int32"), core.dtype("fp64")): ("__nv_yn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def jn(arg0, arg1, _builder=None):
    """Bessel function of the first kind, order arg0 (int32), of arg1 (__nv_jnf / __nv_jn)."""
    variants = {
        (core.dtype("int32"), core.dtype("fp32")): ("__nv_jnf", core.dtype("fp32")),
        (core.dtype("int32"), core.dtype("fp64")): ("__nv_jn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def cyl_bessel_i0(arg0, _builder=None):
    """Modified Bessel function I0 (__nv_cyl_bessel_i0f / __nv_cyl_bessel_i0)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_cyl_bessel_i0f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_cyl_bessel_i0", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def cyl_bessel_i1(arg0, _builder=None):
    """Modified Bessel function I1 (__nv_cyl_bessel_i1f / __nv_cyl_bessel_i1)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_cyl_bessel_i1f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_cyl_bessel_i1", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def erf(arg0, _builder=None):
    """Elementwise error function (__nv_erff / __nv_erf)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_erff", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_erf", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def erfinv(arg0, _builder=None):
    """Elementwise inverse error function (__nv_erfinvf / __nv_erfinv)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_erfinvf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_erfinv", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def erfc(arg0, _builder=None):
    """Elementwise complementary error function (__nv_erfcf / __nv_erfc)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_erfcf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_erfc", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def erfcx(arg0, _builder=None):
    """Elementwise scaled complementary error function (__nv_erfcxf / __nv_erfcx)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_erfcxf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_erfcx", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def erfcinv(arg0, _builder=None):
    """Elementwise inverse complementary error function (__nv_erfcinvf / __nv_erfcinv)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_erfcinvf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_erfcinv", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def normcdfinv(arg0, _builder=None):
    """Elementwise inverse standard normal CDF (__nv_normcdfinvf / __nv_normcdfinv)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_normcdfinvf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_normcdfinv", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def normcdf(arg0, _builder=None):
    """Elementwise standard normal CDF (__nv_normcdff / __nv_normcdf)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_normcdff", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_normcdf", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def lgamma(arg0, _builder=None):
    """Elementwise log-gamma (__nv_lgammaf / __nv_lgamma)."""
    variants = {
        (core.dtype("fp32"),): ("__nv_lgammaf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_lgamma", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def ldexp(arg0, arg1, _builder=None):
    """Elementwise arg0 * 2**arg1 with int32 exponent (__nv_ldexpf / __nv_ldexp)."""
    variants = {
        (core.dtype("fp32"), core.dtype("int32")): ("__nv_ldexpf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("int32")): ("__nv_ldexp", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def scalbn(arg0, arg1, _builder=None):
    """Elementwise scale by a power of the radix, int32 exponent (__nv_scalbnf / __nv_scalbn)."""
    variants = {
        (core.dtype("fp32"), core.dtype("int32")): ("__nv_scalbnf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("int32")): ("__nv_scalbn", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def fmod(arg0, arg1, _builder=None):
    """Elementwise FP remainder, C fmod semantics (__nv_fmodf / __nv_fmod)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmodf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fmod", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def remainder(arg0, arg1, _builder=None):
    """Elementwise FP remainder, C remainder semantics (__nv_remainderf / __nv_remainder)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_remainderf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_remainder", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)


@core.extern
def fma(arg0, arg1, arg2, _builder=None):
    """Elementwise fused multiply-add arg0*arg1 + arg2 (__nv_fmaf / __nv_fma)."""
    variants = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
def pow(arg0, arg1, _builder=None):
    """Elementwise power arg0**arg1.

    Dispatches on the exponent type: integer exponents use the __nv_powi*
    entries, floating exponents use __nv_pow*.
    """
    variants = {
        (core.dtype("fp32"), core.dtype("int32")): ("__nv_powif", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("int32")): ("__nv_powi", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_powf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_pow", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1],
                                   variants, is_pure=True, _builder=_builder)
@core.extern
|
| 1619 |
+
def tgamma(arg0, _builder=None):
|
| 1620 |
+
return core.extern_elementwise(
|
| 1621 |
+
"libdevice", libdevice_path(), [arg0], {
|
| 1622 |
+
(core.dtype("fp32"), ): ("__nv_tgammaf", core.dtype("fp32")),
|
| 1623 |
+
(core.dtype("fp64"), ): ("__nv_tgamma", core.dtype("fp64")),
|
| 1624 |
+
}, is_pure=True, _builder=_builder)
|
| 1625 |
+
|
| 1626 |
+
|
| 1627 |
+
@core.extern
def round(arg0, _builder=None):
    """Elementwise libdevice rounding: __nv_roundf (fp32) / __nv_round (fp64)."""
    dispatch_table = {
        (core.dtype("fp32"), ): ("__nv_roundf", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_round", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], dispatch_table,
                                   is_pure=True, _builder=_builder)
|
| 1634 |
+
|
| 1635 |
+
|
| 1636 |
+
@core.extern
def llround(arg0, _builder=None):
    """Elementwise libdevice round-to-long: __nv_llroundf/__nv_llround, result is int64."""
    dispatch_table = {
        (core.dtype("fp32"), ): ("__nv_llroundf", core.dtype("int64")),
        (core.dtype("fp64"), ): ("__nv_llround", core.dtype("int64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], dispatch_table,
                                   is_pure=True, _builder=_builder)
|
| 1643 |
+
|
| 1644 |
+
|
| 1645 |
+
@core.extern
def fdim(arg0, arg1, _builder=None):
    """Elementwise libdevice positive difference: __nv_fdimf (fp32) / __nv_fdim (fp64)."""
    dispatch_table = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdimf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fdim", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], dispatch_table,
                                   is_pure=True, _builder=_builder)
|
| 1652 |
+
|
| 1653 |
+
|
| 1654 |
+
@core.extern
def ilogb(arg0, _builder=None):
    """Elementwise libdevice integer exponent extraction: __nv_ilogbf/__nv_ilogb, result int32."""
    dispatch_table = {
        (core.dtype("fp32"), ): ("__nv_ilogbf", core.dtype("int32")),
        (core.dtype("fp64"), ): ("__nv_ilogb", core.dtype("int32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], dispatch_table,
                                   is_pure=True, _builder=_builder)
|
| 1661 |
+
|
| 1662 |
+
|
| 1663 |
+
@core.extern
def logb(arg0, _builder=None):
    """Elementwise libdevice exponent extraction: __nv_logbf (fp32) / __nv_logb (fp64)."""
    dispatch_table = {
        (core.dtype("fp32"), ): ("__nv_logbf", core.dtype("fp32")),
        (core.dtype("fp64"), ): ("__nv_logb", core.dtype("fp64")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], dispatch_table,
                                   is_pure=True, _builder=_builder)
|
| 1670 |
+
|
| 1671 |
+
|
| 1672 |
+
@core.extern
def isfinited(arg0, _builder=None):
    """Elementwise libdevice finiteness test for fp64 (__nv_isfinited), result int32."""
    dispatch_table = {
        (core.dtype("fp64"), ): ("__nv_isfinited", core.dtype("int32")),
    }
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0], dispatch_table,
                                   is_pure=True, _builder=_builder)
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/random.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..runtime.jit import jit
|
| 2 |
+
from . import core as tl
|
| 3 |
+
from . import standard
|
| 4 |
+
|
| 5 |
+
N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox
|
| 6 |
+
|
| 7 |
+
# -------------------
|
| 8 |
+
# randint
|
| 9 |
+
# -------------------
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@jit
def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1).

    All six words must share one unsigned dtype (uint32 or uint64); the round
    and key-increment constants below are chosen per bitwidth.
    """
    # 32-bit path uses the published Philox-4x32 constants; the 64-bit branch
    # uses their 4x64 analogues (key increments are golden-ratio style values).
    if c0.dtype == tl.uint32:
        PHILOX_KEY_A: tl.constexpr = 0x9E3779B9
        PHILOX_KEY_B: tl.constexpr = 0xBB67AE85
        PHILOX_ROUND_A: tl.constexpr = 0xD2511F53
        PHILOX_ROUND_B: tl.constexpr = 0xCD9E8D57
    else:
        tl.static_assert(c0.dtype == tl.uint64, "dtype not supported in philox_impl")
        PHILOX_KEY_A: tl.constexpr = 0x9E3779B97F4A7C15
        PHILOX_KEY_B: tl.constexpr = 0xBB67AE8584CAA73B
        PHILOX_ROUND_A: tl.constexpr = 0xD2E7470EE14C6C93
        PHILOX_ROUND_B: tl.constexpr = 0xCA5A826395121157

    # static_range unrolls at compile time so the JIT sees a fixed sequence.
    for _ in tl.static_range(n_rounds):
        # for _ in range(n_rounds):
        # update random state
        A = PHILOX_ROUND_A
        B = PHILOX_ROUND_B
        _c0, _c2 = c0, c2
        # One Philox S-box round: high/low halves of two widening multiplies,
        # mixed with the previous counters and the current key.
        c0 = tl.umulhi(B, _c2) ^ c1 ^ k0
        c2 = tl.umulhi(A, _c0) ^ c3 ^ k1
        c1 = B * _c2
        c3 = A * _c0
        # raise key
        k0 = k0 + PHILOX_KEY_A
        k1 = k1 + PHILOX_KEY_B
    return c0, c1, c2, c3
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@jit
def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Normalize seed and counters to one unsigned dtype, then run `philox_impl`.

    The counter bitwidth (32 or 64) decides how the 64-bit seed is split into
    the two Philox key words.
    """
    seed = seed.to(tl.uint64)
    if tl.constexpr(c0.dtype.primitive_bitwidth) == 32:
        int_dtype = tl.uint32
        # Split the 64-bit seed into two 32-bit key halves.
        seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32)
        seed_lo = (seed & 0xffffffff).to(tl.uint32)
    else:
        tl.static_assert(tl.constexpr(c0.dtype.primitive_bitwidth) == 64, "bitwidth not supported in philox")
        int_dtype = tl.uint64
        # 64-bit counters: whole seed is the low key word, high word is zero.
        seed_hi = 0
        seed_lo = seed
    # Reinterpret (not value-convert) counters as unsigned bit patterns.
    c0 = c0.to(int_dtype, bitcast=True)
    c1 = c1.to(int_dtype, bitcast=True)
    c2 = c2.to(int_dtype, bitcast=True)
    c3 = c3.to(int_dtype, bitcast=True)
    return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@jit
def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block, returns a single
    block of random :code:`int32`.

    If you need multiple streams of random numbers,
    using `randint4x` is likely to be faster than calling `randint` 4 times.

    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
    """
    # Only the first of Philox's four output words is used; rest are dropped.
    ret, _, _, _ = randint4x(seed, offset, n_rounds)
    return ret
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@jit
def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block, returns four
    blocks of random :code:`int32`.

    This is the maximally efficient entry point
    to Triton's Philox pseudo-random number generator.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # _0 = tl.zeros(offset.shape, offset.dtype)
    # Zero block with the same shape/dtype as `offset`, used to pad the
    # remaining three Philox counter words.
    _0 = offset * 0
    return philox(seed, offset, _0, _0, _0, n_rounds)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# -------------------
|
| 98 |
+
# rand
|
| 99 |
+
# -------------------
|
| 100 |
+
|
| 101 |
+
# @jit
|
| 102 |
+
# def uint32_to_uniform_float(x):
|
| 103 |
+
# """
|
| 104 |
+
# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
|
| 105 |
+
# """
|
| 106 |
+
# two_to_the_minus_32: tl.constexpr = 2.328306e-10
|
| 107 |
+
# return x * two_to_the_minus_32
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@jit
def uint_to_uniform_float(x):
    """
    Numerically stable function to convert a random uint into a random float uniformly sampled in [0, 1).
    """
    # TODO: fix frontend issues and cleanup
    # conditions can be simplified
    # scale is ((2**23 - 1) / 2**23) * 2**(N_BITS - 1)
    if tl.constexpr(x.dtype == tl.uint32) or tl.constexpr(x.dtype == tl.int32):
        # maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
        x = x.to(tl.int32, bitcast=True)
        scale = 4.6566127342e-10
    else:
        tl.static_assert(tl.constexpr(x.dtype == tl.uint64) or tl.constexpr(x.dtype == tl.int64))
        x = x.to(tl.int64, bitcast=True)
        scale = 1.0842020432385337e-19
    # Fold negative values onto the non-negative range so the signed bit
    # pattern covers [0, MAX_INT] before scaling into [0, 1).
    x = tl.where(x < 0, -x - 1, x)
    return x * scale
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@jit
def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns a block of random :code:`float32` in :math:`U(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # One integer stream, mapped to [0, 1).
    source = randint(seed, offset, n_rounds)
    return uint_to_uniform_float(source)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@jit
def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offsets` block,
    returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # All four Philox output words are converted, giving 4 uniform streams.
    i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds)
    u1 = uint_to_uniform_float(i1)
    u2 = uint_to_uniform_float(i2)
    u3 = uint_to_uniform_float(i3)
    u4 = uint_to_uniform_float(i4)
    return u1, u2, u3, u4
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# -------------------
|
| 161 |
+
# randn
|
| 162 |
+
# -------------------
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@jit
def pair_uniform_to_normal(u1, u2):
    """Box-Muller transform: map two U(0,1) samples to two N(0,1) samples."""
    # Clamp u1 away from zero so log(u1) stays finite.
    u1 = standard.maximum(1.0e-7, u1)
    # th = 2*pi * u2
    th = 6.283185307179586 * u2
    r = tl.sqrt(-2.0 * tl.log(u1))
    return r * tl.cos(th), r * tl.sin(th)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
@jit
def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # Two uniform streams feed one Box-Muller pair; only the first normal
    # sample is returned (the second Philox pair is unused).
    i1, i2, _, _ = randint4x(seed, offset, n_rounds)
    u1 = uint_to_uniform_float(i1)
    u2 = uint_to_uniform_float(i2)
    n1, _ = pair_uniform_to_normal(u1, u2)
    return n1
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@jit
def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # Four uniforms -> two Box-Muller pairs -> four normal samples.
    u1, u2, u3, u4 = rand4x(seed, offset, n_rounds)
    n1, n2 = pair_uniform_to_normal(u1, u2)
    n3, n4 = pair_uniform_to_normal(u3, u4)
    return n1, n2, n3, n4
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/build_extern.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import subprocess
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
from typing import Dict, List, Optional
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Symbol:
    """Immutable description of one extern-library function declaration."""

    _name: str
    _op_name: str
    _ret_type: str
    _arg_names: List[str]
    _arg_types: List[str]

    def __init__(self, name: str, op_name: str, ret_type: str,
                 arg_names: List[str], arg_types: List[str]) -> None:
        '''
        A symbol is a function declaration.

        :param name: name of the symbol
        :param op_name: name of the operation
        :param ret_type: return type of the operation
        :param arg_names: names of the arguments
        :param arg_types: types of the arguments
        '''
        self._name = name
        self._op_name = op_name
        self._ret_type = ret_type
        # Defensive copies: later mutation by the caller must not leak in.
        self._arg_names = list(arg_names)
        self._arg_types = list(arg_types)

    @property
    def name(self) -> str:
        return self._name

    @property
    def op_name(self) -> str:
        return self._op_name

    @property
    def ret_type(self) -> str:
        return self._ret_type

    @property
    def arg_names(self) -> List[str]:
        return self._arg_names

    @property
    def arg_types(self) -> List[str]:
        return self._arg_types
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def convert_type(type_str) -> Optional[str]:
    """Map an LLVM IR scalar type name to its triton dtype string.

    Returns None for anything unrecognized (e.g. pointer types), which the
    caller treats as "skip this symbol".
    """
    llvm_to_triton = {
        "i32": "int32",
        "u32": "uint32",
        "i64": "int64",
        "u64": "uint64",
        "float": "fp32",
        "double": "fp64",
    }
    # ignore other types, such as pointer types
    return llvm_to_triton.get(type_str)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def to_unsigned(type_str) -> str:
    """Return the unsigned twin of a signed triton int dtype; pass others through."""
    signed_to_unsigned = {"int32": "uint32", "int64": "uint64"}
    return signed_to_unsigned.get(type_str, type_str)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class ExternLibrary(ABC):
    """Base class for an external library whose symbols are parsed from an
    LLVM IR listing and rendered into a generated Python stub module."""

    _name: str
    _path: str
    _symbols: Dict[str, Symbol]
    _format: bool
    _grouping: bool

    def __init__(
        self,
        name: str,
        path: str,
        format: bool = True,
        grouping: bool = True,
    ) -> None:
        '''
        Abstract class for extern library.
        :param name: name of the library
        :param path: path of the library
        :param format: whether to format the generated stub file
        :param grouping: whether to group symbols (interpretation is left to subclasses)
        '''
        self._name = name
        self._path = path
        self._symbols = {}
        self._format = format
        self._grouping = grouping

    @property
    def name(self) -> str:
        return self._name

    @property
    def path(self) -> str:
        return self._path

    @property
    def symbols(self) -> Dict[str, Symbol]:
        return self._symbols

    @property
    def grouping(self) -> bool:
        return self._grouping

    @abstractmethod
    def parse_symbols(self, input_file) -> None:
        """Populate `self._symbols` from `input_file`."""

    @abstractmethod
    def _output_stubs(self) -> str:
        """Render the generated stub module as a single string."""

    def generate_stub_file(self, output_dir) -> None:
        """Write the rendered stubs to `<output_dir>/<name>.py`; optionally run
        autopep8 and isort over the result.

        :raises Exception: if the rendered stub text is empty.
        """
        file_str = self._output_stubs()
        if file_str is None or len(file_str) == 0:
            raise Exception("file_str is empty")

        output_file = f"{output_dir}/{self._name}.py"
        # The context manager closes the file; the previous explicit
        # f.close() inside the `with` block was redundant.
        with open(output_file, "w") as f:
            f.write(file_str)
        if self._format:
            subprocess.Popen(["autopep8", "-a", "-r", "-i", output_file], stdout=subprocess.PIPE).communicate()
            subprocess.Popen(["isort", output_file], stdout=subprocess.PIPE).communicate()
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class Libdevice(ExternLibrary):
    # Symbols that map to the same user-facing op name after renaming,
    # keyed by that op name.
    _symbol_groups: Dict[str, List[Symbol]]

    def __init__(self, path) -> None:
        '''
        Constructor for Libdevice.
        :param path: path of the libdevice library
        '''
        super().__init__("libdevice", path)
        self._symbol_groups = {}
        # The generated stubs pass is_pure=True through to extern_elementwise.
        self.is_pure = True

    @staticmethod
    def _extract_symbol(line) -> Optional[Symbol]:
        # Extract symbols from line in the following format:
        # "define [internal] <ret_type> @<name>(<arg_types>,)"
        entries = line.split("@")
        ret_str = entries[0]
        func_str = entries[1]
        # Get ret_type, skip internal symbols
        ret_strs = ret_str.split()
        if ret_strs[1] == "internal":
            return None
        ret_type = convert_type(ret_strs[1])
        if ret_type is None:
            return None
        # Get function name
        func_strs = func_str.split("(")
        func_name = func_strs[0].replace("@", "")
        op_name = func_name.replace("__nv_", "")
        # Skip the *_ieee variants; only the default-rounding symbols are exposed.
        if 'ieee' in op_name:
            return None
        # Get arg_types
        arg_strs = func_strs[1].split(",")
        arg_types = []
        arg_names = []
        for i, arg_str in enumerate(arg_strs):
            arg_type = convert_type(arg_str.split()[0])
            if arg_type is None:
                return None
            arg_name = 'arg' + str(i)
            arg_types.append(arg_type)
            arg_names.append(arg_name)
        if op_name == "sad":
            # Special case for sad, where the last argument is an unsigned int
            arg_types[-1] = to_unsigned(arg_types[-1])
        elif op_name.startswith("u"):
            # LLVM does not differentiate between signed and unsigned integer type.
            # We have to convert the types to unsigned
            ret_type = to_unsigned(ret_type)
            for i, arg_type in enumerate(arg_types):
                arg_types[i] = to_unsigned(arg_type)
        return Symbol(func_name, op_name, ret_type, arg_names, arg_types)

    def _group_symbols(self) -> None:
        # NOTE(review): symbol_set is written but never read afterwards;
        # it looks vestigial — confirm before removing.
        symbol_set = {}
        for symbol in self._symbols.values():
            op_name = symbol.op_name
            symbol_set[op_name] = symbol

        # Group functions together by renaming.
        # Maps each type-suffixed libdevice name (e.g. fabsf/fabs) onto one
        # overload-style op name (abs); dtype dispatch picks the symbol.
        renaming = {
            'llabs': 'abs', 'acosf': 'acos', 'acoshf': 'acosh', 'dadd_rd': 'add_rd', 'fadd_rd': 'add_rd', 'dadd_rn':
            'add_rn', 'fadd_rn': 'add_rn', 'dadd_ru': 'add_ru', 'fadd_ru': 'add_ru', 'dadd_rz': 'add_rz', 'fadd_rz':
            'add_rz', 'asinf': 'asin', 'asinhf': 'asinh', 'atanf': 'atan', 'atan2f': 'atan2', 'atanhf': 'atanh',
            'brevll': 'brev', 'cbrtf': 'cbrt', 'ceilf': 'ceil', 'clzll': 'clz', 'copysignf': 'copysign', 'cosf': 'cos',
            'coshf': 'cosh', 'cospif': 'cospi', 'cyl_bessel_i0f': 'cyl_bessel_i0', 'cyl_bessel_i1f': 'cyl_bessel_i1',
            'fdiv_rd': 'div_rd', 'ddiv_rd': 'div_rd', 'fdiv_rn': 'div_rn', 'ddiv_rn': 'div_rn', 'fdiv_ru': 'div_ru',
            'ddiv_ru': 'div_ru', 'fdiv_rz': 'div_rz', 'ddiv_rz': 'div_rz', 'erff': 'erf', 'erfcf': 'erfc', 'erfcinvf':
            'erfcinv', 'erfcxf': 'erfcx', 'erfinvf': 'erfinv', 'expf': 'exp', 'exp10f': 'exp10', 'exp2f': 'exp2',
            'expm1f': 'expm1', 'fabsf': 'abs', 'fabs': 'abs', 'fast_fdividef': 'fast_dividef', 'fdimf': 'fdim', 'ffsll':
            'ffs', 'floorf': 'floor', 'fmaf': 'fma', 'fmaf_rd': 'fma_rd', 'fmaf_rn': 'fma_rn', 'fmaf_ru': 'fma_ru',
            'fmaf_rz': 'fma_rz', 'fmodf': 'fmod', 'uhadd': 'hadd', 'hypotf': 'hypot', 'ilogbf': 'ilogb', 'isinff':
            'isinf', 'isinfd': 'isinf', 'isnanf': 'isnan', 'isnand': 'isnan', 'j0f': 'j0', 'j1f': 'j1', 'jnf': 'jn',
            'ldexpf': 'ldexp', 'lgammaf': 'lgamma', 'llrintf': 'llrint', 'llroundf': 'llround', 'logf': 'log', 'log10f':
            'log10', 'log1pf': 'log1p', 'log2f': 'log2', 'logbf': 'logb', 'umax': 'max', 'llmax': 'max', 'ullmax':
            'max', 'fmaxf': 'max', 'fmax': 'max', 'umin': 'min', 'llmin': 'min', 'ullmin': 'min', 'fminf': 'min',
            'fmin': 'min', 'dmul_rd': 'mul_rd', 'fmul_rd': 'mul_rd', 'dmul_rn': 'mul_rn', 'fmul_rn': 'mul_rn',
            'dmul_ru': 'mul_ru', 'fmul_ru': 'mul_ru', 'dmul_rz': 'mul_rz', 'fmul_rz': 'mul_rz', 'umul24': 'mul24',
            'umulhi': 'mulhi', 'mul64hi': 'mulhi', 'umul64hi': 'mulhi', 'nearbyintf': 'nearbyint', 'nextafterf':
            'nextafter', 'norm3df': 'norm3d', 'norm4df': 'norm4d', 'normcdff': 'normcdf', 'normcdfinvf': 'normcdfinv',
            'popcll': 'popc', 'powif': 'pow', 'powi': 'pow', 'powf': 'pow', 'rcbrtf': 'rcbrt', 'frcp_rd': 'rcp_rd',
            'drcp_rd': 'rcp_rd', 'frcp_rn': 'rcp_rn', 'drcp_rn': 'rcp_rn', 'frcp_ru': 'rcp_ru', 'drcp_ru': 'rcp_ru',
            'frcp_rz': 'rcp_rz', 'drcp_rz': 'rcp_rz', 'remainderf': 'remainder', 'urhadd': 'rhadd', 'rhypotf': 'rhypot',
            'rintf': 'rint', 'rnorm3df': 'rnorm3d', 'rnorm4df': 'rnorm4d', 'roundf': 'round', 'rsqrtf': 'rsqrt',
            'frsqrt_rn': 'rsqrt_rn', 'usad': 'sad', 'scalbnf': 'scalbn', 'signbitf': 'signbit', 'signbitd': 'signbit',
            'sinf': 'sin', 'sinhf': 'sinh', 'sinpif': 'sinpi', 'sqrtf': 'sqrt', 'fsqrt_rd': 'sqrt_rd', 'dsqrt_rd':
            'sqrt_rd', 'fsqrt_rn': 'sqrt_rn', 'dsqrt_rn': 'sqrt_rn', 'fsqrt_ru': 'sqrt_ru', 'dsqrt_ru': 'sqrt_ru',
            'fsqrt_rz': 'sqrt_rz', 'dsqrt_rz': 'sqrt_rz', 'fsub_rd': 'sub_rd', 'dsub_rd': 'sub_rd', 'fsub_rn': 'sub_rn',
            'dsub_rn': 'sub_rn', 'fsub_ru': 'sub_ru', 'dsub_ru': 'sub_ru', 'fsub_rz': 'sub_rz', 'dsub_rz': 'sub_rz',
            'tanf': 'tan', 'tanhf': 'tanh', 'tgammaf': 'tgamma', 'truncf': 'trunc', 'y0f': 'y0', 'y1f': 'y1', 'ynf':
            'yn'
        }

        for symbol in self._symbols.values():
            op_name = symbol.op_name
            if op_name in renaming:
                op_name = renaming[op_name]
                symbol._op_name = op_name
            if op_name in self._symbol_groups:
                self._symbol_groups[op_name].append(symbol)
            else:
                self._symbol_groups[op_name] = [symbol]

    def parse_symbols(self, input_file) -> None:
        # Idempotent: a second call on an already-populated library is a no-op.
        if len(self.symbols) > 0:
            return
        # Pull only the `define` lines out of the .ll listing.
        output = subprocess.check_output(["grep", "define", input_file]).decode().splitlines()
        for line in output:
            symbol = self._extract_symbol(line)
            if symbol is None:
                continue
            self._symbols[symbol.name] = symbol

        self._group_symbols()

    def _output_stubs(self) -> str:
        # Generate python functions in the following format:
        # @extern.extern
        # def <op_name>(<args>, _builder=None):
        #     arg_type_symbol_dict = {[arg_type]: {(symbol, ret_type)}}
        #     return core.extern_elementwise("libdevice", <path>, <args>, <arg_type_symbol_dict>, _builder)
        import_str = "from . import core\n"
        import_str += "import os\n"
        import_str += "import functools\n"

        header_str = ""
        header_str += "@functools.lru_cache()\n"
        header_str += "def libdevice_path():\n"
        header_str += "    import torch\n"
        header_str += "    third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"third_party\")\n"
        header_str += "    if torch.version.hip is None:\n"
        header_str += "        default = os.path.join(third_party_dir, \"cuda\", \"lib\", \"libdevice.10.bc\")\n"
        header_str += "    else:\n"
        header_str += "        default = ''\n"
        header_str += "    return os.getenv(\"TRITON_LIBDEVICE_PATH\", default)\n"
        func_str = ""
        # One stub per symbol group; each stub's dtype->symbol dict performs
        # overload resolution at trace time.
        for symbols in self._symbol_groups.values():
            func_str += "@core.extern\n"
            func_name_str = f"def {symbols[0].op_name}("
            for arg_name in symbols[0].arg_names:
                func_name_str += f"{arg_name}, "
            func_name_str += "_builder=None):\n"

            return_str = f"\treturn core.extern_elementwise(\"{self._name}\", libdevice_path(), ["
            for arg_name in symbols[0].arg_names:
                return_str += f"{arg_name}, "
            return_str += "], \n"

            arg_type_symbol_dict_str = "{"
            for symbol in symbols:
                arg_type_symbol_dict_str += "("
                for arg_type in symbol.arg_types:
                    arg_type_symbol_dict_str += f'core.dtype("{arg_type}"),'
                ret_type = f'core.dtype("{symbol.ret_type}")'
                arg_type_symbol_dict_str += "): (\"" + symbol.name + "\", " + ret_type + "),\n"
            arg_type_symbol_dict_str += "}"

            return_str += arg_type_symbol_dict_str
            return_str += f", is_pure={self.is_pure}"
            return_str += ", _builder=_builder)\n"

            func_str += func_name_str + return_str + "\n"
        file_str = import_str + header_str + func_str

        return file_str
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
class LLVMDisassembler:
    """Thin wrapper around the llvm-dis binary; writes the disassembly of a
    bitcode library to a fixed temporary .ll file."""

    _path: str
    _ll_file: str

    def __init__(self, path) -> None:
        '''
        Invoke llvm-dis to disassemble the given file.
        :param path: path to llvm-dis
        '''
        self._path = path
        self._ll_file = "/tmp/extern_lib.ll"

    @property
    def ll_file(self) -> str:
        return self._ll_file

    @property
    def path(self) -> str:
        return self._path

    def disasm(self, lib_path: str) -> None:
        """Disassemble `lib_path` into `self.ll_file`, blocking until done."""
        command = [self._path, lib_path, "-o", self._ll_file]
        subprocess.Popen(command, stdout=subprocess.PIPE).communicate()
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
extern_libs = ["libdevice"]


def build(
    llvm_dis_path: str,
    lib_path: str,
    lib_name: str,
    output_dir: str,
) -> None:
    '''
    Interface function to build the library file.
    :param llvm_dis_path: path to the llvm-dis binary
    :param lib_path: path to the external library file
    :param lib_name: name of the library
    :param output_dir: path to the output directory
    '''
    # Dispatch on the library name; libdevice is the only supported target.
    factories = {"libdevice": Libdevice}
    if lib_name not in factories:
        raise Exception(f"Unknown extern library: {lib_name}")
    extern_lib = factories[lib_name](lib_path)

    disassembler = LLVMDisassembler(llvm_dis_path)
    disassembler.disasm(lib_path)

    extern_lib.parse_symbols(disassembler.ll_file)
    extern_lib.generate_stub_file(output_dir)
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
if __name__ == "__main__":
    # CLI entry point: disassemble an extern library with llvm-dis and emit
    # the generated Python stub module into --output.
    parser = argparse.ArgumentParser()
    parser.add_argument("--llvm-dis", dest="llvm_dis_path", help="Path to llvm-dis", default="llvm-dis")
    parser.add_argument("--lib-path", dest="lib_path", help="Path to the extern library")
    parser.add_argument("--lib-name", dest="lib_name", help="Name of the extern library")
    parser.add_argument("--output", dest="output_dir", help="Output file path", default="/tmp/")
    args = parser.parse_args()

    build(args.llvm_dis_path, args.lib_path, args.lib_name, args.output_dir)
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/compile.c
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* clang-format off */
/* NOTE(review): this file appears to be a str.format template (doubled braces
   become literal braces; single-brace names are placeholders filled in by the
   stub compiler) -- confirm against tools/compile.py before editing braces. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <cuda.h>


// helpers to check for cuda errors
#define CUDA_CHECK(ans) {{\
    gpuAssert((ans), __FILE__, __LINE__);\
  }}\

// Print a prefixed CUDA driver error message and exit with the error code.
static inline void gpuAssert(CUresult code, const char *file, int line) {{
  if (code != CUDA_SUCCESS) {{
    const char *prefix = "Triton Error [CUDA]: ";
    const char *str;
    cuGetErrorString(code, &str);
    char err[1024] = {{0}};
    strcat(err, prefix);
    strcat(err, str);
    printf("%s\\n", err);
    exit(code);
  }}
}}

// globals
#define CUBIN_NAME {kernel_name}_cubin
CUmodule {kernel_name}_mod = NULL;
CUfunction {kernel_name}_func = NULL;
unsigned char CUBIN_NAME[{bin_size}] = {{ {bin_data} }};


void unload_{kernel_name}(void) {{
    CUDA_CHECK(cuModuleUnload({kernel_name}_mod));
}}

// TODO: some code duplication with `runtime/backend/cuda.c`
// Lazily load the embedded cubin and resolve the kernel function handle.
void load_{kernel_name}() {{
    int dev = 0;
    void *bin = (void *)&CUBIN_NAME;
    int shared = {shared};
    CUDA_CHECK(cuModuleLoadData(&{kernel_name}_mod, bin));
    CUDA_CHECK(cuModuleGetFunction(&{kernel_name}_func, {kernel_name}_mod, "{triton_kernel_name}"));
    // set dynamic shared memory if necessary
    int shared_optin;
    CUDA_CHECK(cuDeviceGetAttribute(&shared_optin, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, dev));
    if (shared > 49152 && shared_optin > 49152) {{
      CUDA_CHECK(cuFuncSetCacheConfig({kernel_name}_func, CU_FUNC_CACHE_PREFER_SHARED));
      // Fix: terminate the statement with a semicolon like every other CUDA_CHECK use.
      CUDA_CHECK(cuFuncSetAttribute({kernel_name}_func, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_optin));
    }}
}}

/*
{kernel_docstring}
*/
CUresult {kernel_name}(CUstream stream, {signature}) {{
    if ({kernel_name}_func == NULL)
       load_{kernel_name}();
    unsigned int gX = {gridX};
    unsigned int gY = {gridY};
    unsigned int gZ = {gridZ};
    void *args[{num_args}] = {{ {arg_pointers} }};
    // TODO: shared memory
    if(gX * gY * gZ > 0)
      return cuLaunchKernel({kernel_name}_func, gX, gY, gZ, {num_warps} * 32, 1, 1, {shared}, stream, args, NULL);
    // Fix: the original fell off the end of a non-void function when the
    // grid was empty, returning an indeterminate CUresult. An empty launch
    // is a successful no-op.
    return CUDA_SUCCESS;
}}
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/compile.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
 * Header template for an ahead-of-time compiled Triton kernel stub.
 * compile.py renders this file with str.format, substituting the
 * kernel_name, full_signature, algo_info, signature and _placeholder
 * fields. Comments added here must not contain brace characters.
 */
#ifndef TT_KERNEL_INCLUDES
#define TT_KERNEL_INCLUDES

#include <cuda.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#endif

/* Lazy module load / unload, implemented in the matching .c stub. */
void unload_{kernel_name}(void);
void load_{kernel_name}(void);
/* Machine-readable directive consumed by link.py when generating the
   combined dispatcher; do not change its format. */
// tt-linker: {kernel_name}:{full_signature}:{algo_info}
CUresult{_placeholder} {kernel_name}(CUstream stream, {signature});
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/compile.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import binascii
import hashlib
import importlib.util
import sys
from argparse import ArgumentParser
from pathlib import Path
from typing import List

import triton
from triton.compiler.code_generator import kernel_suffix
from triton.compiler.make_launcher import ty_to_cpp

desc = """
Triton ahead-of-time compiler:

This program compiles the kernel with name `kernel-name` in the file at the
provided `path` into self-contained C source-code that embeds the `cubin`
data along with utilities to load, unload and launch the kernel.

signature is provided as a list of (optionally divisibility-hinted) types
or constexpr values, e.g.

`compile.py --kernel-name kernel --signature "*fp32:16, i32:16, 1024, i32" --out-name kernel /path/to/kernel.py`

will compile triton.JITFunction of name `kernel` inside the file `/path/to/kernel.py`.
Said kernel will be specialized such that argument 0, 1 are assumed to be multiple of 16,
and argument 2 is assumed to be a compile-time constant of value 1024, i.e. it won't be part of the generated prototype.

The resulting entry point will have signature

CUresult kernel_{specialization_suffix}(CUstream stream, unsigned gX, unsigned gY, unsigned gZ, float* arg0, int32_t arg1, int32_t arg2)

Different such specialized entry points can be combined using the `linker.py` script.

NOTE: when resolving the scope of /path/to/kernel.py, the file will be executed from within its parent directory with the python interpreter
used to run this `compile.py` script
"""

if __name__ == "__main__":

    # command-line arguments
    parser = ArgumentParser(description=desc)
    parser.add_argument("path",
                        help="Path to Python source containing desired kernel in its scope. File will be executed.")
    parser.add_argument("--kernel-name", "-n", type=str, default="", help="Name of the kernel to compile",
                        required=True)
    parser.add_argument("--num-warps", "-w", type=int, default=1, help="Number of warps to launch the kernel")
    parser.add_argument("--num-stages", "-ns", type=int, default=3,
                        help="Number of stages (meta-parameter of the kernel)")
    parser.add_argument("--out-name", "-on", type=str, default=None, help="Out name for the compiled kernel")
    parser.add_argument("--out-path", "-o", type=Path, default=None, help="Out filename")
    parser.add_argument("--signature", "-s", type=str, help="Signature of the kernel", required=True)
    parser.add_argument("--grid", "-g", type=str, help="Launch grid of the kernel", required=True)
    args = parser.parse_args()

    out_name = args.out_name if args.out_name else args.kernel_name
    out_path = args.out_path if args.out_path else Path(out_name)

    # execute python sources and extract functions wrapped in JITFunction
    arg_path = Path(args.path)
    sys.path.insert(0, str(arg_path.parent))
    spec = importlib.util.spec_from_file_location(arg_path.stem, arg_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    kernel = getattr(mod, args.kernel_name)
    grid = args.grid.split(",")
    assert len(grid) == 3

    # validate and parse signature
    signature = list(map(lambda s: s.strip(" "), args.signature.split(",")))

    def hash_signature(signature: List[str]):
        # Short, stable hash used to disambiguate specializations in file and
        # symbol names.
        m = hashlib.sha256()
        m.update(" ".join(signature).encode())
        return m.hexdigest()[:8]

    meta_sig = f"warps{args.num_warps}xstages{args.num_stages}"
    sig_hash = hash_signature(signature + [meta_sig])

    def constexpr(s):
        # Parse a signature token as a compile-time constant; int takes
        # precedence over float, non-numeric tokens yield None (i.e. a type).
        try:
            ret = int(s)
            return ret
        except ValueError:
            pass
        try:
            ret = float(s)
            return ret
        except ValueError:
            pass
        return None

    # "ty:hint" tokens carry a specialization hint after the colon.
    hints = {i: constexpr(s.split(":")[1]) for i, s in enumerate(signature) if ":" in s}
    hints = {k: v for k, v in hints.items() if v is not None}
    # Purely numeric tokens are constexpr arguments baked into the kernel.
    constexprs = {i: constexpr(s) for i, s in enumerate(signature)}
    constexprs = {k: v for k, v in constexprs.items() if v is not None}
    signature = {i: s.split(":")[0] for i, s in enumerate(signature) if i not in constexprs}
    const_sig = 'x'.join([str(v) for v in constexprs.values()])
    doc_string = [f"{kernel.arg_names[i]}={constexprs[i]}" for i in constexprs.keys()]
    doc_string += [f"num_warps={args.num_warps}", f"num_stages={args.num_stages}"]

    # compile ast into cubin
    for h in hints.values():
        assert h in [1, 16], f"Only 1 and 16 are valid hints, got {h}"
    divisible_by_16 = [i for i, h in hints.items() if h == 16]
    equal_to_1 = [i for i, h in hints.items() if h == 1]
    config = triton.compiler.instance_descriptor(divisible_by_16=divisible_by_16, equal_to_1=equal_to_1)
    for i in equal_to_1:
        constexprs.update({i: 1})
    ccinfo = triton.compile(kernel, signature=signature, constants=constexprs, configs=[config],
                            num_warps=args.num_warps, num_stages=args.num_stages)
    # Arguments specialized to 1 are baked in and dropped from the C prototype.
    arg_names = []
    arg_types = []
    for i in signature.keys():
        if i not in equal_to_1:
            arg_names += [kernel.arg_names[i]]
            arg_types += [signature[i]]

    # dump C stub code
    suffix = kernel_suffix(signature.values(), config)
    func_name = '_'.join([out_name, sig_hash, suffix])
    triton_kernel_name = '_'.join([args.kernel_name, suffix])
    hex_ = str(binascii.hexlify(ccinfo.asm["cubin"]))[2:-1]
    params = {
        "kernel_name": func_name,
        "triton_kernel_name": triton_kernel_name,
        # BUG FIX: hex_ has two characters per cubin byte and bin_data emits
        # one initializer per byte pair, so the C array size is len(hex_) // 2
        # (previously len(hex_), which declared the array at twice its size).
        "bin_size": len(hex_) // 2,
        "bin_data": ", ".join([f"0x{x}{y}" for x, y in zip(hex_[::2], hex_[1::2])]),
        "signature": ", ".join([f"{ty_to_cpp(ty)} {name}" for name, ty in zip(arg_names, arg_types)]),
        "full_signature": ", ".join([f"{ty_to_cpp(signature[i])} {kernel.arg_names[i]}" for i in signature.keys()]),
        "arg_pointers": ", ".join([f"&{arg}" for arg in arg_names]),
        "num_args": len(arg_names),
        # NOTE(review): doc_string is a list, so its repr is what lands in the
        # generated C comment — confirm this rendering is intended.
        "kernel_docstring": doc_string,
        "shared": ccinfo.shared,
        "num_warps": args.num_warps,
        "algo_info": '_'.join([const_sig, meta_sig]),
        "gridX": grid[0],
        "gridY": grid[1],
        "gridZ": grid[2],
        "_placeholder": "",
    }
    # Render the sibling compile.h / compile.c templates with the params above.
    for ext in ['h', 'c']:
        template_path = Path(__file__).parent / f"compile.{ext}"
        with out_path.with_suffix(f".{sig_hash}_{suffix}.{ext}").open("w") as fp:
            fp.write(Path(template_path).read_text().format(**params))
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/disasm.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MIT License
|
| 2 |
+
|
| 3 |
+
# Copyright (c) 2020 Da Yan @ HKUST
|
| 4 |
+
|
| 5 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
# in the Software without restriction, including without limitation the rights
|
| 8 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
# furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
# The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
# copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
# SOFTWARE.
|
| 22 |
+
|
| 23 |
+
import functools
|
| 24 |
+
import os
|
| 25 |
+
import re
|
| 26 |
+
import subprocess
|
| 27 |
+
import tempfile
|
| 28 |
+
|
| 29 |
+
from ..common.backend import path_to_cuobjdump, path_to_nvdisasm
|
| 30 |
+
|
| 31 |
+
# Instruction line as printed by cuobjdump: /*addr*/ <asm>; /* 0x<16-hex encoding> */
FLINE_RE = re.compile(r'\s*/\*\w{4}\*/\s*([^;]*;)\s*/\* 0x(\w{16}) \*/\s*')
# Second line of an instruction pair: the 64-bit control-word encoding only.
SLINE_RE = re.compile(r'\s*/\* 0x(\w{16}) \*/\s*')
# Function header emitted by cuobjdump ("Function : <name>").
FNAME_RE = re.compile(r'\s*Function : (\w+)\s*')
# Branch instruction whose target is a hex address (group 2).
BRA_RE = re.compile(r'(.*BRA(?:\.U)? )(0x\w+);')
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def parseCtrl(sline):
    """Decode the 64-bit control word of a SASS instruction pair into a
    'waitbarrier:readbarrier:writebarrier:yield:stall' display tag."""
    word = int(SLINE_RE.match(sline).group(1), 16)
    stall_cnt = (word >> 41) & 0xf
    yield_bit = (word >> 45) & 0x1
    write_bar = (word >> 46) & 0x7
    read_bar = (word >> 49) & 0x7
    wait_mask = (word >> 52) & 0x3f

    # 7 means "no barrier", 0 wait-mask means "waits on nothing".
    yield_tag = 'Y' if yield_bit == 0 else '-'
    write_tag = str(write_bar) if write_bar != 7 else '-'
    read_tag = str(read_bar) if read_bar != 7 else '-'
    wait_tag = f'{wait_mask:02d}' if wait_mask != 0 else '--'
    return f'{wait_tag}:{read_tag}:{write_tag}:{yield_tag}:{stall_cnt:x}'
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def processSassLines(fline, sline, labels):
    """Turn one (instruction, encoding) line pair into a (ctrl, asm) tuple,
    registering any previously unseen branch-target address in *labels*."""
    asm = FLINE_RE.match(fline).group(1)
    # Drop the space cuobjdump prints before the terminating semicolon.
    if asm.endswith(" ;"):
        asm = asm[:-2] + ";"
    ctrl = parseCtrl(sline)
    # A new BRA target gets the next sequential label index.
    branch = BRA_RE.match(asm)
    if branch is not None:
        target = int(branch.group(2), 16)
        labels.setdefault(target, len(labels))
    return (ctrl, asm)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@functools.lru_cache()
def get_sass(cubin_asm, fun=None):
    """Disassemble an in-memory cubin (optionally restricted to function *fun*),
    caching results per (cubin, fun) pair."""
    handle, tmp_path = tempfile.mkstemp()
    try:
        # extract() drives cuobjdump, which needs a file on disk.
        with open(handle, 'wb') as tmp_file:
            tmp_file.write(cubin_asm)
        listing = extract(tmp_path, fun)
    finally:
        os.remove(tmp_path)
    return listing
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def extract(file_path, fun):
    """Run cuobjdump on a cubin file and pretty-print its SASS with labels.

    file_path: path to the cubin on disk.
    fun: optional function name to restrict the disassembly to.
    Returns the annotated listing, or None when no function header is found.
    """
    cuobjdump, _ = path_to_cuobjdump()
    nvdisasm, _ = path_to_nvdisasm()
    # cuobjdump shells out to nvdisasm; point it at the resolved binary.
    os.environ["NVDISASM_PATH"] = nvdisasm
    if fun is None:
        sass_str = subprocess.check_output([cuobjdump, "-sass", file_path])
    else:
        sass_str = subprocess.check_output([cuobjdump, "-fun", fun, "-sass", file_path])
    sass_lines = sass_str.splitlines()
    line_idx = 0
    while line_idx < len(sass_lines):
        line = sass_lines[line_idx].decode()
        # format:
        # function : <function_name>
        # .headerflags: ...
        # /*0000*/ asmstr /*0x...*/
        #          /*0x...*/

        # Looking for new function header (function: <name>)
        while FNAME_RE.match(line) is None:
            line_idx += 1
            if line_idx < len(sass_lines):
                line = sass_lines[line_idx].decode()
            else:
                return

        fname = FNAME_RE.match(line).group(1)
        # NOTE(review): ret is re-initialized for every function header, so
        # only the last function's listing is returned when several are
        # present — confirm this is intended.
        ret = ''
        ret += f'Function:{fname}\n'
        line_idx += 2  # bypass .headerflags
        line = sass_lines[line_idx].decode()
        # Remapping address to label
        labels = {}  # address -> label_idx
        # store sass asm in buffer and them print them (for labels)
        # (ctrl, asm)
        asm_buffer = []
        while FLINE_RE.match(line) is not None:
            # First line (Offset ASM Encoding)
            fline = sass_lines[line_idx].decode()
            line_idx += 1
            # Second line (Encoding)
            sline = sass_lines[line_idx].decode()
            line_idx += 1
            asm_buffer.append(processSassLines(fline, sline, labels))
            # peek the next line
            line = sass_lines[line_idx].decode()
        # Print sass
        # label naming convention: LBB#i
        for idx, (ctrl, asm) in enumerate(asm_buffer):
            # Print label if this is BRA target
            # offsets assume one 16-byte (ctrl, asm) pair per instruction
            offset = idx * 16
            if offset in labels:
                label_name = f'LBB{labels[offset]}'
                ret += f'{label_name}:\n'
            ret += ctrl + '\t'
            # if this is BRA, remap offset to label
            if BRA_RE.match(asm):
                target = int(BRA_RE.match(asm).group(2), 16)
                target_name = f'LBB{labels[target]}'
                asm = BRA_RE.sub(rf'\1{target_name};', asm)
            ret += asm + '\n'
        ret += '\n'
    return ret
|
evalkit_cambrian/lib/python3.10/site-packages/triton/tools/link.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Sequence, Union
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _exists(x):
|
| 9 |
+
return x is not None
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class LinkerError(Exception):
    """Raised when a header cannot be parsed into valid kernel-linker metadata."""
    pass
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@dataclass
class KernelLinkerMeta:
    # Per-specialization metadata recovered from a compile.py-generated header.
    orig_kernel_name: str  # kernel name without hash/suffix decoration
    arg_names: Sequence[str]  # C argument names, in declaration order
    arg_ctypes: Sequence[str]  # C argument types, parallel to arg_names
    sizes: Sequence[Union[int, None]]  # per-arg hint: 16 (divisible), 1 (const), None (unspecialized)
    sig_hash: str  # short hash identifying the full signature
    triton_suffix: str  # specialization suffix as emitted by Triton
    suffix: str  # specialization suffix used in generated symbol names
    num_specs: int
    """ number of specialized arguments """
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class HeaderParser:
    """Parses compile.py-generated headers and groups kernel specializations.

    Kernels are keyed by "<name>_<algo_info>" in self.kernels; each value is
    the list of KernelLinkerMeta entries seen so far for that key.
    """

    def __init__(self) -> None:
        import re

        # [kernel_name, c signature]
        self.linker_directives = re.compile("//[\\s]*tt-linker:[\\s]*([\\w]+):(.+):(.+)")
        # [name, hash, suffix]
        self.kernel_name = re.compile("^([\\w]+)_([\\w]+)_([\\w]+)$")
        # [(type, name)]
        self.c_sig = re.compile("[\\s]*(\\w+)\\s(\\w+)[,]?")
        # [d|c]
        # NOTE(review): this character class also matches a literal comma —
        # harmless for well-formed suffixes, but confirm it is intentional.
        self.arg_suffix = re.compile("[c,d]")

        self.kernels = defaultdict(list)

    def extract_linker_meta(self, header: str):
        """Scan a header for "// tt-linker:" directives and register each one."""
        for ln in header.splitlines():
            if ln.startswith("//"):
                m = self.linker_directives.match(ln)
                if _exists(m):
                    ker_name, c_sig, algo_info = m.group(1), m.group(2), m.group(3)
                    name, sig_hash, suffix = self._match_name(ker_name)
                    c_types, arg_names = self._match_c_sig(c_sig)
                    num_specs, sizes = self._match_suffix(suffix, c_sig)
                    self._add_kernel(
                        "_".join([name, algo_info]),
                        KernelLinkerMeta(
                            orig_kernel_name=name,
                            arg_names=arg_names,
                            arg_ctypes=c_types,
                            sizes=sizes,
                            sig_hash=sig_hash,
                            triton_suffix=suffix,
                            suffix=suffix,
                            num_specs=num_specs,
                        ),
                    )

    def _match_name(self, ker_name: str):
        """Split a decorated kernel symbol into (name, sig_hash, suffix)."""
        m = self.kernel_name.match(ker_name)
        if _exists(m):
            name, sig_hash, suffix = m.group(1), m.group(2), m.group(3)
            return name, sig_hash, suffix
        raise LinkerError(f"{ker_name} is not a valid kernel name")

    def _match_c_sig(self, c_sig: str):
        """Split a C argument list into parallel (types, names) lists."""
        m = self.c_sig.findall(c_sig)
        if len(m):
            tys, args = [], []
            for ty, arg_name in m:
                tys.append(ty)
                args.append(arg_name)
            return tys, args

        raise LinkerError(f"{c_sig} is not a valid argument signature")

    def _match_suffix(self, suffix: str, c_sig: str):
        """Recover per-argument specialization hints from a kernel suffix.

        Returns (num_specs, sizes) where sizes[i] is 16 for a
        divisibility-specialized argument ("d"), 1 for a constant ("c"),
        and None for an unspecialized argument.
        """
        args = c_sig.split(",")
        s2i = {"c": 1, "d": 16}
        num_specs = 0
        sizes = []
        # scan through suffix, first find the index,
        # then see if it is followed by d or c
        for i in range(len(args)):
            pos = suffix.find(str(i))
            if pos == -1:
                raise LinkerError(f"{suffix} is not a valid kernel suffix")
            pos += len(str(i))
            if self.arg_suffix.match(suffix, pos):
                num_specs += 1
                # pad skipped (unspecialized) positions with None
                sizes.extend([None] * (i - len(sizes)))
                sizes.append(s2i[suffix[pos]])
                pos += 1
            if i < len(args) - 1:
                # consume the processed prefix so the next index is found
                # at the front of the remaining suffix
                suffix = suffix[pos:]
            else:
                sizes.extend([None] * (len(args) - len(sizes)))
        return num_specs, sizes

    def _add_kernel(self, name: str, ker: KernelLinkerMeta):
        """Append a specialization, enforcing a consistent C signature per key."""
        if name in self.kernels:
            last: KernelLinkerMeta = self.kernels[name][-1]

            for cur, new_ in zip(last.arg_ctypes, ker.arg_ctypes):
                if cur != new_:
                    raise LinkerError(
                        f"Mismatched signature for kernel {name}: \n\texisting sig is: {','.join(last.arg_ctypes)}\n\tcurrent is: {','.join(ker.arg_ctypes)}"
                    )

        self.kernels[name].append(ker)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def gen_signature_with_full_args(m):
    """Render the complete C parameter list ("type name, ...") for a kernel,
    including constant-specialized arguments."""
    rendered = []
    for ctype, arg in zip(m.arg_ctypes, m.arg_names):
        rendered.append(f"{ctype} {arg}")
    return ", ".join(rendered)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def gen_signature(m):
    """Render the C parameter list with arguments specialized to the constant 1
    dropped (they are baked into the cubin and absent from the stub prototype)."""
    kept = [(ctype, arg)
            for ctype, arg, hint in zip(m.arg_ctypes, m.arg_names, m.sizes)
            if hint != 1]
    return ", ".join(f"{ctype} {arg}" for ctype, arg in kept)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# generate declarations of kernels with meta-parameter and constant values
def make_algo_decls(name: str, metas: Sequence[KernelLinkerMeta]) -> str:
    """Emit the C declarations (dispatcher + load/unload) for one kernel family.
    All metas share one C signature, so the last entry is representative."""
    return f"""
CUresult {name}(CUstream stream, {gen_signature_with_full_args(metas[-1])});
void load_{name}();
void unload_{name}();
"""
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# generate declarations of kernels with meta-parameter and constant values
def make_global_decl(meta: KernelLinkerMeta) -> str:
    """Emit the global entry-point declarations (default wrapper, algo-id
    dispatcher, load/unload) for the kernel described by *meta*."""
    return f"""
CUresult {meta.orig_kernel_name}_default(CUstream stream, {gen_signature_with_full_args(meta)});
CUresult {meta.orig_kernel_name}(CUstream stream, {gen_signature_with_full_args(meta)}, int algo_id);
void load_{meta.orig_kernel_name}();
void unload_{meta.orig_kernel_name}();
"""
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
# generate dispatcher function for kernels with different meta-parameter and constant values
def make_default_algo_kernel(meta: KernelLinkerMeta) -> str:
    """Emit a convenience C wrapper that invokes the dispatcher with algo_id 0."""
    kernel = meta.orig_kernel_name
    parts = [
        f"CUresult {kernel}_default(CUstream stream, {gen_signature_with_full_args(meta)}){{\n",
        f" return {kernel}(stream, {', '.join(meta.arg_names)}, 0);\n",
        "}\n",
    ]
    return "".join(parts)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# generate dispatcher function for kernels with different integer value hints
def make_kernel_hints_dispatcher(name: str, metas: Sequence[KernelLinkerMeta]) -> str:
    """Emit the C dispatcher that, given runtime argument values, forwards to
    the most-specialized compiled variant whose hints are satisfied, plus the
    per-family load/unload fan-out functions."""
    src = f"// launcher for: {name}\n"
    # Most-specialized variants are declared (and later tested) first.
    for meta in sorted(metas, key=lambda m: -m.num_specs):
        src += f"CUresult {meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}(CUstream stream, {gen_signature(meta)});\n"
    src += "\n"

    src += (f"CUresult {name}(CUstream stream, {gen_signature_with_full_args(metas[-1])}){{")
    src += "\n"
    for meta in sorted(metas, key=lambda m: -m.num_specs):
        # hint 16 -> divisibility check, hint 1 -> equality check
        cond_fn = (  #
            lambda val, hint: f"({val} % {hint} == 0)"  #
            if hint == 16  #
            else f"({val} == {hint})"  #
            if hint == 1  #
            else None)
        conds = " && ".join([  #
            cond_fn(val, hint)  #
            for val, hint in zip(meta.arg_names, meta.sizes)  #
            if hint is not None
        ])
        src += (f" if ({conds})\n" if any(meta.sizes) else "if (1)\n"
                )  # Edge case where no specializations hence no dispatching required
        # arguments specialized to 1 are not part of the variant's prototype
        arg_names = [arg for arg, hint in zip(meta.arg_names, meta.sizes) if hint != 1]
        src += f" return {meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}(stream, {', '.join(arg_names)});\n"
    src += "\n"
    src += " return CUDA_ERROR_INVALID_VALUE;\n"
    src += "}\n"

    for mode in ["load", "unload"]:
        src += f"\n// {mode} for: {name}\n"
        for meta in sorted(metas, key=lambda m: -m.num_specs):
            src += f"void {mode}_{meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}();\n"
        src += f"void {mode}_{name}() {{"
        src += "\n"
        for meta in sorted(metas, key=lambda m: -m.num_specs):
            src += (f" {mode}_{meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}();\n")
        src += "}\n"
    return src
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# generate dispatcher function for kernels with different meta-parameter and constant values
def make_kernel_meta_const_dispatcher(meta: KernelLinkerMeta) -> str:
    """Emit the top-level C dispatcher that selects a specialization by algo_id.

    Bug fix: the bounds assert previously compared algo_id against
    sizeof(<kernel>_kernels), which is a byte count (entries * pointer size),
    so out-of-range ids could pass; it now divides by the element size.
    """
    kernel = meta.orig_kernel_name
    src = f"CUresult {kernel}(CUstream stream, {gen_signature_with_full_args(meta)}, int algo_id){{\n"
    src += f" assert (algo_id < (int)(sizeof({kernel}_kernels) / sizeof({kernel}_kernels[0])));\n"
    src += f" return {kernel}_kernels[algo_id](stream, {', '.join(meta.arg_names)});\n"
    src += "}\n"
    return src
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
# generate definition of function pointers of kernel dispatchers based on meta-parameter and constant values
def make_func_pointers(names: str, meta: KernelLinkerMeta) -> str:
    """Emit the C table of hint-dispatcher function pointers, indexed by algo_id."""
    # the table of hint dispatchers — all entries share one prototype
    src = f"typedef CUresult (*kernel_func_t)(CUstream stream, {gen_signature_with_full_args(meta)});\n"
    src += f"kernel_func_t {meta.orig_kernel_name}_kernels[] = {{\n"
    src += "".join(f" {name},\n" for name in names)
    src += "};\n"
    return src
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# generate definition for load/unload functions for kernels with different meta-parameter and constant values
def make_kernel_load_def(names: str, meta: KernelLinkerMeta) -> str:
    """Emit load_<kernel> / unload_<kernel> that forward to every family entry."""
    chunks = []
    for mode in ["load", "unload"]:
        chunks.append(f"void {mode}_{meta.orig_kernel_name}(void){{\n")
        chunks.extend(f" {mode}_{name}();\n" for name in names)
        chunks.append("}\n\n")
    return "".join(chunks)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def make_get_num_algos_decl(meta: KernelLinkerMeta) -> str:
    """Emit the prototype of the algo-count query for this kernel family."""
    return f"int {meta.orig_kernel_name}_get_num_algos(void);"
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def make_get_num_algos_def(meta: KernelLinkerMeta) -> str:
    """Emit the definition of the algo-count query.

    Bug fix: sizeof(array) in C yields a byte count, not an element count, so
    the generated code previously reported entries * sizeof(pointer); it now
    divides by the size of one element to return the actual number of algos.
    """
    kernel = meta.orig_kernel_name
    src = f"int {kernel}_get_num_algos(void){{\n"
    src += f" return (int)(sizeof({kernel}_kernels) / sizeof({kernel}_kernels[0]));\n"
    src += "}\n"
    return src
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
desc = """
Triton ahead-of-time linker:

This program takes in header files generated by compile.py, and generates a
single entry-point responsible for dispatching the user's input to the right
kernel given the specializations that were compiled.

Example usage:
python link.py /path/to/headers/*.h -o kernel_name
"""

if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser(description=desc)
    parser.add_argument(
        "headers",
        nargs="+",
        help="Paths to header files to link. Must include linker directive annotations (autogenerated by ttc)",
    )
    parser.add_argument("--out", "-o", type=Path, help="Out filename")
    # NOTE(review): --prefix is accepted but never read below — confirm
    # whether prefixing was meant to be applied to the generated names.
    parser.add_argument(
        "--prefix",
        type=str,
        default="",
        help="String to prefix kernel dispatcher names",
    )
    args = parser.parse_args()

    # metadata
    # Collect KernelLinkerMeta entries from every input header.
    parser = HeaderParser()
    includes = []
    for header in args.headers:
        h_path = Path(header)
        h_str = h_path.read_text()
        includes.append(h_path.name)
        parser.extract_linker_meta(h_str)

    # generate headers
    # NOTE(review): meta is taken from the first specialization of the first
    # kernel key — this assumes all headers describe one kernel family with a
    # single shared C signature; confirm multi-kernel inputs are unsupported.
    algo_decls = [make_algo_decls(name, meta) for name, meta in parser.kernels.items()]
    meta_lists = [meta for name, meta in parser.kernels.items()]
    meta = meta_lists[0][0]
    get_num_algos_decl = make_get_num_algos_decl(meta)
    global_decl = make_global_decl(meta)
    with args.out.with_suffix(".h").open("w") as fp:
        out = "#include <cuda.h>\n"
        out += "\n".join(algo_decls)
        out += "\n"
        out += get_num_algos_decl
        out += "\n"
        out += global_decl
        fp.write(out)

    # generate source
    # One hint dispatcher per "<name>_<algo_info>" key, then the shared
    # function-pointer table, algo-id dispatcher, load/unload and default entry.
    defs = [make_kernel_hints_dispatcher(name, meta) for name, meta in parser.kernels.items()]
    names = [name for name in parser.kernels.keys()]
    func_pointers_def = make_func_pointers(names, meta)
    meta_const_def = make_kernel_meta_const_dispatcher(meta)
    load_unload_def = make_kernel_load_def(names, meta)
    get_num_algos_def = make_get_num_algos_def(meta)
    default_algo_kernel = make_default_algo_kernel(meta)
    with args.out.with_suffix(".c").open("w") as fp:
        out = ""
        out += "#include <cuda.h>\n"
        out += "#include <stdint.h>\n"
        out += "#include <assert.h>\n"
        out += "\n"
        out += "\n".join(defs)
        out += "\n"
        out += func_pointers_def
        out += "\n"
        out += get_num_algos_def
        out += "\n"
        out += meta_const_def
        out += "\n"
        out += load_unload_def
        out += "\n"
        out += default_algo_kernel
        fp.write(out)
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _batch_norm_with_update {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::Tensor &, at::Tensor &, double, double);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_batch_norm_with_update")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _batch_norm_with_update_out {
|
| 29 |
+
using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::Tensor &, at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_batch_norm_with_update")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))")
|
| 35 |
+
static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve);
|
| 36 |
+
static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _batch_norm_with_update_functional {
|
| 40 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, double, double);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_batch_norm_with_update_functional")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)")
|
| 46 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
|
| 47 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_cast_Long_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
|
| 26 |
+
inline at::Tensor _cast_Long(const at::Tensor & self, bool non_blocking=false) {
|
| 27 |
+
return at::_ops::_cast_Long::call(self, non_blocking);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _coalesced(const at::Tensor & self, bool coalesced);
|
| 21 |
+
TORCH_API at::Tensor & _coalesced_out(at::Tensor & out, const at::Tensor & self, bool coalesced);
|
| 22 |
+
TORCH_API at::Tensor & _coalesced_outf(const at::Tensor & self, bool coalesced, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace compositeexplicitautograd
|
| 25 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _cudnn_init_dropout_state_out(at::Tensor & out, double dropout, bool train, int64_t dropout_seed);
|
| 21 |
+
TORCH_API at::Tensor & _cudnn_init_dropout_state_outf(double dropout, bool train, int64_t dropout_seed, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_cufft_get_plan_cache_size_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int
|
| 26 |
+
inline int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index) {
|
| 27 |
+
return at::_ops::_cufft_get_plan_cache_size::call(device_index);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_cufft_set_plan_cache_max_size_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()
|
| 26 |
+
inline void _cufft_set_plan_cache_max_size(at::DeviceIndex device_index, int64_t max_size) {
|
| 27 |
+
return at::_ops::_cufft_set_plan_cache_max_size::call(device_index, max_size);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_acos(at::TensorList self);
|
| 21 |
+
TORCH_API void _foreach_acos_(at::TensorList self);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API void _foreach_copy_(at::TensorList self, at::TensorList src, bool non_blocking=false);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_copy_ {
|
| 18 |
+
using schema = void (at::TensorList, at::TensorList, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_copy_")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()")
|
| 24 |
+
static void call(at::TensorList self, at::TensorList src, bool non_blocking);
|
| 25 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_copy {
|
| 29 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, bool);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_copy")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out")
|
| 35 |
+
static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList src, bool non_blocking);
|
| 36 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_copy_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList, bool, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_copy")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()")
|
| 46 |
+
static void call(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log1p_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_log1p {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log1p")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log1p(Tensor[] self) -> Tensor[]")
|
| 24 |
+
static ::std::vector<at::Tensor> call(at::TensorList self);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_log1p_ {
|
| 29 |
+
using schema = void (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log1p_")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log1p_(Tensor(a!)[] self) -> ()")
|
| 35 |
+
static void call(at::TensorList self);
|
| 36 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_log1p_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log1p")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
|
| 46 |
+
static void call(at::TensorList self, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_foreach_pow_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
|
| 26 |
+
inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::TensorList exponent) {
|
| 27 |
+
return at::_ops::_foreach_pow_List::call(self, exponent);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
|
| 31 |
+
inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, const at::Scalar & exponent) {
|
| 32 |
+
return at::_ops::_foreach_pow_Scalar::call(self, exponent);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
|
| 36 |
+
inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
|
| 37 |
+
return at::_ops::_foreach_pow_ScalarList::call(self, exponent);
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
|
| 41 |
+
inline ::std::vector<at::Tensor> _foreach_pow(const at::Scalar & self, at::TensorList exponent) {
|
| 42 |
+
return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent);
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
|
| 46 |
+
inline void _foreach_pow_(at::TensorList self, at::TensorList exponent) {
|
| 47 |
+
return at::_ops::_foreach_pow__List::call(self, exponent);
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
|
| 51 |
+
inline void _foreach_pow_(at::TensorList self, const at::Scalar & exponent) {
|
| 52 |
+
return at::_ops::_foreach_pow__Scalar::call(self, exponent);
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
|
| 56 |
+
inline void _foreach_pow_(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
|
| 57 |
+
return at::_ops::_foreach_pow__ScalarList::call(self, exponent);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
|
| 61 |
+
inline void _foreach_pow_out(at::TensorList out, at::TensorList self, at::TensorList exponent) {
|
| 62 |
+
return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
|
| 63 |
+
}
|
| 64 |
+
// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
|
| 65 |
+
inline void _foreach_pow_outf(at::TensorList self, at::TensorList exponent, at::TensorList out) {
|
| 66 |
+
return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
|
| 70 |
+
inline void _foreach_pow_out(at::TensorList out, at::TensorList self, const at::Scalar & exponent) {
|
| 71 |
+
return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
|
| 72 |
+
}
|
| 73 |
+
// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
|
| 74 |
+
inline void _foreach_pow_outf(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
|
| 75 |
+
return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
|
| 79 |
+
inline void _foreach_pow_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
|
| 80 |
+
return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
|
| 81 |
+
}
|
| 82 |
+
// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
|
| 83 |
+
inline void _foreach_pow_outf(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
|
| 84 |
+
return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _functional_sym_constrain_range_for_size(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_spdiags_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
|
| 26 |
+
inline at::Tensor _spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout=::std::nullopt) {
|
| 27 |
+
return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _spdiags_out(at::Tensor & out, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout=::std::nullopt) {
|
| 32 |
+
return at::_ops::_spdiags_out::call(diagonals, offsets, shape, layout, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _spdiags_outf(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout, at::Tensor & out) {
|
| 36 |
+
return at::_ops::_spdiags_out::call(diagonals, offsets, shape, layout, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _standard_gamma_grad {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma_grad")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma_grad(Tensor self, Tensor output) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & output);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _standard_gamma_grad_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma_grad")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & output, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _upsample_nearest_exact2d_backward_grad_input {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::optional<double>, ::std::optional<double>, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact2d_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _upsample_nearest_exact2d_backward {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::optional<double>, ::std::optional<double>);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_nearest_exact2d_backward")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace cpu
|
| 28 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_addcdiv : public TensorIteratorBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, ::std::optional<int64_t> dim=::std::nullopt, bool keepdim=false);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, ::std::optional<int64_t> dim=::std::nullopt, bool keepdim=false);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_scatter.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/as_strided_scatter_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
|
| 26 |
+
inline at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
|
| 27 |
+
return at::_ops::as_strided_scatter::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
|
| 32 |
+
return at::_ops::as_strided_scatter::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
|
| 37 |
+
inline at::Tensor as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
|
| 38 |
+
return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
|
| 43 |
+
return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
|
| 49 |
+
return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt, out);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 53 |
+
at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
|
| 54 |
+
return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt, out);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset, at::Tensor & out) {
|
| 60 |
+
return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt, out);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 64 |
+
at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset, at::Tensor & out) {
|
| 65 |
+
return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt, out);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 70 |
+
inline at::Tensor & as_strided_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
|
| 71 |
+
return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 75 |
+
at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
|
| 76 |
+
return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 81 |
+
inline at::Tensor & as_strided_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
|
| 82 |
+
return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
|
| 87 |
+
return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/avg_pool3d_backward_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
|
| 27 |
+
return at::_ops::avg_pool3d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
|
| 28 |
+
}
|
| 29 |
+
// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 30 |
+
inline at::Tensor & avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
|
| 31 |
+
return at::_ops::avg_pool3d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
|
| 35 |
+
inline at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
|
| 36 |
+
return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API ceil {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil(Tensor self) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API ceil_ {
|
| 29 |
+
using schema = at::Tensor & (at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil_")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil_(Tensor(a!) self) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(at::Tensor & self);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API ceil_out {
|
| 40 |
+
using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_ops.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API clamp_min {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min(Tensor self, Scalar min) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const at::Scalar & min);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API clamp_min_Tensor {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min.Tensor(Tensor self, Tensor min) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & min);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API clamp_min_ {
|
| 40 |
+
using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min_")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(at::Tensor & self, const at::Scalar & min);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API clamp_min__Tensor {
|
| 51 |
+
using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min_")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)")
|
| 57 |
+
static at::Tensor & call(at::Tensor & self, const at::Tensor & min);
|
| 58 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
struct TORCH_API clamp_min_out {
|
| 62 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
|
| 63 |
+
using ptr_schema = schema*;
|
| 64 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 65 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
|
| 66 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 67 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)")
|
| 68 |
+
static at::Tensor & call(const at::Tensor & self, const at::Scalar & min, at::Tensor & out);
|
| 69 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out);
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
struct TORCH_API clamp_min_Tensor_out {
|
| 73 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 74 |
+
using ptr_schema = schema*;
|
| 75 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 76 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_min")
|
| 77 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
|
| 78 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)")
|
| 79 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & min, at::Tensor & out);
|
| 80 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out);
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor copysign(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & copysign_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & copysign_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API cudnn_convolution_transpose {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, bool, bool, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_transpose")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API cudnn_convolution_transpose_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, bool, bool, bool, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_transpose")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautograd
|
| 23 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor erfc(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & erfc_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/erfc_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_erfc_out : public at::meta::structured_erfc {
|
| 20 |
+
void impl(const at::Tensor & self, const at::Tensor & out);
|
| 21 |
+
};
|
| 22 |
+
} // namespace native
|
| 23 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor eye(int64_t n, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
|
| 20 |
+
TORCH_API at::Tensor & eye_out_cpu(int64_t n, at::Tensor & out);
|
| 21 |
+
TORCH_API at::Tensor & eye_out_cuda(int64_t n, at::Tensor & out);
|
| 22 |
+
TORCH_API at::Tensor eye(int64_t n, int64_t m, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
|
| 23 |
+
TORCH_API at::Tensor & eye_out_cpu(int64_t n, int64_t m, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & eye_out_cuda(int64_t n, int64_t m, at::Tensor & out);
|
| 25 |
+
} // namespace native
|
| 26 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/fill_diagonal_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor hardsigmoid(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace meta
|
| 26 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor i0(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & i0_(at::Tensor & self);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 24 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1);
|
| 21 |
+
TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1);
|
| 22 |
+
TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1);
|
| 24 |
+
|
| 25 |
+
} // namespace meta
|
| 26 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API infinitely_differentiable_gelu_backward {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::infinitely_differentiable_gelu_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & grad, const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|