diff --git a/.gitattributes b/.gitattributes index dae59d94ca072ed29bd18c3f1d7b7dcf9ddaba34..ef42676f7deaba01a631d85770717c994ef4075d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -937,3 +937,4 @@ infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_spropack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filter=lfs diff=lfs merge=lfs -text diff --git a/deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 b/deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 new file mode 100644 index 0000000000000000000000000000000000000000..e21d98d8e5968828e0cd94047fd0c5d2b5256d36 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca5c6f81d584906b4d32b984ad8704dd65bf75bdab4334ed22ce7eef7501a95a +size 279161544 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..409f3dfc766702d7027adefb65ab35bbf0fa1536 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py 
b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..511d85ead2b8028f2753dd029823d6bd1fc480f5 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..632d2ff56923abe707f9b978016f98927d05f693 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b4a3abf62a94b03286f918394b1a8b0f5aec694 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..b47606124eb37400d960a060da267da54158c049 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c617e0f9d18ecff68eb23286ed56b3647ff49bf Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d8097262b4baea63a70351289da3b1ef47a1796 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py new file mode 100644 index 0000000000000000000000000000000000000000..91289d88db8adcf2b9b538945c9b04e564ee5662 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py @@ -0,0 +1,106 @@ +import numpy as np +import torch + +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) 
+ + +class BinaryOpFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + + # Shapes for `x` and `y`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + # Moreover, `y` will occasionally have singleton + # dimensions in order to test broadcasting. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + + [ + FuzzedParameter( + name=f"y_k{i}", + distribution={ + ParameterAlias(f"k{i}"): 0.8, + 1: 0.2, + }, + strict=True, + ) for i in range(3) + ], + + # Steps for `x` and `y`. (Benchmarks strided memory access.) + [ + FuzzedParameter( + name=f"{name}_step_{i}", + distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, + ) + for i in range(3) + for name in ("x", "y") + ], + + # Repeatable entropy for downstream applications. 
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedTensor( + name="x", + size=("k0", "k1", "k2"), + steps=("x_step_0", "x_step_1", "x_step_2"), + probability_contiguous=0.75, + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="dim", + dtype=dtype, + cuda=cuda, + ), + FuzzedTensor( + name="y", + size=("y_k0", "y_k1", "y_k2"), + steps=("x_step_0", "x_step_1", "x_step_2"), + probability_contiguous=0.75, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="dim", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py new file mode 100644 index 0000000000000000000000000000000000000000..984493fe4a718bc995c9cf32ab6165581022fb43 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py @@ -0,0 +1,106 @@ +import numpy as np +import torch + +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + + +class BinaryOpSparseFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + FuzzedParameter( + name="sparse_dim", + distribution={1: 0.4, 2: 0.4, 3: 0.2}, + strict=True + ), + # Shapes for `x` and `y`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. 
This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + # Moreover, `y` will occasionally have singleton + # dimensions in order to test broadcasting. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"y_k{i}", + distribution={ + ParameterAlias(f"k{i}"): 1.0}, + strict=True, + ) for i in range(3) + ], + FuzzedParameter( + name="density", + distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, + ), + FuzzedParameter( + name="coalesced", + distribution={True: 0.5, False: 0.5}, + ), + # Repeatable entropy for downstream applications. 
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedSparseTensor( + name="x", + size=("k0", "k1", "k2"), + dim_parameter="dim_parameter", + sparse_dim="sparse_dim", + density="density", + coalesced="coalesced", + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + dtype=dtype, + cuda=cuda, + ), + FuzzedSparseTensor( + name="y", + size=("y_k0", "y_k1", "y_k2"), + dim_parameter="dim_parameter", + sparse_dim="sparse_dim", + density="density", + coalesced="coalesced", + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..70b5ae3cd3a5dd6a73d0704a199728e6be512cac --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py @@ -0,0 +1,82 @@ + +import numpy as np +import torch +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + +class UnaryOpSparseFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Sparse dim parameter of x. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + FuzzedParameter( + name="sparse_dim", + distribution={1: 0.4, 2: 0.4, 3: 0.2}, + strict=True + ), + # Shapes for `x`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. 
This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + FuzzedParameter( + name="density", + distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, + ), + FuzzedParameter( + name="coalesced", + distribution={True: 0.5, False: 0.5}, + ), + FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedSparseTensor( + name="x", + size=("k0", "k1", "k2"), + dim_parameter="dim_parameter", + sparse_dim="sparse_dim", + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + density="density", + coalesced="coalesced", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..29359ba3edb6f0fb8554476529d11d9fcdedecdd --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py @@ -0,0 +1,93 @@ +import math + +import torch +from torch.utils import benchmark +from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias + + +__all__ = ['SpectralOpFuzzer'] + +MIN_DIM_SIZE = 16 +MAX_DIM_SIZE = 16 * 1024 + +def power_range(upper_bound, base): + return (base ** i 
for i in range(int(math.log(upper_bound, base)) + 1)) + +# List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE +# These numbers factorize into multiples of prime factors 2, 3, and 5 only +# and are usually the fastest in FFT implementations. +REGULAR_SIZES = [] +for i in power_range(MAX_DIM_SIZE, 2): + for j in power_range(MAX_DIM_SIZE // i, 3): + ij = i * j + for k in power_range(MAX_DIM_SIZE // ij, 5): + ijk = ij * k + if ijk > MIN_DIM_SIZE: + REGULAR_SIZES.append(ijk) +REGULAR_SIZES.sort() + +class SpectralOpFuzzer(benchmark.Fuzzer): + def __init__(self, *, seed: int, dtype=torch.float64, + cuda: bool = False, probability_regular: float = 1.0): + super().__init__( + parameters=[ + # Dimensionality of x. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + + # Shapes for `x`. + # It is important to test all shapes, however + # regular sizes are especially important to the FFT and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the regular numbers + # (both distributions are loguniform) and then randomly + # selecting between the two. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=MIN_DIM_SIZE, + maxval=MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_regular_{i}", + distribution={size: 1. / len(REGULAR_SIZES) for size in REGULAR_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_regular_{i}"): probability_regular, + ParameterAlias(f"k_any_{i}"): 1 - probability_regular, + }, + strict=True, + ) for i in range(3) + ], + + # Steps for `x`. (Benchmarks strided memory access.) 
+ [ + FuzzedParameter( + name=f"step_{i}", + distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, + ) for i in range(3) + ], + ], + tensors=[ + FuzzedTensor( + name="x", + size=("k0", "k1", "k2"), + steps=("step_0", "step_1", "step_2"), + probability_contiguous=0.75, + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="ndim", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py new file mode 100644 index 0000000000000000000000000000000000000000..a0f810d0b9fadb3968b16eff7e6a5617d00168ce --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py @@ -0,0 +1,81 @@ +import numpy as np +import torch + +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + + +class UnaryOpFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Dimensionality of x. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + + # Shapes for `x`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. 
+ [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + + # Steps for `x`. (Benchmarks strided memory access.) + [ + FuzzedParameter( + name=f"x_step_{i}", + distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, + ) for i in range(3) + ], + + # Repeatable entropy for downstream applications. + FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedTensor( + name="x", + size=("k0", "k1", "k2"), + steps=("x_step_0", "x_step_1", "x_step_2"), + probability_contiguous=0.75, + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="dim", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..089ba0adc3bfd013e9e0744d4cb1b42a58548294 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..356ef973c84ccb613287159e80547e8ea84191a3 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..784956bd265f0e73da7cbd8127e2b06eb082d875 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52ed3c7fdd088ad50bbf1bc22ef9379e6ac73a08 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82ede540657edfcd0920da5826b6c7011c26b0fc Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb249ad207454d403e3dc5f9f93c4527b3331f2e Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7406bbe0a77e439eafc988e4ee19c837325b9af Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py new file mode 100644 index 0000000000000000000000000000000000000000..13fdd22e2727d42892e7bc0d0efb2fc3eaf8d376 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py @@ -0,0 +1,40 @@ +from typing import Any, Callable, Dict, Protocol, runtime_checkable + + +class TimerClass(Protocol): + """This is the portion of the `timeit.Timer` API used by benchmark utils.""" + def __init__( + self, + stmt: str, + setup: str, + timer: Callable[[], float], + globals: Dict[str, Any], + **kwargs: Any, + ) -> None: + ... + + def timeit(self, number: int) -> float: + ... + + +@runtime_checkable +class TimeitModuleType(Protocol): + """Modules generated from `timeit_template.cpp`.""" + def timeit(self, number: int) -> float: + ... 
+ + +class CallgrindModuleType(Protocol): + """Replicates the valgrind endpoints in `torch._C`. + + These bindings are used to collect Callgrind profiles on earlier versions + of PyTorch and will eventually be removed. + """ + __file__: str + __name__: str + + def _valgrind_supported_platform(self) -> bool: + ... + + def _valgrind_toggle(self) -> None: + ... diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b8134c599d66cb7c88d356d36300b455b9785eb3 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py @@ -0,0 +1,355 @@ +"""Base shared classes and utilities.""" + +import collections +import contextlib +import dataclasses +import os +import shutil +import tempfile +import textwrap +import time +from typing import cast, Any, DefaultDict, Dict, Iterable, Iterator, List, Optional, Tuple +import uuid + +import torch + + +__all__ = ["TaskSpec", "Measurement", "select_unit", "unit_to_english", "trim_sigfig", "ordered_unique", "set_torch_threads"] + + +_MAX_SIGNIFICANT_FIGURES = 4 +_MIN_CONFIDENCE_INTERVAL = 25e-9 # 25 ns + +# Measurement will include a warning if the distribution is suspect. All +# runs are expected to have some variation; these parameters set the +# thresholds. +_IQR_WARN_THRESHOLD = 0.1 +_IQR_GROSS_WARN_THRESHOLD = 0.25 + + +@dataclasses.dataclass(init=True, repr=False, eq=True, frozen=True) +class TaskSpec: + """Container for information used to define a Timer. 
(except globals)""" + stmt: str + setup: str + global_setup: str = "" + label: Optional[str] = None + sub_label: Optional[str] = None + description: Optional[str] = None + env: Optional[str] = None + num_threads: int = 1 + + @property + def title(self) -> str: + """Best effort attempt at a string label for the measurement.""" + if self.label is not None: + return self.label + (f": {self.sub_label}" if self.sub_label else "") + elif "\n" not in self.stmt: + return self.stmt + (f": {self.sub_label}" if self.sub_label else "") + return ( + f"stmt:{f' ({self.sub_label})' if self.sub_label else ''}\n" + f"{textwrap.indent(self.stmt, ' ')}" + ) + + def setup_str(self) -> str: + return ( + "" if (self.setup == "pass" or not self.setup) + else f"setup:\n{textwrap.indent(self.setup, ' ')}" if "\n" in self.setup + else f"setup: {self.setup}" + ) + + def summarize(self) -> str: + """Build TaskSpec portion of repr string for other containers.""" + sections = [ + self.title, + self.description or "", + self.setup_str(), + ] + return "\n".join([f"{i}\n" if "\n" in i else i for i in sections if i]) + +_TASKSPEC_FIELDS = tuple(i.name for i in dataclasses.fields(TaskSpec)) + + +@dataclasses.dataclass(init=True, repr=False) +class Measurement: + """The result of a Timer measurement. + + This class stores one or more measurements of a given statement. It is + serializable and provides several convenience methods + (including a detailed __repr__) for downstream consumers. + """ + number_per_run: int + raw_times: List[float] + task_spec: TaskSpec + metadata: Optional[Dict[Any, Any]] = None # Reserved for user payloads. + + def __post_init__(self) -> None: + self._sorted_times: Tuple[float, ...] = () + self._warnings: Tuple[str, ...] = () + self._median: float = -1.0 + self._mean: float = -1.0 + self._p25: float = -1.0 + self._p75: float = -1.0 + + def __getattr__(self, name: str) -> Any: + # Forward TaskSpec fields for convenience. 
+ if name in _TASKSPEC_FIELDS: + return getattr(self.task_spec, name) + return super().__getattribute__(name) + + # ========================================================================= + # == Convenience methods for statistics =================================== + # ========================================================================= + # + # These methods use raw time divided by number_per_run; this is an + # extrapolation and hides the fact that different number_per_run will + # result in different amortization of overheads, however if Timer has + # selected an appropriate number_per_run then this is a non-issue, and + # forcing users to handle that division would result in a poor experience. + @property + def times(self) -> List[float]: + return [t / self.number_per_run for t in self.raw_times] + + @property + def median(self) -> float: + self._lazy_init() + return self._median + + @property + def mean(self) -> float: + self._lazy_init() + return self._mean + + @property + def iqr(self) -> float: + self._lazy_init() + return self._p75 - self._p25 + + @property + def significant_figures(self) -> int: + """Approximate significant figure estimate. + + This property is intended to give a convenient way to estimate the + precision of a measurement. It only uses the interquartile region to + estimate statistics to try to mitigate skew from the tails, and + uses a static z value of 1.645 since it is not expected to be used + for small values of `n`, so z can approximate `t`. + + The significant figure estimation used in conjunction with the + `trim_sigfig` method to provide a more human interpretable data + summary. __repr__ does not use this method; it simply displays raw + values. Significant figure estimation is intended for `Compare`. + """ + self._lazy_init() + n_total = len(self._sorted_times) + lower_bound = int(n_total // 4) + upper_bound = int(torch.tensor(3 * n_total / 4).ceil()) + interquartile_points: Tuple[float, ...] 
= self._sorted_times[lower_bound:upper_bound] + std = torch.tensor(interquartile_points).std(unbiased=False).item() + sqrt_n = torch.tensor(len(interquartile_points)).sqrt().item() + + # Rough estimates. These are by no means statistically rigorous. + confidence_interval = max(1.645 * std / sqrt_n, _MIN_CONFIDENCE_INTERVAL) + relative_ci = torch.tensor(self._median / confidence_interval).log10().item() + num_significant_figures = int(torch.tensor(relative_ci).floor()) + return min(max(num_significant_figures, 1), _MAX_SIGNIFICANT_FIGURES) + + @property + def has_warnings(self) -> bool: + self._lazy_init() + return bool(self._warnings) + + def _lazy_init(self) -> None: + if self.raw_times and not self._sorted_times: + self._sorted_times = tuple(sorted(self.times)) + _sorted_times = torch.tensor(self._sorted_times, dtype=torch.float64) + self._median = _sorted_times.quantile(.5).item() + self._mean = _sorted_times.mean().item() + self._p25 = _sorted_times.quantile(.25).item() + self._p75 = _sorted_times.quantile(.75).item() + + def add_warning(msg: str) -> None: + rel_iqr = self.iqr / self.median * 100 + self._warnings += ( + f" WARNING: Interquartile range is {rel_iqr:.1f}% " + f"of the median measurement.\n {msg}", + ) + + if not self.meets_confidence(_IQR_GROSS_WARN_THRESHOLD): + add_warning("This suggests significant environmental influence.") + elif not self.meets_confidence(_IQR_WARN_THRESHOLD): + add_warning("This could indicate system fluctuation.") + + + def meets_confidence(self, threshold: float = _IQR_WARN_THRESHOLD) -> bool: + return self.iqr / self.median < threshold + + @property + def title(self) -> str: + return self.task_spec.title + + @property + def env(self) -> str: + return ( + "Unspecified env" if self.taskspec.env is None + else cast(str, self.taskspec.env) + ) + + @property + def as_row_name(self) -> str: + return self.sub_label or self.stmt or "[Unknown]" + + def __repr__(self) -> str: + """ + Example repr: + + Broadcasting add (4x8) + 
Median: 5.73 us + IQR: 2.25 us (4.01 to 6.26) + 372 measurements, 100 runs per measurement, 1 thread + WARNING: Interquartile range is 39.4% of the median measurement. + This suggests significant environmental influence. + """ + self._lazy_init() + skip_line, newline = "MEASUREMENT_REPR_SKIP_LINE", "\n" + n = len(self._sorted_times) + time_unit, time_scale = select_unit(self._median) + iqr_filter = '' if n >= 4 else skip_line + + repr_str = f""" +{super().__repr__()} +{self.task_spec.summarize()} + {'Median: ' if n > 1 else ''}{self._median / time_scale:.2f} {time_unit} + {iqr_filter}IQR: {self.iqr / time_scale:.2f} {time_unit} ({self._p25 / time_scale:.2f} to {self._p75 / time_scale:.2f}) + {n} measurement{'s' if n > 1 else ''}, {self.number_per_run} runs {'per measurement,' if n > 1 else ','} {self.num_threads} thread{'s' if self.num_threads > 1 else ''} +{newline.join(self._warnings)}""".strip() # noqa: B950 + + return "\n".join(l for l in repr_str.splitlines(keepends=False) if skip_line not in l) + + @staticmethod + def merge(measurements: Iterable["Measurement"]) -> List["Measurement"]: + """Convenience method for merging replicates. + + Merge will extrapolate times to `number_per_run=1` and will not + transfer any metadata. (Since it might differ between replicates) + """ + grouped_measurements: DefaultDict[TaskSpec, List[Measurement]] = collections.defaultdict(list) + for m in measurements: + grouped_measurements[m.task_spec].append(m) + + def merge_group(task_spec: TaskSpec, group: List["Measurement"]) -> "Measurement": + times: List[float] = [] + for m in group: + # Different measurements could have different `number_per_run`, + # so we call `.times` which normalizes the results. 
def select_unit(t: float) -> Tuple[str, float]:
    """Determine how to scale times for O(1) magnitude.

    This utility is used to format numbers for human consumption. Returns a
    `(unit, scale)` pair, e.g. `("us", 1e-6)` for `t` in the microsecond range.
    """
    # log10 // 3 buckets the magnitude in groups of three decades.
    time_unit = {-3: "ns", -2: "us", -1: "ms"}.get(int(torch.tensor(t).log10().item() // 3), "s")
    time_scale = {"ns": 1e-9, "us": 1e-6, "ms": 1e-3, "s": 1}[time_unit]
    return time_unit, time_scale


def unit_to_english(u: str) -> str:
    """Map a short time-unit suffix ("ns"/"us"/"ms"/"s") to its English name."""
    return {
        "ns": "nanosecond",
        "us": "microsecond",
        "ms": "millisecond",
        "s": "second",
    }[u]


def trim_sigfig(x: float, n: int) -> float:
    """Trim `x` to `n` significant figures. (e.g. 3.14159, 2 -> 3.10000)"""
    assert n == int(n)
    magnitude = int(torch.tensor(x).abs().log10().ceil().item())
    scale = 10 ** (magnitude - n)
    return float(torch.tensor(x / scale).round() * scale)


def ordered_unique(elements: Iterable[Any]) -> List[Any]:
    """Deduplicate `elements`, preserving first-seen order.

    Uses `dict.fromkeys` (insertion-ordered since Python 3.7) instead of the
    previous redundant dict-comprehension-wrapped-in-OrderedDict.
    """
    return list(dict.fromkeys(elements))


@contextlib.contextmanager
def set_torch_threads(n: int) -> Iterator[None]:
    """Temporarily set the PyTorch intra-op thread count, restoring on exit."""
    prior_num_threads = torch.get_num_threads()
    try:
        torch.set_num_threads(n)
        yield
    finally:
        # Always restore, even if the managed block raises.
        torch.set_num_threads(prior_num_threads)
+ + This is an internal utility, and is exported solely so that microbenchmarks + can reuse the util. + """ + use_dev_shm: bool = (os.getenv("BENCHMARK_USE_DEV_SHM") or "").lower() in ("1", "true") + if use_dev_shm: + root = "/dev/shm/pytorch_benchmark_utils" + assert os.name == "posix", f"tmpfs (/dev/shm) is POSIX only, current platform is {os.name}" + assert os.path.exists("/dev/shm"), "This system does not appear to support tmpfs (/dev/shm)." + os.makedirs(root, exist_ok=True) + + # Because we're working in shared memory, it is more important than + # usual to clean up ALL intermediate files. However we don't want every + # worker to walk over all outstanding directories, so instead we only + # check when we are sure that it won't lead to contention. + if gc_dev_shm: + for i in os.listdir(root): + owner_file = os.path.join(root, i, "owner.pid") + if not os.path.exists(owner_file): + continue + + with open(owner_file) as f: + owner_pid = int(f.read()) + + if owner_pid == os.getpid(): + continue + + try: + # https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python + os.kill(owner_pid, 0) + + except OSError: + print(f"Detected that {os.path.join(root, i)} was orphaned in shared memory. Cleaning up.") + shutil.rmtree(os.path.join(root, i)) + + else: + root = tempfile.gettempdir() + + # We include the time so names sort by creation time, and add a UUID + # to ensure we don't collide. 
+ name = f"{prefix or tempfile.gettempprefix()}__{int(time.time())}__{uuid.uuid4()}" + path = os.path.join(root, name) + os.makedirs(path, exist_ok=False) + + if use_dev_shm: + with open(os.path.join(path, "owner.pid"), "w") as f: + f.write(str(os.getpid())) + + return path diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7863e6a740e09692c6e28e3fffeb41f02ab36e --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py @@ -0,0 +1,320 @@ +"""Display class to aggregate and print the results of many measurements.""" +import collections +import enum +import itertools as it +from typing import DefaultDict, List, Optional, Tuple + +from torch.utils.benchmark.utils import common +from torch import tensor as _tensor + +__all__ = ["Colorize", "Compare"] + +BEST = "\033[92m" +GOOD = "\033[34m" +BAD = "\033[2m\033[91m" +VERY_BAD = "\033[31m" +BOLD = "\033[1m" +TERMINATE = "\033[0m" + + +class Colorize(enum.Enum): + NONE = "none" + COLUMNWISE = "columnwise" + ROWWISE = "rowwise" + + +# Classes to separate internal bookkeeping from what is rendered. 
+class _Column: + def __init__( + self, + grouped_results: List[Tuple[Optional[common.Measurement], ...]], + time_scale: float, + time_unit: str, + trim_significant_figures: bool, + highlight_warnings: bool, + ): + self._grouped_results = grouped_results + self._flat_results = list(it.chain(*grouped_results)) + self._time_scale = time_scale + self._time_unit = time_unit + self._trim_significant_figures = trim_significant_figures + self._highlight_warnings = ( + highlight_warnings + and any(r.has_warnings for r in self._flat_results if r) + ) + leading_digits = [ + int(_tensor(r.median / self._time_scale).log10().ceil()) if r else None + for r in self._flat_results + ] + unit_digits = max(d for d in leading_digits if d is not None) + decimal_digits = min( + max(m.significant_figures - digits, 0) + for digits, m in zip(leading_digits, self._flat_results) + if (m is not None) and (digits is not None) + ) if self._trim_significant_figures else 1 + length = unit_digits + decimal_digits + (1 if decimal_digits else 0) + self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}" + + def get_results_for(self, group): + return self._grouped_results[group] + + def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]): + if value is None: + return " " * len(self.num_to_str(1, estimated_sigfigs, None)) + + if self._trim_significant_figures: + value = common.trim_sigfig(value, estimated_sigfigs) + + return self._template.format( + value, + f" (! 
{spread * 100:.0f}%)" if self._highlight_warnings and spread is not None else "") + + +def optional_min(seq): + l = list(seq) + return None if len(l) == 0 else min(l) + + +class _Row: + def __init__(self, results, row_group, render_env, env_str_len, + row_name_str_len, time_scale, colorize, num_threads=None): + super().__init__() + self._results = results + self._row_group = row_group + self._render_env = render_env + self._env_str_len = env_str_len + self._row_name_str_len = row_name_str_len + self._time_scale = time_scale + self._colorize = colorize + self._columns: Tuple[_Column, ...] = () + self._num_threads = num_threads + + def register_columns(self, columns: Tuple[_Column, ...]): + self._columns = columns + + def as_column_strings(self): + concrete_results = [r for r in self._results if r is not None] + env = f"({concrete_results[0].env})" if self._render_env else "" + env = env.ljust(self._env_str_len + 4) + output = [" " + env + concrete_results[0].as_row_name] + for m, col in zip(self._results, self._columns or ()): + if m is None: + output.append(col.num_to_str(None, 1, None)) + else: + output.append(col.num_to_str( + m.median / self._time_scale, + m.significant_figures, + m.iqr / m.median if m.has_warnings else None + )) + return output + + @staticmethod + def color_segment(segment, value, best_value): + if value <= best_value * 1.01 or value <= best_value + 100e-9: + return BEST + BOLD + segment + TERMINATE * 2 + if value <= best_value * 1.1: + return GOOD + BOLD + segment + TERMINATE * 2 + if value >= best_value * 5: + return VERY_BAD + BOLD + segment + TERMINATE * 2 + if value >= best_value * 2: + return BAD + segment + TERMINATE * 2 + + return segment + + def row_separator(self, overall_width): + return ( + [f"{self._num_threads} threads: ".ljust(overall_width, "-")] + if self._num_threads is not None else [] + ) + + def finalize_column_strings(self, column_strings, col_widths): + best_values = [-1 for _ in column_strings] + if self._colorize == 
Colorize.ROWWISE: + row_min = min(r.median for r in self._results if r is not None) + best_values = [row_min for _ in column_strings] + elif self._colorize == Colorize.COLUMNWISE: + best_values = [ + optional_min(r.median for r in column.get_results_for(self._row_group) if r is not None) + for column in (self._columns or ()) + ] + + row_contents = [column_strings[0].ljust(col_widths[0])] + for col_str, width, result, best_value in zip(column_strings[1:], col_widths[1:], self._results, best_values): + col_str = col_str.center(width) + if self._colorize != Colorize.NONE and result is not None and best_value is not None: + col_str = self.color_segment(col_str, result.median, best_value) + row_contents.append(col_str) + return row_contents + + +class Table: + def __init__( + self, + results: List[common.Measurement], + colorize: Colorize, + trim_significant_figures: bool, + highlight_warnings: bool + ): + assert len({r.label for r in results}) == 1 + + self.results = results + self._colorize = colorize + self._trim_significant_figures = trim_significant_figures + self._highlight_warnings = highlight_warnings + self.label = results[0].label + self.time_unit, self.time_scale = common.select_unit( + min(r.median for r in results) + ) + + self.row_keys = common.ordered_unique([self.row_fn(i) for i in results]) + self.row_keys.sort(key=lambda args: args[:2]) # preserve stmt order + self.column_keys = common.ordered_unique([self.col_fn(i) for i in results]) + self.rows, self.columns = self.populate_rows_and_columns() + + @staticmethod + def row_fn(m: common.Measurement) -> Tuple[int, Optional[str], str]: + return m.num_threads, m.env, m.as_row_name + + @staticmethod + def col_fn(m: common.Measurement) -> Optional[str]: + return m.description + + def populate_rows_and_columns(self) -> Tuple[Tuple[_Row, ...], Tuple[_Column, ...]]: + rows: List[_Row] = [] + columns: List[_Column] = [] + ordered_results: List[List[Optional[common.Measurement]]] = [ + [None for _ in 
self.column_keys] + for _ in self.row_keys + ] + row_position = {key: i for i, key in enumerate(self.row_keys)} + col_position = {key: i for i, key in enumerate(self.column_keys)} + for r in self.results: + i = row_position[self.row_fn(r)] + j = col_position[self.col_fn(r)] + ordered_results[i][j] = r + + unique_envs = {r.env for r in self.results} + render_env = len(unique_envs) > 1 + env_str_len = max(len(i) for i in unique_envs) if render_env else 0 + + row_name_str_len = max(len(r.as_row_name) for r in self.results) + + prior_num_threads = -1 + prior_env = "" + row_group = -1 + rows_by_group: List[List[List[Optional[common.Measurement]]]] = [] + for (num_threads, env, _), row in zip(self.row_keys, ordered_results): + thread_transition = (num_threads != prior_num_threads) + if thread_transition: + prior_num_threads = num_threads + prior_env = "" + row_group += 1 + rows_by_group.append([]) + rows.append( + _Row( + results=row, + row_group=row_group, + render_env=(render_env and env != prior_env), + env_str_len=env_str_len, + row_name_str_len=row_name_str_len, + time_scale=self.time_scale, + colorize=self._colorize, + num_threads=num_threads if thread_transition else None, + ) + ) + rows_by_group[-1].append(row) + prior_env = env + + for i in range(len(self.column_keys)): + grouped_results = [tuple(row[i] for row in g) for g in rows_by_group] + column = _Column( + grouped_results=grouped_results, + time_scale=self.time_scale, + time_unit=self.time_unit, + trim_significant_figures=self._trim_significant_figures, + highlight_warnings=self._highlight_warnings,) + columns.append(column) + + rows_tuple, columns_tuple = tuple(rows), tuple(columns) + for ri in rows_tuple: + ri.register_columns(columns_tuple) + return rows_tuple, columns_tuple + + def render(self) -> str: + string_rows = [[""] + self.column_keys] + for r in self.rows: + string_rows.append(r.as_column_strings()) + num_cols = max(len(i) for i in string_rows) + for sr in string_rows: + sr.extend(["" for _ in 
range(num_cols - len(sr))]) + + col_widths = [max(len(j) for j in i) for i in zip(*string_rows)] + finalized_columns = [" | ".join(i.center(w) for i, w in zip(string_rows[0], col_widths))] + overall_width = len(finalized_columns[0]) + for string_row, row in zip(string_rows[1:], self.rows): + finalized_columns.extend(row.row_separator(overall_width)) + finalized_columns.append(" | ".join(row.finalize_column_strings(string_row, col_widths))) + + newline = "\n" + has_warnings = self._highlight_warnings and any(ri.has_warnings for ri in self.results) + return f""" +[{(' ' + (self.label or '') + ' ').center(overall_width - 2, '-')}] +{newline.join(finalized_columns)} + +Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}). +{'(! XX%) Measurement has high variance, where XX is the IQR / median * 100.' + newline if has_warnings else ""}"""[1:] + + +class Compare: + def __init__(self, results: List[common.Measurement]): + self._results: List[common.Measurement] = [] + self.extend_results(results) + self._trim_significant_figures = False + self._colorize = Colorize.NONE + self._highlight_warnings = False + + def __str__(self): + return "\n".join(self._render()) + + def extend_results(self, results): + for r in results: + if not isinstance(r, common.Measurement): + raise ValueError( + "Expected an instance of `Measurement`, " f"got {type(r)} instead." 
+ ) + self._results.extend(results) + + def trim_significant_figures(self): + self._trim_significant_figures = True + + def colorize(self, rowwise=False): + self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE + + def highlight_warnings(self): + self._highlight_warnings = True + + def print(self): + print(str(self)) + + def _render(self): + results = common.Measurement.merge(self._results) + grouped_results = self._group_by_label(results) + output = [] + for group in grouped_results.values(): + output.append(self._layout(group)) + return output + + def _group_by_label(self, results: List[common.Measurement]): + grouped_results: DefaultDict[str, List[common.Measurement]] = collections.defaultdict(list) + for r in results: + grouped_results[r.label].append(r) + return grouped_results + + def _layout(self, results: List[common.Measurement]): + table = Table( + results, + self._colorize, + self._trim_significant_figures, + self._highlight_warnings + ) + return table.render() diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py new file mode 100644 index 0000000000000000000000000000000000000000..dcee32ace4031a6401e6c117fb072552af889cb7 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py @@ -0,0 +1,187 @@ +import torch + +__all__ = ["bench_all", "benchmark_compile"] + +import torch._dynamo +from torch._dynamo.testing import CompileCounterWithBackend +from torch.utils.benchmark import Timer + +from typing import Optional, List, Callable, Union, Any, cast + +_warned_tensor_cores = False +_default_float_32_precision = torch.get_float32_matmul_precision() + +try: + from tabulate import tabulate + HAS_TABULATE = True +except ImportError: + HAS_TABULATE = False + print("tabulate is not installed, please pip install tabulate to use this utility") + +if HAS_TABULATE: + def 
def _enable_tensor_cores():
    """Allow TF32 matmul on capability >= 8.0 GPUs, warning once when enabled."""
    global _warned_tensor_cores

    if torch.cuda.is_available():
        if torch.backends.cuda.matmul.allow_tf32 is False and torch.cuda.get_device_capability() >= (8, 0):
            torch.set_float32_matmul_precision("high")
            if not _warned_tensor_cores:
                print("Your GPU supports tensor cores")
                print("we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`")
                _warned_tensor_cores = True

def _disable_tensor_cores():
    """Restore the float32 matmul precision captured at import time."""
    torch.set_float32_matmul_precision(_default_float_32_precision)

def bench_loop(
    model: Union[torch.nn.Module, Callable],
    sample_input: Union[torch.Tensor, Any],
    num_iters: int = 5,
    optimizer: Optional[torch.optim.Optimizer] = None,
    loss_fn: Optional[Callable] = None,
):
    """Time `num_iters` executions of `model` and return the mean in ms (2 dp).

    If both `optimizer` and `loss_fn` are provided, a full training step
    (forward, loss, backward, optimizer step/zero_grad) is measured; otherwise
    only a forward pass is measured.
    """
    if optimizer and loss_fn:
        # Training mode.
        # NOTE(review): this literal is indented; it relies on Timer
        # normalizing stmt indentation — confirm against Timer's contract.
        stmt = """
            output = model(sample_input)
            loss = loss_fn(output) if loss_fn else output.sum()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        """
    else:
        # Inference mode.
        stmt = "model(sample_input)"

    timer = Timer(
        stmt=stmt,
        globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn},
    )

    result = timer.timeit(number=num_iters)

    # `result.mean` is in seconds; report milliseconds.
    avg_time = result.mean * 1000
    return round(avg_time, 2)

def benchmark_compile(
    model: Union[torch.nn.Module, Callable],
    sample_input: Union[torch.Tensor, Any],
    num_iters: int = 5,
    backend: Optional[str] = None,
    mode: Optional[str] = "default",
    optimizer: Optional[torch.optim.Optimizer] = None,
    loss_fn : Union[torch.nn.Module, Callable, None] = None,
):
    """Benchmark torch.compile for one backend/mode.

    Returns `(compilation_time_ms, running_time_ms)`; `compilation_time_ms` is
    None when `backend` is None (eager), and `(None, None)` on compile failure.
    """
    if backend:
        try:
            torch._dynamo.reset()
            compile_counter_with_backend = CompileCounterWithBackend(backend)
            opt_model = torch.compile(model, backend=compile_counter_with_backend, mode=mode)

            # Compilation only happens after the first inference, so a single
            # iteration isolates (roughly) the compile cost.
            compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn)

            running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)

            if compile_counter_with_backend.frame_count == 0:
                raise RuntimeError("No compilation occurred during benchmarking.")

            if compile_counter_with_backend.frame_count > 1:
                raise RuntimeError("Recompilation occurred during benchmarking.")

        # Best-effort by design: a backend that fails to compile is reported
        # and skipped rather than aborting the whole sweep.
        except Exception as e:
            print(e)
            print(f"Failed to compile {backend} with mode {mode}")
            return None, None
    else:
        opt_model = model
        compilation_time = None
        running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)

    compilation_time = round(compilation_time, 2) if compilation_time else None
    running_time = round(running_time, 2) if running_time else None

    return compilation_time, running_time

def bench_all(
    model : Union[torch.nn.Module, Callable],
    sample_input: Union[torch.Tensor, Any],
    num_iters : int = 5,
    optimizer: Optional[torch.optim.Optimizer] = None,
    loss_fn : Union[torch.nn.Module, Callable, None] = None,
):
    """
    This is a simple utility that can be used to benchmark torch.compile
    In particular it ensures that your GPU is setup to use tensor cores if it supports them
    It also tries out all the main backends and prints a table of results so you can easily compare them all
    Many of the backends have their own optional dependencies so please pip install them separately

    You will get one table for inference and another for training
    If you'd like to leverage this utility for training make sure to pass in a torch.optim.Optimizer

    The important warnings are
    Your GPU supports tensor cores
    we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`

    If a compilation fails for any reason including the dependency not being included
    then we will print Failed to compile {backend} with mode {mode}
    """
    field_names = ["Train/Inference", "Backend", "Mode", "Compilation Time", "Average Running Time"]
    table = []

    eager_time = None
    torch._dynamo.reset()
    # BUG FIX: the eager baseline previously omitted `loss_fn`, so in training
    # mode it timed a different workload (inference) than the compiled rows.
    _, eager_time = benchmark_compile(model, sample_input, num_iters, None, None, optimizer, loss_fn)
    table.append(
        [("Training" if optimizer else "Inference"), "Eager", "-", "-", f"{eager_time} ms"]
    )

    for backend in torch._dynamo.list_backends():

        if backend == "inductor":
            mode_options = cast(List[Optional[str]], list(torch._inductor.list_mode_options().keys())) + [None]
            for mode in mode_options:
                if mode == "default":
                    continue
                torch._dynamo.reset()
                try:
                    if torch.cuda.is_available():
                        _enable_tensor_cores()
                    compilation_time, running_time = benchmark_compile(
                        model, sample_input, num_iters, backend, mode, optimizer, loss_fn)
                finally:
                    if torch.cuda.is_available():
                        _disable_tensor_cores()
                table.append([
                    ("Training" if optimizer else "Inference"),
                    backend if backend else "-",
                    mode if mode is not None else "-",
                    f"{compilation_time} ms " if compilation_time else "-",
                    f"{running_time} ms " if running_time else "-",
                ])

        else:
            torch._dynamo.reset()
            compilation_time, running_time = benchmark_compile(
                model, sample_input, num_iters, backend, None, optimizer, loss_fn)

            if running_time is not None:
                table.append([
                    ("Training" if optimizer else "Inference"),
                    backend, "-",
                    # BUG FIX: was `f"{compilation_time} ms " or "-"`, which is
                    # always truthy (the "-" branch was dead code). Use the same
                    # conditional as the inductor branch above.
                    f"{compilation_time} ms " if compilation_time else "-",
                    f"{running_time} ms ",
                ])

    return tabulate(table, headers=field_names, tablefmt="github")
import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch + + +__all__ = [ + "Fuzzer", + "FuzzedParameter", "ParameterAlias", + "FuzzedTensor", +] + + +_DISTRIBUTIONS = ( + "loguniform", + "uniform", +) + + +class FuzzedParameter: + """Specification for a parameter to be generated during fuzzing.""" + def __init__( + self, + name: str, + minval: Optional[Union[int, float]] = None, + maxval: Optional[Union[int, float]] = None, + distribution: Optional[Union[str, Dict[Any, float]]] = None, + strict: bool = False, + ): + """ + Args: + name: + A string name with which to identify the parameter. + FuzzedTensors can reference this string in their + specifications. + minval: + The lower bound for the generated value. See the description + of `distribution` for type behavior. + maxval: + The upper bound for the generated value. Type behavior is + identical to `minval`. + distribution: + Specifies the distribution from which this parameter should + be drawn. There are three possibilities: + - "loguniform" + Samples between `minval` and `maxval` (inclusive) such + that the probabilities are uniform in log space. As a + concrete example, if minval=1 and maxval=100, a sample + is as likely to fall in [1, 10) as it is [10, 100]. + - "uniform" + Samples are chosen with uniform probability between + `minval` and `maxval` (inclusive). If either `minval` + or `maxval` is a float then the distribution is the + continuous uniform distribution; otherwise samples + are constrained to the integers. + - dict: + If a dict is passed, the keys are taken to be choices + for the variables and the values are interpreted as + probabilities. (And must sum to one.) + If a dict is passed, `minval` and `maxval` must not be set. + Otherwise, they must be set. + strict: + If a parameter is strict, it will not be included in the + iterative resampling process which Fuzzer uses to find a + valid parameter configuration. 
This allows an author to + prevent skew from resampling for a given parameter (for + instance, a low size limit could inadvertently bias towards + Tensors with fewer dimensions) at the cost of more iterations + when generating parameters. + """ + self._name = name + self._minval = minval + self._maxval = maxval + self._distribution = self._check_distribution(distribution) + self.strict = strict + + @property + def name(self): + return self._name + + def sample(self, state): + if self._distribution == "loguniform": + return self._loguniform(state) + + if self._distribution == "uniform": + return self._uniform(state) + + if isinstance(self._distribution, dict): + return self._custom_distribution(state) + + def _check_distribution(self, distribution): + if not isinstance(distribution, dict): + assert distribution in _DISTRIBUTIONS + else: + assert not any(i < 0 for i in distribution.values()), "Probabilities cannot be negative" + assert abs(sum(distribution.values()) - 1) <= 1e-5, "Distribution is not normalized" + assert self._minval is None + assert self._maxval is None + + return distribution + + def _loguniform(self, state): + output = int(2 ** state.uniform( + low=np.log2(self._minval) if self._minval is not None else None, + high=np.log2(self._maxval) if self._maxval is not None else None, + )) + if self._minval is not None and output < self._minval: + return self._minval + if self._maxval is not None and output > self._maxval: + return self._maxval + return output + + def _uniform(self, state): + if isinstance(self._minval, int) and isinstance(self._maxval, int): + return int(state.randint(low=self._minval, high=self._maxval + 1)) + return state.uniform(low=self._minval, high=self._maxval) + + def _custom_distribution(self, state): + # If we directly pass the keys to `choice`, numpy will convert + # them to numpy dtypes. 
+ index = state.choice( + np.arange(len(self._distribution)), + p=tuple(self._distribution.values())) + return list(self._distribution.keys())[index] + + +class ParameterAlias: + """Indicates that a parameter should alias the value of another parameter. + + When used in conjunction with a custom distribution, this allows fuzzed + tensors to represent a broader range of behaviors. For example, the + following sometimes produces Tensors which broadcast: + + Fuzzer( + parameters=[ + FuzzedParameter("x_len", 4, 1024, distribution="uniform"), + + # `y` will either be size one, or match the size of `x`. + FuzzedParameter("y_len", distribution={ + 0.5: 1, + 0.5: ParameterAlias("x_len") + }), + ], + tensors=[ + FuzzedTensor("x", size=("x_len",)), + FuzzedTensor("y", size=("y_len",)), + ], + ) + + Chains of alias' are allowed, but may not contain cycles. + """ + def __init__(self, alias_to): + self.alias_to = alias_to + + def __repr__(self): + return f"ParameterAlias[alias_to: {self.alias_to}]" + + +def dtype_size(dtype): + if dtype == torch.bool: + return 1 + if dtype.is_floating_point or dtype.is_complex: + return int(torch.finfo(dtype).bits / 8) + return int(torch.iinfo(dtype).bits / 8) + + +def prod(values, base=1): + """np.prod can overflow, so for sizes the product should be done in Python. + + Even though np.prod type promotes to int64, it can still overflow in which + case the negative value will pass the size check and OOM when attempting to + actually allocate the Tensor. 
+ """ + return functools.reduce(lambda x, y: int(x) * int(y), values, base) + + +class FuzzedTensor: + def __init__( + self, + name: str, + size: Tuple[Union[str, int], ...], + steps: Optional[Tuple[Union[str, int], ...]] = None, + probability_contiguous: float = 0.5, + min_elements: Optional[int] = None, + max_elements: Optional[int] = None, + max_allocation_bytes: Optional[int] = None, + dim_parameter: Optional[str] = None, + roll_parameter: Optional[str] = None, + dtype=torch.float32, + cuda=False, + tensor_constructor: Optional[Callable] = None + ): + """ + Args: + name: + A string identifier for the generated Tensor. + size: + A tuple of integers or strings specifying the size of the generated + Tensor. String values will replaced with a concrete int during the + generation process, while ints are simply passed as literals. + steps: + An optional tuple with the same length as `size`. This indicates + that a larger Tensor should be allocated, and then sliced to + produce the generated Tensor. For instance, if size is (4, 8) + and steps is (1, 4), then a tensor `t` of size (4, 32) will be + created and then `t[:, ::4]` will be used. (Allowing one to test + Tensors with strided memory.) + probability_contiguous: + A number between zero and one representing the chance that the + generated Tensor has a contiguous memory layout. This is achieved by + randomly permuting the shape of a Tensor, calling `.contiguous()`, + and then permuting back. This is applied before `steps`, which can + also cause a Tensor to be non-contiguous. + min_elements: + The minimum number of parameters that this Tensor must have for a + set of parameters to be valid. (Otherwise they are resampled.) + max_elements: + Like `min_elements`, but setting an upper bound. + max_allocation_bytes: + Like `max_elements`, but for the size of Tensor that must be + allocated prior to slicing for `steps` (if applicable). 
For + example, a FloatTensor with size (1024, 1024) and steps (4, 4) + would have 1M elements, but would require a 64 MB allocation. + dim_parameter: + The length of `size` and `steps` will be truncated to this value. + This allows Tensors of varying dimensions to be generated by the + Fuzzer. + dtype: + The PyTorch dtype of the generated Tensor. + cuda: + Whether to place the Tensor on a GPU. + tensor_constructor: + Callable which will be used instead of the default Tensor + construction method. This allows the author to enforce properties + of the Tensor (e.g. it can only have certain values). The dtype and + concrete shape of the Tensor to be created will be passed, and + concrete values of all parameters will be passed as kwargs. Note + that transformations to the result (permuting, slicing) will be + performed by the Fuzzer; the tensor_constructor is only responsible + for creating an appropriately sized Tensor. + """ + self._name = name + self._size = size + self._steps = steps + self._probability_contiguous = probability_contiguous + self._min_elements = min_elements + self._max_elements = max_elements + self._max_allocation_bytes = max_allocation_bytes + self._dim_parameter = dim_parameter + self._dtype = dtype + self._cuda = cuda + self._tensor_constructor = tensor_constructor + + @property + def name(self): + return self._name + + @staticmethod + def default_tensor_constructor(size, dtype, **kwargs): + if dtype.is_floating_point or dtype.is_complex: + return torch.rand(size=size, dtype=dtype, device="cpu") + else: + return torch.randint(1, 127, size=size, dtype=dtype, device="cpu") + + def _make_tensor(self, params, state): + size, steps, allocation_size = self._get_size_and_steps(params) + constructor = ( + self._tensor_constructor or + self.default_tensor_constructor + ) + + raw_tensor = constructor(size=allocation_size, dtype=self._dtype, **params) + if self._cuda: + raw_tensor = raw_tensor.cuda() + + # Randomly permute the Tensor and call 
`.contiguous()` to force re-ordering + # of the memory, and then permute it back to the original shape. + dim = len(size) + order = np.arange(dim) + if state.rand() > self._probability_contiguous: + while dim > 1 and np.all(order == np.arange(dim)): + order = state.permutation(raw_tensor.dim()) + + raw_tensor = raw_tensor.permute(tuple(order)).contiguous() + raw_tensor = raw_tensor.permute(tuple(np.argsort(order))) + + slices = [slice(0, size * step, step) for size, step in zip(size, steps)] + tensor = raw_tensor[slices] + + properties = { + "numel": int(tensor.numel()), + "order": order, + "steps": steps, + "is_contiguous": tensor.is_contiguous(), + "dtype": str(self._dtype), + } + + return tensor, properties + + def _get_size_and_steps(self, params): + dim = ( + params[self._dim_parameter] + if self._dim_parameter is not None + else len(self._size) + ) + + def resolve(values, dim): + """Resolve values into concrete integers.""" + values = tuple(params.get(i, i) for i in values) + if len(values) > dim: + values = values[:dim] + if len(values) < dim: + values = values + tuple(1 for _ in range(dim - len(values))) + return values + + size = resolve(self._size, dim) + steps = resolve(self._steps or (), dim) + allocation_size = tuple(size_i * step_i for size_i, step_i in zip(size, steps)) + return size, steps, allocation_size + + def satisfies_constraints(self, params): + size, _, allocation_size = self._get_size_and_steps(params) + # Product is computed in Python to avoid integer overflow. 
class Fuzzer:
    # Drives randomized generation of (tensors, tensor_properties, params)
    # test cases from FuzzedParameter / FuzzedTensor specs, using rejection
    # sampling against user-supplied constraints.

    def __init__(
        self,
        parameters: List[Union[FuzzedParameter, List[FuzzedParameter]]],
        tensors: List[Union[FuzzedTensor, List[FuzzedTensor]]],
        constraints: Optional[List[Callable]] = None,
        seed: Optional[int] = None
    ):
        """
        Args:
            parameters:
                List of FuzzedParameters which provide specifications
                for generated parameters. Iterable elements will be
                unpacked, though arbitrary nested structures will not.
            tensors:
                List of FuzzedTensors which define the Tensors which
                will be created each step based on the parameters for
                that step. Iterable elements will be unpacked, though
                arbitrary nested structures will not.
            constraints:
                List of callables. They will be called with params
                as kwargs, and if any of them return False the current
                set of parameters will be rejected.
            seed:
                Seed for the RandomState used by the Fuzzer. This will
                also be used to set the PyTorch random seed so that random
                ops will create reproducible Tensors.
        """
        if seed is None:
            # Draw a random seed up front and record it so a run can be
            # reproduced later from `self._seed`.
            seed = np.random.RandomState().randint(0, 2 ** 32 - 1, dtype=np.int64)
        self._seed = seed
        self._parameters = Fuzzer._unpack(parameters, FuzzedParameter)
        self._tensors = Fuzzer._unpack(tensors, FuzzedTensor)
        self._constraints = constraints or ()

        # Parameters and tensors share one namespace in the generated
        # `params` dicts, so a name collision would be ambiguous.
        p_names = {p.name for p in self._parameters}
        t_names = {t.name for t in self._tensors}
        name_overlap = p_names.intersection(t_names)
        if name_overlap:
            raise ValueError(f"Duplicate names in parameters and tensors: {name_overlap}")

        # Bookkeeping for the `rejection_rate` property.
        self._rejections = 0
        self._total_generated = 0

    @staticmethod
    def _unpack(values, cls):
        # Flatten one level of nesting: a bare `cls` instance passes through
        # unchanged, while an iterable of instances is spliced in.
        return tuple(it.chain(
            *[[i] if isinstance(i, cls) else i for i in values]
        ))

    def take(self, n):
        """Yield `n` (tensors, tensor_properties, params) test cases."""
        state = np.random.RandomState(self._seed)
        # Seed PyTorch from the same stream so tensor contents are
        # reproducible alongside the sampled parameters.
        torch.manual_seed(state.randint(low=0, high=2 ** 63, dtype=np.int64))
        for _ in range(n):
            params = self._generate(state)
            tensors = {}
            tensor_properties = {}
            for t in self._tensors:
                tensor, properties = t._make_tensor(params, state)
                tensors[t.name] = tensor
                tensor_properties[t.name] = properties
            yield tensors, tensor_properties, params

    @property
    def rejection_rate(self):
        # Fraction of candidate parameter sets discarded by constraints.
        if not self._total_generated:
            return 0.
        return self._rejections / self._total_generated

    def _generate(self, state):
        # Rejection-sample a full parameter set. `strict` parameters are
        # sampled once and pinned across retries; non-strict parameters are
        # redrawn on every attempt.
        strict_params: Dict[str, Union[float, int, ParameterAlias]] = {}
        for _ in range(1000):
            candidate_params: Dict[str, Union[float, int, ParameterAlias]] = {}
            for p in self._parameters:
                if p.strict:
                    if p.name in strict_params:
                        candidate_params[p.name] = strict_params[p.name]
                    else:
                        candidate_params[p.name] = p.sample(state)
                        strict_params[p.name] = candidate_params[p.name]
                else:
                    candidate_params[p.name] = p.sample(state)

            candidate_params = self._resolve_aliases(candidate_params)

            self._total_generated += 1
            if not all(f(candidate_params) for f in self._constraints):
                self._rejections += 1
                continue

            if not all(t.satisfies_constraints(candidate_params) for t in self._tensors):
                self._rejections += 1
                continue

            return candidate_params
        raise ValueError("Failed to generate a set of valid parameters.")

    @staticmethod
    def _resolve_aliases(params):
        # Iteratively substitute ParameterAlias values with their targets.
        # Every pass must strictly reduce the number of remaining aliases;
        # if it does not, an alias cycle exists and we fail loudly.
        params = dict(params)
        alias_count = sum(isinstance(v, ParameterAlias) for v in params.values())

        keys = list(params.keys())
        while alias_count:
            for k in keys:
                v = params[k]
                if isinstance(v, ParameterAlias):
                    params[k] = params[v.alias_to]

            alias_count_new = sum(isinstance(v, ParameterAlias) for v in params.values())
            if alias_count == alias_count_new:
                raise ValueError(f"ParameterAlias cycle detected\n{params}")

            alias_count = alias_count_new

        return params
will replace: + `GLOBAL_SETUP_TEMPLATE_LOCATION`, + `SETUP_TEMPLATE_LOCATION` + and + `STMT_TEMPLATE_LOCATION` +sections with user provided statements. +*/ +#include + +#include +#include +#include +#include + +// Global setup. (e.g. #includes) +// GLOBAL_SETUP_TEMPLATE_LOCATION + +double timeit(int n) { + pybind11::gil_scoped_release no_gil; + + // Setup + // SETUP_TEMPLATE_LOCATION + + { + // Warmup + // STMT_TEMPLATE_LOCATION + } + + // Main loop + auto start_time = std::chrono::high_resolution_clock::now(); + for (const auto loop_idx : c10::irange(n)) { + (void)loop_idx; + // STMT_TEMPLATE_LOCATION + } + auto end_time = std::chrono::high_resolution_clock::now(); + return std::chrono::duration(end_time - start_time).count(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("timeit", &timeit); +} diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..b101c38f4ccdbbd4496d14e59bbfba34bbe5a6f1 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py @@ -0,0 +1,537 @@ +"""Timer class based on the timeit.Timer class, but torch aware.""" +import enum +import timeit +import textwrap +from typing import overload, Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union + +import torch +from torch.utils.benchmark.utils import common, cpp_jit +from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType +from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface + + +__all__ = ["Timer", "timer", "Language"] + + +if torch.backends.cuda.is_built() and torch.cuda.is_available(): + def timer() -> float: + torch.cuda.synchronize() + return timeit.default_timer() +elif torch._C._get_privateuse1_backend_name() != "privateuseone": + privateuse1_device_handler = 
class CPPTimer:
    """Timer backend that measures C++ snippets.

    The statement/setup strings are compiled lazily (on the first call to
    `timeit`) into an extension module via `cpp_jit`, then executed with the
    GIL released.
    """

    def __init__(
        self,
        stmt: str,
        setup: str,
        global_setup: str,
        timer: Callable[[], float],
        globals: Dict[str, Any],
    ) -> None:
        # C++ timing always uses the wall clock; a custom timer (notably the
        # CUDA-synchronizing one) cannot be honored.
        if timer is not timeit.default_timer:
            raise NotImplementedError(
                "PyTorch was built with CUDA and a GPU is present; however "
                "Timer does not yet support GPU measurements. If your "
                "code is CPU only, pass `timer=timeit.default_timer` to the "
                "Timer's constructor to indicate this. (Note that this will "
                "produce incorrect results if the GPU is in fact used, as "
                "Timer will not synchronize CUDA.)"
            )

        # Python globals have no meaning in C++ source.
        if globals:
            raise ValueError("C++ timing does not support globals.")

        self._stmt: str = textwrap.dedent(stmt)
        self._setup: str = textwrap.dedent(setup)
        self._global_setup: str = textwrap.dedent(global_setup)
        self._timeit_module: Optional[TimeitModuleType] = None

    def timeit(self, number: int) -> float:
        module = self._timeit_module
        if module is None:
            # Compile once and cache; subsequent calls reuse the module.
            module = cpp_jit.compile_timeit_template(
                stmt=self._stmt,
                setup=self._setup,
                global_setup=self._global_setup,
            )
            self._timeit_module = module
        return module.timeit(number)
+ + For a full tutorial on how to use this class, see: + https://pytorch.org/tutorials/recipes/recipes/benchmark.html + + The PyTorch Timer is based on `timeit.Timer` (and in fact uses + `timeit.Timer` internally), but with several key differences: + + 1) Runtime aware: + Timer will perform warmups (important as some elements of PyTorch are + lazily initialized), set threadpool size so that comparisons are + apples-to-apples, and synchronize asynchronous CUDA functions when + necessary. + + 2) Focus on replicates: + When measuring code, and particularly complex kernels / models, + run-to-run variation is a significant confounding factor. It is + expected that all measurements should include replicates to quantify + noise and allow median computation, which is more robust than mean. + To that effect, this class deviates from the `timeit` API by + conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`. + (Exact algorithms are discussed in method docstrings.) The `timeit` + method is replicated for cases where an adaptive strategy is not + desired. + + 3) Optional metadata: + When defining a Timer, one can optionally specify `label`, `sub_label`, + `description`, and `env`. (Defined later) These fields are included in + the representation of result object and by the `Compare` class to group + and display results for comparison. + + 4) Instruction counts + In addition to wall times, Timer can run a statement under Callgrind + and report instructions executed. + + Directly analogous to `timeit.Timer` constructor arguments: + + `stmt`, `setup`, `timer`, `globals` + + PyTorch Timer specific constructor arguments: + + `label`, `sub_label`, `description`, `env`, `num_threads` + + Args: + stmt: Code snippet to be run in a loop and timed. + + setup: Optional setup code. Used to define variables used in `stmt` + + global_setup: (C++ only) + Code which is placed at the top level of the file for things like + `#include` statements. 
+ + timer: + Callable which returns the current time. If PyTorch was built + without CUDA or there is no GPU present, this defaults to + `timeit.default_timer`; otherwise it will synchronize CUDA before + measuring the time. + + globals: + A dict which defines the global variables when `stmt` is being + executed. This is the other method for providing variables which + `stmt` needs. + + label: + String which summarizes `stmt`. For instance, if `stmt` is + "torch.nn.functional.relu(torch.add(x, 1, out=out))" + one might set label to "ReLU(x + 1)" to improve readability. + + sub_label: + Provide supplemental information to disambiguate measurements + with identical stmt or label. For instance, in our example + above sub_label might be "float" or "int", so that it is easy + to differentiate: + "ReLU(x + 1): (float)" + + "ReLU(x + 1): (int)" + when printing Measurements or summarizing using `Compare`. + + description: + String to distinguish measurements with identical label and + sub_label. The principal use of `description` is to signal to + `Compare` the columns of data. For instance one might set it + based on the input size to create a table of the form: :: + + | n=1 | n=4 | ... + ------------- ... + ReLU(x + 1): (float) | ... | ... | ... + ReLU(x + 1): (int) | ... | ... | ... + + + using `Compare`. It is also included when printing a Measurement. + + env: + This tag indicates that otherwise identical tasks were run in + different environments, and are therefore not equivalent, for + instance when A/B testing a change to a kernel. `Compare` will + treat Measurements with different `env` specification as distinct + when merging replicate runs. + + num_threads: + The size of the PyTorch threadpool when executing `stmt`. Single + threaded performance is important as both a key inference workload + and a good indicator of intrinsic algorithmic efficiency, so the + default is set to one. 
    def __init__(
        self,
        stmt: str = "pass",
        setup: str = "pass",
        global_setup: str = "",
        timer: Callable[[], float] = timer,
        globals: Optional[Dict[str, Any]] = None,
        label: Optional[str] = None,
        sub_label: Optional[str] = None,
        description: Optional[str] = None,
        env: Optional[str] = None,
        num_threads: int = 1,
        language: Union[Language, str] = Language.PYTHON,
    ):
        # See the class docstring for the meaning of each argument.
        if not isinstance(stmt, str):
            raise ValueError("Currently only a `str` stmt is supported.")

        # We copy `globals` to prevent mutations from leaking.
        # (For instance, `eval` adds the `__builtins__` key)
        self._globals = dict(globals or {})

        timer_kwargs = {}
        if language in (Language.PYTHON, "py", "python"):
            # Include `torch` if not specified as a convenience feature.
            self._globals.setdefault("torch", torch)
            self._language: Language = Language.PYTHON
            if global_setup:
                raise ValueError(
                    f"global_setup is C++ only, got `{global_setup}`. Most "
                    "likely this code can simply be moved to `setup`."
                )

        elif language in (Language.CPP, "cpp", "c++"):
            # Swap the backend class so `self._timer` below becomes a
            # CPPTimer instead of a `timeit.Timer`.
            assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped."
            self._timer_cls = CPPTimer
            # "pass" is a Python no-op; the C++ equivalent is an empty string.
            setup = ("" if setup == "pass" else setup)
            self._language = Language.CPP
            timer_kwargs["global_setup"] = global_setup

        else:
            raise ValueError(f"Invalid language `{language}`.")

        # Convenience adjustment so that multi-line code snippets defined in
        # functions do not IndentationError (Python) or look odd (C++). The
        # leading newline removal is for the initial newline that appears when
        # defining block strings. For instance:
        #   textwrap.dedent("""
        #     print("This is a stmt")
        #   """)
        # produces '\nprint("This is a stmt")\n'.
        #
        # Stripping this down to 'print("This is a stmt")' doesn't change
        # what gets executed, but it makes __repr__'s nicer.
        stmt = textwrap.dedent(stmt)
        stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip()
        setup = textwrap.dedent(setup)
        setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip()

        # `CopyIfCallgrind` wrappers are only meaningful to
        # `collect_callgrind`; wall-time measurement uses the raw values.
        self._timer = self._timer_cls(
            stmt=stmt,
            setup=setup,
            timer=timer,
            globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),
            **timer_kwargs,
        )
        # Metadata bundle consumed by `Measurement` / `Compare`.
        self._task_spec = common.TaskSpec(
            stmt=stmt,
            setup=setup,
            global_setup=global_setup,
            label=label,
            sub_label=sub_label,
            description=description,
            env=env,
            num_threads=num_threads,
        )
+ https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit + """ + with common.set_torch_threads(self._task_spec.num_threads): + # Warmup + self._timeit(number=max(int(number // 100), 2)) + + return common.Measurement( + number_per_run=number, + raw_times=[self._timeit(number=number)], + task_spec=self._task_spec + ) + + def repeat(self, repeat: int = -1, number: int = -1) -> None: + raise NotImplementedError("See `Timer.blocked_autorange.`") + + def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None: + raise NotImplementedError("See `Timer.blocked_autorange.`") + + def _threaded_measurement_loop( + self, + number: int, + time_hook: Callable[[], float], + stop_hook: Callable[[List[float]], bool], + min_run_time: float, + max_run_time: Optional[float] = None, + callback: Optional[Callable[[int, float], NoReturn]] = None + ) -> List[float]: + total_time = 0.0 + can_stop = False + times: List[float] = [] + with common.set_torch_threads(self._task_spec.num_threads): + while (total_time < min_run_time) or (not can_stop): + time_spent = time_hook() + times.append(time_spent) + total_time += time_spent + if callback: + callback(number, time_spent) + can_stop = stop_hook(times) + if max_run_time and total_time > max_run_time: + break + return times + + def _estimate_block_size(self, min_run_time: float) -> int: + with common.set_torch_threads(self._task_spec.num_threads): + # Estimate the block size needed for measurement to be negligible + # compared to the inner loop. This also serves as a warmup. 
    def blocked_autorange(
        self,
        callback: Optional[Callable[[int, float], NoReturn]] = None,
        min_run_time: float = 0.2,
    ) -> common.Measurement:
        """Measure many replicates while keeping timer overhead to a minimum.

        At a high level, blocked_autorange executes the following pseudo-code::

            `setup`

            total_time = 0
            while total_time < min_run_time
                start = timer()
                for _ in range(block_size):
                    `stmt`
                total_time += (timer() - start)

        Note the variable `block_size` in the inner loop. The choice of block
        size is important to measurement quality, and must balance two
        competing objectives:

            1) A small block size results in more replicates and generally
               better statistics.

            2) A large block size better amortizes the cost of `timer`
               invocation, and results in a less biased measurement. This is
               important because CUDA synchronization time is non-trivial
               (order single to low double digit microseconds) and would
               otherwise bias the measurement.

        blocked_autorange sets block_size by running a warmup period,
        increasing block size until timer overhead is less than 0.1% of
        the overall computation. This value is then used for the main
        measurement loop.

        Returns:
            A `Measurement` object that contains measured runtimes and
            repetition counts, and can be used to compute statistics.
            (mean, median, etc.)
        """
        number = self._estimate_block_size(min_run_time)

        def time_hook() -> float:
            return self._timeit(number)

        def stop_hook(times: List[float]) -> bool:
            # Always allow stopping: with a fixed block size, the loop runs
            # only until `min_run_time` worth of samples is accumulated.
            return True

        times = self._threaded_measurement_loop(
            number, time_hook, stop_hook,
            min_run_time=min_run_time,
            callback=callback)

        return common.Measurement(
            number_per_run=number,
            raw_times=times,
            task_spec=self._task_spec
        )
+ """ + number = self._estimate_block_size(min_run_time) + + def time_hook() -> float: + return self._timeit(number) + + def stop_hook(times: List[float]) -> bool: + return True + + times = self._threaded_measurement_loop( + number, time_hook, stop_hook, + min_run_time=min_run_time, + callback=callback) + + return common.Measurement( + number_per_run=number, + raw_times=times, + task_spec=self._task_spec + ) + + def adaptive_autorange( + self, + threshold: float = 0.1, + *, + min_run_time: float = 0.01, + max_run_time: float = 10.0, + callback: Optional[Callable[[int, float], NoReturn]] = None, + ) -> common.Measurement: + """Similar to `blocked_autorange` but also checks for variablility in measurements + and repeats until iqr/median is smaller than `threshold` or `max_run_time` is reached. + + + At a high level, adaptive_autorange executes the following pseudo-code:: + + `setup` + + times = [] + while times.sum < max_run_time + start = timer() + for _ in range(block_size): + `stmt` + times.append(timer() - start) + + enough_data = len(times)>3 and times.sum > min_run_time + small_iqr=times.iqr/times.mean float: + return self._timeit(number) + + def stop_hook(times: List[float]) -> bool: + if len(times) > 3: + return common.Measurement( + number_per_run=number, + raw_times=times, + task_spec=self._task_spec + ).meets_confidence(threshold=threshold) + return False + times = self._threaded_measurement_loop( + number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback) + + return common.Measurement( + number_per_run=number, + raw_times=times, + task_spec=self._task_spec + ) + + @overload + def collect_callgrind( + self, + number: int, + *, + repeats: None, + collect_baseline: bool, + retain_out_file: bool, + ) -> valgrind_timer_interface.CallgrindStats: + ... 
    def collect_callgrind(
        self,
        number: int = 100,
        *,
        repeats: Optional[int] = None,
        collect_baseline: bool = True,
        retain_out_file: bool = False,
    ) -> Any:
        """Collect instruction counts using Callgrind.

        Unlike wall times, instruction counts are deterministic
        (modulo non-determinism in the program itself and small amounts of
        jitter from the Python interpreter.) This makes them ideal for detailed
        performance analysis. This method runs `stmt` in a separate process
        so that Valgrind can instrument the program. Performance is severely
        degraded due to the instrumentation, however this is ameliorated by
        the fact that a small number of iterations is generally sufficient to
        obtain good measurements.

        In order to use this method `valgrind`, `callgrind_control`, and
        `callgrind_annotate` must be installed.

        Because there is a process boundary between the caller (this process)
        and the `stmt` execution, `globals` cannot contain arbitrary in-memory
        data structures. (Unlike timing methods) Instead, globals are
        restricted to builtins, `nn.Modules`'s, and TorchScripted functions/modules
        to reduce the surprise factor from serialization and subsequent
        deserialization. The `GlobalsBridge` class provides more detail on this
        subject. Take particular care with nn.Modules: they rely on pickle and
        you may need to add an import to `setup` for them to transfer properly.

        By default, a profile for an empty statement will be collected and
        cached to indicate how many instructions are from the Python loop which
        drives `stmt`.

        Returns:
            A `CallgrindStats` object which provides instruction counts and
            some basic facilities for analyzing and manipulating results.
        """
        if not isinstance(self._task_spec.stmt, str):
            raise ValueError("`collect_callgrind` currently only supports string `stmt`")

        if repeats is not None and repeats < 1:
            raise ValueError("If specified, `repeats` must be >= 1")

        # Check that the statement is valid. It doesn't guarantee success, but it's much
        # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in
        # the parent process rather than the valgrind subprocess.
        self._timeit(1)
        is_python = (self._language == Language.PYTHON)
        # C++ snippets cannot carry Python globals (see CPPTimer).
        assert is_python or not self._globals
        result = valgrind_timer_interface.wrapper_singleton().collect_callgrind(
            task_spec=self._task_spec,
            globals=self._globals,
            number=number,
            repeats=repeats or 1,
            # The baseline (empty-loop) profile only makes sense for Python.
            collect_baseline=collect_baseline and is_python,
            is_python=is_python,
            retain_out_file=retain_out_file,
        )

        # Preserve the scalar return type when `repeats` was not requested.
        return (result[0] if repeats is None else result)
@dataclasses.dataclass(repr=False, eq=False, frozen=True)
class FunctionCounts:
    """Container for manipulating Callgrind results.

    It supports:
        1) Addition and subtraction to combine or diff results.
        2) Tuple-like indexing.
        3) A `denoise` function which strips CPython calls which are known to
           be non-deterministic and quite noisy.
        4) Two higher order methods (`filter` and `transform`) for custom
           manipulation.
    """
    # Sorted (count, function-name) pairs.
    _data: Tuple[FunctionCount, ...]
    # True if counts include instructions executed by callees (Callgrind's
    # inclusive semantics), False for exclusive counts.
    inclusive: bool
    truncate_rows: bool = True

    # For normal use, torch._tensor_str.PRINT_OPTS.linewidth determines
    # the print settings. This is simply to allow hermetic unit tests.
    _linewidth: Optional[int] = None

    def __iter__(self) -> Generator[FunctionCount, None, None]:
        yield from self._data

    def __len__(self) -> int:
        return len(self._data)

    def __getitem__(self, item: Any) -> Union[FunctionCount, "FunctionCounts"]:
        # A slice returns another FunctionCounts; a scalar index returns the
        # underlying FunctionCount.
        data: Union[FunctionCount, Tuple[FunctionCount, ...]] = self._data[item]
        return (
            FunctionCounts(cast(Tuple[FunctionCount, ...], data), self.inclusive, truncate_rows=False)
            if isinstance(data, tuple) else data
        )

    def __repr__(self) -> str:
        count_len = 0
        for c, _ in self:
            # Account for sign in string length.
            count_len = max(count_len, len(str(c)) + int(c < 0))

        lines = []
        linewidth = self._linewidth or torch._tensor_str.PRINT_OPTS.linewidth
        fn_str_len = max(linewidth - count_len - 4, 40)
        for c, fn in self:
            if len(fn) > fn_str_len:
                # Elide the middle of over-long function names.
                left_len = int((fn_str_len - 5) // 2)
                fn = fn[:left_len] + " ... " + fn[-(fn_str_len - left_len - 5):]
            lines.append(f"  {c:>{count_len}}  {fn}")

        if self.truncate_rows and len(lines) > 18:
            # Show the nine largest and nine smallest rows.
            lines = lines[:9] + ["...".rjust(count_len + 2)] + lines[-9:]

        if not self.inclusive:
            lines.extend(["", f"Total: {self.sum()}"])

        return "\n".join([super().__repr__()] + lines)

    def __add__(
        self,
        other: "FunctionCounts",
    ) -> "FunctionCounts":
        return self._merge(other, lambda c: c)

    def __sub__(
        self,
        other: "FunctionCounts",
    ) -> "FunctionCounts":
        # Subtraction is a merge with negated right-hand counts.
        return self._merge(other, lambda c: -c)

    def __mul__(self, other: Union[int, float]) -> "FunctionCounts":
        return self._from_dict({
            fn: int(c * other) for c, fn in self._data
        }, self.inclusive)

    def transform(self, map_fn: Callable[[str], str]) -> "FunctionCounts":
        """Apply `map_fn` to all of the function names.

        This can be used to regularize function names (e.g. stripping irrelevant
        parts of the file path), coalesce entries by mapping multiple functions
        to the same name (in which case the counts are added together), etc.
        """
        counts: DefaultDict[str, int] = collections.defaultdict(int)
        for c, fn in self._data:
            counts[map_fn(fn)] += c

        return self._from_dict(counts, self.inclusive)

    def filter(self, filter_fn: Callable[[str], bool]) -> "FunctionCounts":
        """Keep only the elements where `filter_fn` applied to function name returns True."""
        return FunctionCounts(tuple(i for i in self if filter_fn(i.function)), self.inclusive)

    def sum(self) -> int:
        return sum(c for c, _ in self)

    def denoise(self) -> "FunctionCounts":
        """Remove known noisy instructions.

        Several instructions in the CPython interpreter are rather noisy. These
        instructions involve unicode to dictionary lookups which Python uses to
        map variable names. FunctionCounts is generally a content agnostic
        container, however this is sufficiently important for obtaining
        reliable results to warrant an exception."""
        return self.filter(lambda fn: "dictobject.c:lookdict_unicode" not in fn)

    def _merge(
        self,
        second: "FunctionCounts",
        merge_fn: Callable[[int], int]
    ) -> "FunctionCounts":
        # Combine two count sets by function name, applying `merge_fn` to the
        # right-hand counts (identity for addition, negation for diffing).
        assert self.inclusive == second.inclusive, "Cannot merge inclusive and exclusive counts."
        counts: DefaultDict[str, int] = collections.defaultdict(int)
        for c, fn in self:
            counts[fn] += c

        for c, fn in second:
            counts[fn] += merge_fn(c)

        return self._from_dict(counts, self.inclusive)

    @staticmethod
    def _from_dict(counts: Dict[str, int], inclusive: bool) -> "FunctionCounts":
        # Drop zero-count entries and sort by descending count.
        flat_counts = (FunctionCount(c, fn) for fn, c in counts.items() if c)
        return FunctionCounts(tuple(sorted(flat_counts, reverse=True)), inclusive)
@dataclasses.dataclass(repr=False, eq=False, frozen=True)
class CallgrindStats:
    """Top level container for Callgrind results collected by Timer.

    Manipulation is generally done using the FunctionCounts class, which is
    obtained by calling `CallgrindStats.stats(...)`. Several convenience
    methods are provided as well; the most significant is
    `CallgrindStats.as_standardized()`.
    """
    task_spec: common.TaskSpec
    number_per_run: int
    built_with_debug_symbols: bool
    baseline_inclusive_stats: FunctionCounts
    baseline_exclusive_stats: FunctionCounts
    stmt_inclusive_stats: FunctionCounts
    stmt_exclusive_stats: FunctionCounts
    # Path to the raw callgrind.out file, if retained.
    stmt_callgrind_out: Optional[str]

    def __repr__(self) -> str:
        newline = "\n"  # `\` cannot appear in fstring code section.
        base_stats = self.baseline_exclusive_stats
        output = f"""
{super().__repr__()}
{self.task_spec.summarize()}
{'':>25}All{'':>10}Noisy symbols removed
    Instructions: {self.counts(denoise=False):>12}{'':>15}{self.counts(denoise=True):>12}
    Baseline:     {base_stats.sum():>12}{'':>15}{base_stats.denoise().sum():>12}
{self.number_per_run} runs per measurement, {self.task_spec.num_threads} thread{'s' if self.task_spec.num_threads > 1 else ''}
""".strip()
        if not self.built_with_debug_symbols:
            output += textwrap.dedent("""
            Warning: PyTorch was not built with debug symbols.
                     Source information may be limited. Rebuild with
                     REL_WITH_DEB_INFO=1 for more detailed results.""")
        return output

    def stats(self, inclusive: bool = False) -> FunctionCounts:
        """Returns detailed function counts.

        Conceptually, the FunctionCounts returned can be thought of as a tuple
        of (count, path_and_function_name) tuples.

        `inclusive` matches the semantics of callgrind. If True, the counts
        include instructions executed by children. `inclusive=True` is useful
        for identifying hot spots in code; `inclusive=False` is useful for
        reducing noise when diffing counts from two different runs. (See
        CallgrindStats.delta(...) for more details)
        """
        return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats

    def counts(self, *, denoise: bool = False) -> int:
        """Returns the total number of instructions executed.

        See `FunctionCounts.denoise()` for an explanation of the `denoise` arg.
        """
        stats = self.stmt_exclusive_stats
        return (stats.denoise() if denoise else stats).sum()

    # FIXME: Once 3.7 is the minimum version, type annotate `other` per PEP 563
    def delta(
        self,
        other: "CallgrindStats",
        inclusive: bool = False,
    ) -> FunctionCounts:
        """Diff two sets of counts.

        One common reason to collect instruction counts is to determine the
        effect that a particular change will have on the number of instructions
        needed to perform some unit of work. If a change increases that number, the
        next logical question is "why". This generally involves looking at what part
        of the code increased in instruction count. This function automates that
        process so that one can easily diff counts on both an inclusive and
        exclusive basis.
        """
        return self.stats(inclusive=inclusive) - other.stats(inclusive=inclusive)

    def as_standardized(self) -> "CallgrindStats":
        """Strip library names and some prefixes from function strings.

        When comparing two different sets of instruction counts, one stumbling
        block can be path prefixes. Callgrind includes the full filepath
        when reporting a function (as it should). However, this can cause
        issues when diffing profiles. If a key component such as Python
        or PyTorch was built in separate locations in the two profiles, which
        can result in something resembling::

            23234231 /tmp/first_build_dir/thing.c:foo(...)
             9823794 /tmp/first_build_dir/thing.c:bar(...)
              ...
               53453 .../aten/src/Aten/...:function_that_actually_changed(...)
              ...
            -9823794 /tmp/second_build_dir/thing.c:bar(...)
           -23234231 /tmp/second_build_dir/thing.c:foo(...)

        Stripping prefixes can ameliorate this issue by regularizing the
        strings and causing better cancellation of equivalent call sites
        when diffing.
        """
        def strip(stats: FunctionCounts) -> FunctionCounts:
            transforms = (
                # PyTorch may have been built in different locations.
                (r"^.+build/\.\./", "build/../"),
                (r"^.+/" + re.escape("build/aten/"), "build/aten/"),

                # "Python" and "Objects" come from CPython.
                (r"^.+/" + re.escape("Python/"), "Python/"),
                (r"^.+/" + re.escape("Objects/"), "Objects/"),

                # Strip library name. e.g. `libtorch.so`
                (r"\s\[.+\]$", ""),
            )

            for before, after in transforms:
                stats = stats.transform(lambda fn: re.sub(before, after, fn))

            return stats

        return CallgrindStats(
            task_spec=self.task_spec,
            number_per_run=self.number_per_run,
            built_with_debug_symbols=self.built_with_debug_symbols,
            baseline_inclusive_stats=strip(self.baseline_inclusive_stats),
            baseline_exclusive_stats=strip(self.baseline_exclusive_stats),
            stmt_inclusive_stats=strip(self.stmt_inclusive_stats),
            stmt_exclusive_stats=strip(self.stmt_exclusive_stats),

            # `as_standardized` will change symbol names, so the contents will
            # no longer map directly to `callgrind.out`
            stmt_callgrind_out=None,
        )
+ """ + def __init__(self, value: Any, *, setup: Optional[str] = None): + for method, supported_types in _GLOBALS_ALLOWED_TYPES.items(): + if any(isinstance(value, t) for t in supported_types): + self._value: Any = value + self._setup: Optional[str] = setup + self._serialization: Serialization = method + break + else: + supported_str = "\n".join([ + getattr(t, "__name__", repr(t)) + for t in it.chain(_GLOBALS_ALLOWED_TYPES.values())]) + + raise ValueError( + f"Unsupported type: {type(value)}\n" + f"`collect_callgrind` restricts globals to the following types:\n" + f"{textwrap.indent(supported_str, ' ')}" + ) + + @property + def value(self) -> Any: + return self._value + + @property + def setup(self) -> Optional[str]: + return self._setup + + @property + def serialization(self) -> Serialization: + return self._serialization + + @staticmethod + def unwrap_all(globals: Dict[str, Any]) -> Dict[str, Any]: + return { + k: (v.value if isinstance(v, CopyIfCallgrind) else v) + for k, v in globals.items() + } + + +class GlobalsBridge: + """Handle the transfer of (certain) globals when collecting Callgrind statistics. + + Key takeaway: Any globals passed must be wrapped in `CopyIfCallgrind` to + work with `Timer.collect_callgrind`. + + Consider the following code snippet: + ``` + import pickle + import timeit + + class Counter: + value = 0 + + def __call__(self): + self.value += 1 + + counter = Counter() + timeit.Timer("counter()", globals={"counter": counter}).timeit(10) + print(counter.value) # 10 + + timeit.Timer( + "counter()", + globals={"counter": pickle.loads(pickle.dumps(counter))} + ).timeit(20) + print(counter.value) # Still 10 + ``` + + In the first case, `stmt` is executed using the objects in `globals`; + however, the addition of serialization and deserialization changes the + semantics and may meaningfully change behavior. + + This is a practical consideration when collecting Callgrind statistics. 
+ Unlike `exec` based execution (which `timeit` uses under the hood) which + can share in-memory data structures with the caller, Callgrind collection + requires an entirely new process in order to run under Valgrind. This means + that any data structures used for statement execution will have to be + serialized and deserialized in the subprocess. + + In order to avoid surprising semantics from (user invisible) process + boundaries, what can be passed through `globals` is severely restricted + for `Timer.collect_callgrind`. It is expected that most setup should be + achievable (albeit perhaps less ergonomically) by passing a `setup` + string. + + There are, however, exceptions. One such class are TorchScripted functions. + Because they require a concrete file with source code it is not possible + to define them using a `setup` string. Another group are torch.nn.Modules, + whose construction can be complex and prohibitively cumbersome to coerce + into a `setup` string. Finally, most builtin types are sufficiently well + behaved and sufficiently common to warrant allowing as well. (e.g. + `globals={"n": 1}` is very convenient.) + + Fortunately, all have well defined serialization semantics. This class + is responsible for enabling the Valgrind subprocess to use elements in + `globals` so long as they are an allowed type. + + Caveats: + The user is required to acknowledge this serialization by wrapping + elements in `globals` with `CopyIfCallgrind`. + + While ScriptFunction and ScriptModule are expected to save and load + quite robustly, it is up to the user to ensure that an nn.Module can + un-pickle successfully. + + `torch.Tensor` and `np.ndarray` are deliberately excluded. The + serialization/deserialization process perturbs the representation of a + tensor in ways that could result in incorrect measurements. 
For example, + if a tensor lives in pinned CPU memory, this fact would not be preserved + by a dump, and that will in turn change the performance of certain CUDA + operations. + """ + + def __init__(self, globals: Dict[str, Any], data_dir: str) -> None: + self._globals: Dict[str, CopyIfCallgrind] = {} + self._data_dir = data_dir + if not os.path.exists(data_dir): + os.mkdir(data_dir) + + if globals.get("torch", torch) is not torch: + raise ValueError("`collect_callgrind` does not support mocking out `torch`.") + + for name, value in globals.items(): + if name in ("torch", "__builtins__"): + # Torch will be imported by the collection script, and + # __builtins__ is added by Timer. + continue + + if not isinstance(value, CopyIfCallgrind): + raise ValueError( + "`collect_callgrind` requires that globals be wrapped in " + "`CopyIfCallgrind` so that serialization is explicit." + ) + + self._globals[name] = value + + def construct(self) -> str: + load_lines = [] + for name, wrapped_value in self._globals.items(): + if wrapped_value.setup is not None: + load_lines.append(textwrap.dedent(wrapped_value.setup)) + + if wrapped_value.serialization == Serialization.PICKLE: + path = os.path.join(self._data_dir, f"{name}.pkl") + load_lines.append( + f"with open({repr(path)}, 'rb') as f:\n {name} = pickle.load(f)") + with open(path, "wb") as f: + pickle.dump(wrapped_value.value, f) + + elif wrapped_value.serialization == Serialization.TORCH: + path = os.path.join(self._data_dir, f"{name}.pt") + load_lines.append(f"{name} = torch.load({repr(path)})") + torch.save(wrapped_value.value, path) + + elif wrapped_value.serialization == Serialization.TORCH_JIT: + path = os.path.join(self._data_dir, f"{name}.pt") + load_lines.append(f"{name} = torch.jit.load({repr(path)})") + with open(path, "wb") as f: + torch.jit.save(wrapped_value.value, f) + + else: + raise NotImplementedError( + f"Unknown serialization method: {wrapped_value.serialization}") + + return "\n".join(load_lines) + + +class 
_ValgrindWrapper: + def __init__(self) -> None: + self._bindings_module: Optional[CallgrindModuleType] = None + valgrind_symbols = ( + "_valgrind_supported_platform", + "_valgrind_toggle", + "_valgrind_toggle_and_dump_stats", + ) + if all(hasattr(torch._C, symbol) for symbol in valgrind_symbols): + self._supported_platform: bool = torch._C._valgrind_supported_platform() + + else: + print("Callgrind bindings are not present in `torch._C`. JIT-ing bindings.") + self._bindings_module = cpp_jit.get_compat_bindings() + assert all(hasattr(self._bindings_module, symbol) for symbol in valgrind_symbols) + self._supported_platform = self._bindings_module._valgrind_supported_platform() + + self._commands_available: Dict[str, bool] = {} + if self._supported_platform: + # Only bother checking on supported platforms. + for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"): + self._commands_available[cmd] = not subprocess.run( + ["which", cmd], + capture_output=True, + check=False, + ).returncode + + self._build_type: Optional[str] = None + build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show()) + if build_search is not None: + self._build_type = build_search.groups()[0].split(",")[0] + + def _validate(self) -> None: + if not self._supported_platform: + raise OSError("Valgrind is not supported on this platform.") + + missing_cmds = [cmd for cmd, available in self._commands_available.items() if not available] + if missing_cmds: + raise OSError("Missing: " + ", ".join(missing_cmds)) + + def collect_callgrind( + self, + task_spec: common.TaskSpec, + globals: Dict[str, Any], + *, + number: int, + repeats: int, + collect_baseline: bool, + is_python: bool, + retain_out_file: bool, + ) -> Tuple[CallgrindStats, ...]: + """Collect stats, and attach a reference run which can be used to filter interpreter overhead.""" + self._validate() + assert is_python or not collect_baseline + + *task_stats, baseline_stats = self._invoke( + task_spec=task_spec, + 
globals=globals, + number=number, + repeats=repeats, + collect_baseline=collect_baseline, + is_python=is_python, + retain_out_file=retain_out_file, + ) + assert len(task_stats) == repeats + + return tuple( + CallgrindStats( + task_spec=task_spec, + number_per_run=number, + built_with_debug_symbols=self._build_type == "RelWithDebInfo", + baseline_inclusive_stats=baseline_stats[0], + baseline_exclusive_stats=baseline_stats[1], + stmt_inclusive_stats=stmt_inclusive_stats, + stmt_exclusive_stats=stmt_exclusive_stats, + stmt_callgrind_out=out_contents, + ) + for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats + ) + + def _invoke( + self, + *, + task_spec: common.TaskSpec, + globals: Dict[str, Any], + number: int, + repeats: int, + collect_baseline: bool, + is_python: bool, + retain_out_file: bool, + ) -> Tuple[Tuple[FunctionCounts, FunctionCounts, Optional[str]], ...]: + """Core invocation method for Callgrind collection. + + Valgrind operates by effectively replacing the CPU with an emulated + version which allows it to instrument any code at the cost of severe + performance degradation. This has the practical effect that in order + to collect Callgrind statistics, a new process has to be created + running under `valgrind`. The steps for this process are: + + 1) Create a scratch directory. + 2) Codegen a run script. (_ValgrindWrapper._construct_script) + Inside the run script: + * Validate that Python and torch match the parent process + * Validate that it is indeed running under valgrind + * Execute `setup` and warm up `stmt` + * Begin collecting stats + * Run the `stmt` loop + * Stop collecting stats + 3) Parse the run results. + 4) Cleanup the scratch directory. 
+ """ + working_dir = common._make_temp_dir(prefix="callgrind") + data_dir = os.path.join(working_dir, "data") + script_file = os.path.join(working_dir, "timer_callgrind.py") + callgrind_out = os.path.join(working_dir, "callgrind.out") + error_log = os.path.join(working_dir, "error.txt") + stat_log = os.path.join(working_dir, "callgrind_stat.txt") + stdout_stderr_log = os.path.join(working_dir, "stdout_stderr.log") + + def run(args: List[str], **kwargs: Any) -> Tuple[CompletedProcessType, str]: + # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/ + f_stdout_stderr = open(stdout_stderr_log, "wb") + try: + invocation = subprocess.run( + args, + stdout=f_stdout_stderr, + stderr=subprocess.STDOUT, + **kwargs, + ) + with open(stdout_stderr_log) as f: + return invocation, f.read() + finally: + f_stdout_stderr.close() + + try: + if is_python: + if self._bindings_module is not None: + shutil.copy( + self._bindings_module.__file__, + os.path.join(working_dir, os.path.split(self._bindings_module.__file__)[1]) + ) + + script_file = os.path.join(working_dir, "timer_callgrind.py") + with open(script_file, "w") as f: + f.write(self._construct_script( + task_spec, + globals=GlobalsBridge(globals, data_dir), + number=number, + repeats=repeats, + collect_baseline=collect_baseline, + error_log=error_log, + stat_log=stat_log, + bindings=self._bindings_module)) + + run_loop_cmd = ["python", script_file] + else: + assert not collect_baseline + run_loop_exec = cpp_jit.compile_callgrind_template( + stmt=task_spec.stmt, + setup=task_spec.setup, + global_setup=task_spec.global_setup, + ) + run_loop_cmd = [ + run_loop_exec, + "--number", str(number), + "--number-warmup", str(min(number, 10)), + "--repeats", str(repeats), + "--number-threads", str(task_spec.num_threads), + ] + + valgrind_invocation, valgrind_invocation_output = run([ + "valgrind", + "--tool=callgrind", + f"--callgrind-out-file={callgrind_out}", + "--dump-line=yes", + 
"--dump-instr=yes", + "--instr-atstart=yes", + "--collect-atstart=no", + ] + run_loop_cmd) + + if valgrind_invocation.returncode: + error_report = "" + if os.path.exists(error_log): + with open(error_log) as f: + error_report = f.read() + if not error_report: + error_report = "Unknown error.\n" + valgrind_invocation_output + + raise OSError(f"Failed to collect callgrind profile:\n{error_report}") + + def parse_output(fpath: str, inclusive: bool) -> FunctionCounts: + annotate_invocation, annotate_invocation_output = run([ + "callgrind_annotate", + f"--inclusive={'yes' if inclusive else 'no'}", + "--threshold=100", + "--show-percs=no", + fpath + ], check=True) + + total_pattern = re.compile(r"^([0-9,]+)\s+PROGRAM TOTALS") + begin_pattern = re.compile(r"Ir\s+file:function") + function_pattern = re.compile(r"^\s*([0-9,]+)\s+(.+:.+)$") + + class ScanState(enum.Enum): + SCANNING_FOR_TOTAL = 0 + SCANNING_FOR_START = 1 + PARSING = 2 + + scan_state = ScanState.SCANNING_FOR_TOTAL + fn_counts = [] + for l in annotate_invocation_output.splitlines(keepends=False): + if scan_state == ScanState.SCANNING_FOR_TOTAL: + total_match = total_pattern.match(l) + if total_match: + program_totals = int(total_match.groups()[0].replace(",", "")) + scan_state = ScanState.SCANNING_FOR_START + + elif scan_state == ScanState.SCANNING_FOR_START: + if begin_pattern.match(l): + scan_state = ScanState.PARSING + + else: + assert scan_state == ScanState.PARSING + fn_match = function_pattern.match(l) + if fn_match: + ir_str, file_function = fn_match.groups() + ir = int(ir_str.replace(",", "")) + if ir == program_totals: + # Callgrind includes some top level red herring symbols when + # a program dumps multiple profiles. + continue + fn_counts.append(FunctionCount(ir, file_function)) + + elif re.match(r"-+", l): + # Ignore heading separator lines. 
+ continue + + else: + break + + assert scan_state == ScanState.PARSING, f"Failed to parse {fpath}" + return FunctionCounts(tuple(sorted(fn_counts, reverse=True)), inclusive=inclusive) + + def read_results(i: int) -> Tuple[FunctionCounts, FunctionCounts, Optional[str]]: + if i == repeats and not collect_baseline: + # Null baseline. + return ( + FunctionCounts((), inclusive=True), + FunctionCounts((), inclusive=False), + None, + ) + + fpath = f"{callgrind_out}.{i + 1}" # Callgrind one-indexes files. + callgrind_out_contents: Optional[str] = None + if retain_out_file: + with open(fpath) as f: + callgrind_out_contents = f.read() + + return ( + parse_output(fpath, inclusive=True), + parse_output(fpath, inclusive=False), + callgrind_out_contents + ) + + return tuple(read_results(i) for i in range(repeats + 1)) + finally: + shutil.rmtree(working_dir) + + @staticmethod + def _construct_script( + task_spec: common.TaskSpec, + globals: GlobalsBridge, + *, + number: int, + repeats: int, + collect_baseline: bool, + error_log: str, + stat_log: str, + bindings: Optional[CallgrindModuleType], + ) -> str: + def block_stmt(stmt: str, indent: int = 0) -> str: + """Partially unroll benchmark loop. + + The naive template looks something like: + "for _ in range({number}): {stmt}" + + However a loop in Python is surprisingly expensive, and significantly + increases the number of background Python instructions. So instead we + partially unroll the loops, with a block size of 100 chosen to keep + the instruction overhead from `range` low while also not ballooning + the size of the generated file. + """ + block_size = 100 + loop_count = number // block_size + if loop_count == 1: + # There is no point in having `for _ in range(1): ...` rather + # than just `...`, and this lets us save shave a few background + # instructions. 
+ loop_count = 0 + remainder = number - block_size * loop_count + blocked_stmt = "" + + if loop_count: + unrolled_stmts = textwrap.indent("\n".join([stmt] * block_size), " " * 4) + blocked_stmt += f"for _ in range({loop_count}):\n{unrolled_stmts}\n" + + if remainder: + blocked_stmt += "\n".join([stmt] * remainder) + + return textwrap.indent(blocked_stmt, " " * indent) + + pass_baseline = ( + "callgrind_bindings._valgrind_toggle()\n" + f"{block_stmt('pass')}\n" + "callgrind_bindings._valgrind_toggle_and_dump_stats()" + ) + + return textwrap.dedent(r""" + import gc + import os + import pickle + import subprocess + import sys + import time + + # Mitigate https://github.com/pytorch/pytorch/issues/37377 + # which can sometimes cause the subprocess call to fail. + import numpy as np + + import torch + torch.set_num_threads({num_threads}) + + {bindings_import} + + PID = os.getpid() + + def log_failure(msg): + with open({error_log_repr}, "wt") as f: + f.write(msg) + sys.exit(1) + + def check_result(completed_process): + if completed_process.returncode: + log_failure(f"Command failed: {{' '.join(completed_process.args)}}") + return completed_process + + # ============================================================================= + # == Check that subprocess matches parent ===================================== + # ============================================================================= + if os.path.realpath(sys.executable) != "{parent_interpreter}": + log_failure( + "Interpreter mismatch:\n" + f" {{os.path.realpath(sys.executable)}}\n vs.\n {parent_interpreter}" + ) + + if torch.__file__ != "{torch_file}": + log_failure( + "PyTorch does not match expected file:\n" + f" {{torch.__file__}}\n vs.\n {torch_file}" + ) + + # ============================================================================= + # == User specified setup ===================================================== + # ============================================================================= + # Load 
serialized globals + {load_globals} + + # User setup str + {setup} + + for _ in range({warmup_number}): + {indented_stmt} + + # ============================================================================= + # == Callgrind management ===================================================== + # ============================================================================= + with open("{stat_log}", "wb") as stat_file: + # If many instances of callgrind are running at once, the output of + # `callgrind_control` may exceed 16kb which would cause `subprocess.PIPE` + # to deadlock. So instead we use a file. + callgrind_stat = check_result(subprocess.run( + ["callgrind_control", "--stat"], + stdout=stat_file, + stderr=subprocess.STDOUT, + )) + + with open("{stat_log}", "rt") as stat_file: + stat_lines = stat_file.read().splitlines() + + if f"PID {{PID}}: python {{__file__}}" not in stat_lines: + log_failure("Process does not appear to be running callgrind.") + + gc.collect() + time.sleep(0.01) + + # ============================================================================= + # == User code block ========================================================== + # ============================================================================= + for _ in range({repeats}): + callgrind_bindings._valgrind_toggle() + {blocked_stmt} + callgrind_bindings._valgrind_toggle_and_dump_stats() + gc.collect() + + {baseline} + """).strip().format( + indented_stmt=textwrap.indent(task_spec.stmt, " " * 4), + blocked_stmt=block_stmt(task_spec.stmt, indent=4), + baseline=(pass_baseline if collect_baseline else ""), + number=number, + repeats=repeats, + load_globals=globals.construct(), + setup=task_spec.setup, + warmup_number=min(number, 10), + num_threads=task_spec.num_threads, + error_log_repr=repr(error_log), + stat_log=stat_log, + parent_interpreter=os.path.realpath(sys.executable), + torch_file=torch.__file__, + bindings_import=( + "import torch._C as callgrind_bindings" if bindings is None + 
else f"import {bindings.__name__} as callgrind_bindings"), + ) + + +CALLGRIND_SINGLETON: Optional[_ValgrindWrapper] = None +def wrapper_singleton() -> _ValgrindWrapper: + global CALLGRIND_SINGLETON + if CALLGRIND_SINGLETON is None: + CALLGRIND_SINGLETON = _ValgrindWrapper() + return CALLGRIND_SINGLETON diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccca2413fcd1b03136c818abff5c6cf2b5953dfd Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58f3ace6c03d093337c9fa417ccbe8bc267b6c69 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__init__.py @@ -0,0 +1 @@ +from .version import __version__ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fbc4becc7064a071a799e83eb71d5a8b6df7739 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ad8794464578ae8de63b2c4f0c3ffb3ca27e406 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed47e2b561b0c9a3e07f9156ce4898621481d420 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a60a417e510ee747ba3b4cffecc3c9baa13f6de Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/constants.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fb56e7a77a3ed264905c92b7ae55009be738267b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/constants.py @@ -0,0 +1,62 @@ +"""Constants for annotations in the mapping. + +The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py. 
+They are based on +https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h +and fall in three categories: 1) type of mapping, 2) API of mapping, 3) unsupported +mapping. +""" + +CONV_VERSION = 0, +CONV_INIT = 1 +CONV_DEVICE = 2 +CONV_MEM = 3 +CONV_KERN = 4 +CONV_COORD_FUNC = 5 +CONV_MATH_FUNC = 6 +CONV_DEVICE_FUNC = 7 +CONV_SPECIAL_FUNC = 8 +CONV_STREAM = 9 +CONV_EVENT = 10 +CONV_OCCUPANCY = 11 +CONV_CONTEXT = 12 +CONV_PEER = 13 +CONV_MODULE = 14 +CONV_CACHE = 15 +CONV_EXEC = 16 +CONV_ERROR = 17 +CONV_DEF = 18 +CONV_TEX = 19 +CONV_GL = 20 +CONV_GRAPHICS = 21 +CONV_SURFACE = 22 +CONV_JIT = 23 +CONV_D3D9 = 24 +CONV_D3D10 = 25 +CONV_D3D11 = 26 +CONV_VDPAU = 27 +CONV_EGL = 28 +CONV_THREAD = 29 +CONV_OTHER = 30 +CONV_INCLUDE = 31 +CONV_INCLUDE_CUDA_MAIN_H = 32 +CONV_TYPE = 33 +CONV_LITERAL = 34 +CONV_NUMERIC_LITERAL = 35 +CONV_LAST = 36 + +API_DRIVER = 37 +API_RUNTIME = 38 +API_BLAS = 39 +API_SPECIAL = 40 +API_RAND = 41 +API_LAST = 42 +API_FFT = 43 +API_RTC = 44 +API_ROCTX = 45 + +HIP_UNSUPPORTED = 46 +API_PYTORCH = 1337 +API_CAFFE2 = 1338 +API_C10 = 1339 +API_ROCMSMI = 1340 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..fa727a7c078c8625c2b8d115a1b70d7f85dd3396 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py @@ -0,0 +1,8639 @@ +import collections +import os +import re +import subprocess + +from .constants import (API_BLAS, API_C10, API_CAFFE2, API_DRIVER, API_FFT, + API_PYTORCH, API_RAND, API_ROCTX, API_RTC, API_RUNTIME, + API_SPECIAL, API_ROCMSMI, CONV_CACHE, CONV_CONTEXT, CONV_D3D9, + CONV_D3D10, CONV_D3D11, CONV_DEF, CONV_DEVICE, + CONV_DEVICE_FUNC, CONV_EGL, CONV_ERROR, CONV_EVENT, + CONV_EXEC, CONV_GL, CONV_GRAPHICS, CONV_INCLUDE, + CONV_INCLUDE_CUDA_MAIN_H, 
CONV_INIT, CONV_JIT, + CONV_MATH_FUNC, CONV_MEM, CONV_MODULE, + CONV_NUMERIC_LITERAL, CONV_OCCUPANCY, CONV_OTHER, + CONV_PEER, CONV_SPECIAL_FUNC, CONV_STREAM, + CONV_SURFACE, CONV_TEX, CONV_THREAD, CONV_TYPE, + CONV_VDPAU, CONV_VERSION, HIP_UNSUPPORTED) + +""" Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents +This closely follows the implementation in hipify-clang +https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/CUDA2HipMap.cpp +and its structure. +There are different maps for fundamental names, include files, identifies, sparse, and +PyTorch specific translations. +Each of the entries in these maps translates a CUDA string to a tuple containing the +ROCm/HIP string, a type and API annotation and - optionally - an annotation if it is not +supported in ROCm/HIP yet. +""" + +# We need to know the ROCm version so we can conditionalize some of the mappings later. +# As of ROCm 5.0, the version is found in rocm_version.h header file under /opt/rocm/include. +rocm_path = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH') or "/opt/rocm" +try: + rocm_path = subprocess.check_output(["hipconfig", "--rocmpath"]).decode("utf-8") +except subprocess.CalledProcessError: + print(f"Warning: hipconfig --rocmpath failed, assuming {rocm_path}") +except (FileNotFoundError, PermissionError, NotADirectoryError): + # Do not print warning. This is okay. This file can also be imported for non-ROCm builds. + pass + +rocm_version = (0, 0, 0) +rocm_version_h = f"{rocm_path}/include/rocm-core/rocm_version.h" +if not os.path.isfile(rocm_version_h): + rocm_version_h = f"{rocm_path}/include/rocm_version.h" + +# The file could be missing due to 1) ROCm version < 5.2, or 2) no ROCm install. 
+if os.path.isfile(rocm_version_h): + RE_MAJOR = re.compile(r"#define\s+ROCM_VERSION_MAJOR\s+(\d+)") + RE_MINOR = re.compile(r"#define\s+ROCM_VERSION_MINOR\s+(\d+)") + RE_PATCH = re.compile(r"#define\s+ROCM_VERSION_PATCH\s+(\d+)") + major, minor, patch = 0, 0, 0 + for line in open(rocm_version_h): + match = RE_MAJOR.search(line) + if match: + major = int(match.group(1)) + match = RE_MINOR.search(line) + if match: + minor = int(match.group(1)) + match = RE_PATCH.search(line) + if match: + patch = int(match.group(1)) + rocm_version = (major, minor, patch) + +# List of math functions that should be replaced inside device code only. +MATH_TRANSPILATIONS = collections.OrderedDict( + [ + ("std::max", ("::max")), + ("std::min", ("::min")), + ("std::ceil", ("::ceil")), + ("std::floor", ("::floor")), + ("std::exp", ("::exp")), + ("std::log", ("::log")), + ("std::pow", ("::pow")), + ("std::fabs", ("::fabs")), + ("std::fmod", ("::fmod")), + ("std::remainder", ("::remainder")), + ("std::frexp", ("::frexp")), + ] +) + +CUDA_TYPE_NAME_MAP = collections.OrderedDict( + [ + ("CUresult", ("hipError_t", CONV_TYPE, API_DRIVER)), + ("cudaError_t", ("hipError_t", CONV_TYPE, API_RUNTIME)), + ("cudaError", ("hipError_t", CONV_TYPE, API_RUNTIME)), + ( + "CUDA_ARRAY3D_DESCRIPTOR", + ("HIP_ARRAY3D_DESCRIPTOR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUDA_ARRAY_DESCRIPTOR", ("HIP_ARRAY_DESCRIPTOR", CONV_TYPE, API_DRIVER)), + ("CUDA_MEMCPY2D", ("hip_Memcpy2D", CONV_TYPE, API_DRIVER)), + ("CUDA_MEMCPY3D", ("HIP_MEMCPY3D", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUDA_MEMCPY3D_PEER", + ("HIP_MEMCPY3D_PEER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS", + ( + "HIP_POINTER_ATTRIBUTE_P2P_TOKENS", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CUDA_RESOURCE_DESC", + ("HIP_RESOURCE_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_RESOURCE_VIEW_DESC", + ("HIP_RESOURCE_VIEW_DESC", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "CUipcEventHandle", + ("hipIpcEventHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUipcMemHandle", ("hipIpcMemHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUaddress_mode", ("hipAddress_mode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUarray_cubemap_face", + ("hipArray_cubemap_face", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUarray_format", ("hipArray_format", CONV_TYPE, API_DRIVER)), + ("CUcomputemode", ("hipComputemode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUmem_advise", ("hipMemAdvise", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUmem_range_attribute", + ("hipMemRangeAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUctx_flags", ("hipCctx_flags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUdevice", ("hipDevice_t", CONV_TYPE, API_DRIVER)), + ("CUdevice_attribute_enum", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)), + ("CUdevice_attribute", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)), + ("CUpointer_attribute", ("hipPointer_attribute", CONV_TYPE, API_DRIVER)), + ("CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL", ("HIP_POINTER_ATTRIBUTE_DEVICE_ORDINAL", CONV_TYPE, API_DRIVER)), + ("CU_POINTER_ATTRIBUTE_BUFFER_ID", ("HIP_POINTER_ATTRIBUTE_BUFFER_ID", CONV_TYPE, API_DRIVER)), + ("CUdeviceptr", ("hipDeviceptr_t", CONV_TYPE, API_DRIVER)), + ("CUarray_st", ("hipArray", CONV_TYPE, API_DRIVER)), + ("CUarray", ("hipArray *", CONV_TYPE, API_DRIVER)), + ("CUdevprop_st", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)), + ("CUdevprop", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)), + ("CUfunction", ("hipFunction_t", CONV_TYPE, API_DRIVER)), + ( + "CUgraphicsResource", + ("hipGraphicsResource_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUmipmappedArray", + ("hipMipmappedArray_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUfunction_attribute", + ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUfunction_attribute_enum", + 
("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsMapResourceFlags", + ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsMapResourceFlags_enum", + ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsRegisterFlags", + ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsRegisterFlags_enum", + ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUoccupancy_flags", + ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUoccupancy_flags_enum", + ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUfunc_cache_enum", ("hipFuncCache", CONV_TYPE, API_DRIVER)), + ("CUfunc_cache", ("hipFuncCache", CONV_TYPE, API_DRIVER)), + ("CUipcMem_flags", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUipcMem_flags_enum", + ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUjit_cacheMode", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUjit_cacheMode_enum", + ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUjit_fallback", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUjit_fallback_enum", + ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUjit_option", ("hipJitOption", CONV_JIT, API_DRIVER)), + ("CUjit_option_enum", ("hipJitOption", CONV_JIT, API_DRIVER)), + ("CUjit_target", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ("CUjit_target_enum", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ("CUjitInputType", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUjitInputType_enum", + ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUlimit", ("hipLimit_t", CONV_TYPE, API_DRIVER)), + ("CUlimit_enum", ("hipLimit_t", CONV_TYPE, API_DRIVER)), + ( + "CUmemAttach_flags", + 
("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUmemAttach_flags_enum", + ("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUmemorytype", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUmemorytype_enum", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUresourcetype", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUresourcetype_enum", + ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUresourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)), + ("CUresourceViewFormat_enum", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)), + ("CUsharedconfig", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)), + ("CUsharedconfig_enum", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)), + ("CUcontext", ("hipCtx_t", CONV_TYPE, API_DRIVER)), + ("CUmodule", ("hipModule_t", CONV_TYPE, API_DRIVER)), + ("CUstream", ("hipStream_t", CONV_TYPE, API_DRIVER)), + ("CUstream_st", ("ihipStream_t", CONV_TYPE, API_DRIVER)), + ("CUstreamCallback", ("hipStreamCallback_t", CONV_TYPE, API_DRIVER)), + ("CUsurfObject", ("hipSurfaceObject", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUsurfref", + ("hipSurfaceReference_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUtexObject", ("hipTextureObject_t", CONV_TYPE, API_DRIVER)), + ("CUtexref", ("textureReference", CONV_TYPE, API_DRIVER)), + ("CUstream_flags", ("hipStreamFlags", CONV_TYPE, API_DRIVER)), + ( + "CUstreamWaitValue_flags", + ("hipStreamWaitValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUstreamWriteValue_flags", + ("hipStreamWriteValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUstreamBatchMemOpType", + ("hipStreamBatchMemOpType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUdevice_P2PAttribute", + ("hipDeviceP2PAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUevent", ("hipEvent_t", CONV_TYPE, API_DRIVER)), + ("CUevent_st", ("ihipEvent_t", 
CONV_TYPE, API_DRIVER)), + ("CUevent_flags", ("hipEventFlags", CONV_EVENT, API_DRIVER, HIP_UNSUPPORTED)), + ("CUfilter_mode", ("hipTextureFilterMode", CONV_TEX, API_DRIVER)), + ("CUGLDeviceList", ("hipGLDeviceList", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ("CUGLmap_flags", ("hipGLMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUd3d9DeviceList", + ("hipD3D9DeviceList", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d9map_flags", + ("hipD3D9MapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d9register_flags", + ("hipD3D9RegisterFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d10DeviceList", + ("hipd3d10DeviceList", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d10map_flags", + ("hipD3D10MapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d10register_flags", + ("hipD3D10RegisterFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d11DeviceList", + ("hipd3d11DeviceList", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUeglStreamConnection_st", + ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUeglStreamConnection", + ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "libraryPropertyType_t", + ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "libraryPropertyType", + ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaStreamCallback_t", ("hipStreamCallback_t", CONV_TYPE, API_RUNTIME)), + ("cudaArray", ("hipArray", CONV_MEM, API_RUNTIME)), + ("cudaArray_t", ("hipArray_t", CONV_MEM, API_RUNTIME)), + ("cudaArray_const_t", ("hipArray_const_t", CONV_MEM, API_RUNTIME)), + ("cudaMipmappedArray_t", ("hipMipmappedArray_t", CONV_MEM, API_RUNTIME)), + ( + "cudaMipmappedArray_const_t", + ("hipMipmappedArray_const_t", CONV_MEM, API_RUNTIME), + ), + ("cudaArrayDefault", ("hipArrayDefault", CONV_MEM, API_RUNTIME)), + ("cudaArrayLayered", ("hipArrayLayered", 
CONV_MEM, API_RUNTIME)), + ( + "cudaArraySurfaceLoadStore", + ("hipArraySurfaceLoadStore", CONV_MEM, API_RUNTIME), + ), + ("cudaArrayCubemap", ("hipArrayCubemap", CONV_MEM, API_RUNTIME)), + ("cudaArrayTextureGather", ("hipArrayTextureGather", CONV_MEM, API_RUNTIME)), + ("cudaMemoryAdvise", ("hipMemoryAdvise", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaMemRangeAttribute", + ("hipMemRangeAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpyKind", ("hipMemcpyKind", CONV_MEM, API_RUNTIME)), + ("cudaMemoryType", ("hipMemoryType", CONV_MEM, API_RUNTIME)), + ("cudaExtent", ("hipExtent", CONV_MEM, API_RUNTIME)), + ("cudaPitchedPtr", ("hipPitchedPtr", CONV_MEM, API_RUNTIME)), + ("cudaPos", ("hipPos", CONV_MEM, API_RUNTIME)), + ("cudaEvent_t", ("hipEvent_t", CONV_TYPE, API_RUNTIME)), + ("cudaStream_t", ("hipStream_t", CONV_TYPE, API_RUNTIME)), + ("cudaPointerAttributes", ("hipPointerAttribute_t", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceAttr", ("hipDeviceAttribute_t", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceProp", ("hipDeviceProp_t", CONV_TYPE, API_RUNTIME)), + ( + "cudaDeviceP2PAttr", + ("hipDeviceP2PAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeMode", + ("hipComputeMode", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaFuncCache", ("hipFuncCache_t", CONV_CACHE, API_RUNTIME)), + ( + "cudaFuncAttributes", + ("hipFuncAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaSharedMemConfig", ("hipSharedMemConfig", CONV_TYPE, API_RUNTIME)), + ("cudaLimit", ("hipLimit_t", CONV_TYPE, API_RUNTIME)), + ("cudaOutputMode", ("hipOutputMode", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)), + ("cudaTextureReadMode", ("hipTextureReadMode", CONV_TEX, API_RUNTIME)), + ("cudaTextureFilterMode", ("hipTextureFilterMode", CONV_TEX, API_RUNTIME)), + ("cudaChannelFormatKind", ("hipChannelFormatKind", CONV_TEX, API_RUNTIME)), + ("cudaChannelFormatDesc", ("hipChannelFormatDesc", CONV_TEX, API_RUNTIME)), + 
("cudaResourceDesc", ("hipResourceDesc", CONV_TEX, API_RUNTIME)), + ("cudaResourceViewDesc", ("hipResourceViewDesc", CONV_TEX, API_RUNTIME)), + ("cudaTextureDesc", ("hipTextureDesc", CONV_TEX, API_RUNTIME)), + ( + "surfaceReference", + ("hipSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaTextureObject_t", ("hipTextureObject_t", CONV_TEX, API_RUNTIME)), + ("cudaResourceType", ("hipResourceType", CONV_TEX, API_RUNTIME)), + ("cudaResourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_RUNTIME)), + ("cudaTextureAddressMode", ("hipTextureAddressMode", CONV_TEX, API_RUNTIME)), + ( + "cudaSurfaceBoundaryMode", + ("hipSurfaceBoundaryMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaSurfaceFormatMode", + ("hipSurfaceFormatMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaTextureType1D", ("hipTextureType1D", CONV_TEX, API_RUNTIME)), + ("cudaTextureType2D", ("hipTextureType2D", CONV_TEX, API_RUNTIME)), + ("cudaTextureType3D", ("hipTextureType3D", CONV_TEX, API_RUNTIME)), + ("cudaTextureTypeCubemap", ("hipTextureTypeCubemap", CONV_TEX, API_RUNTIME)), + ( + "cudaTextureType1DLayered", + ("hipTextureType1DLayered", CONV_TEX, API_RUNTIME), + ), + ( + "cudaTextureType2DLayered", + ("hipTextureType2DLayered", CONV_TEX, API_RUNTIME), + ), + ( + "cudaTextureTypeCubemapLayered", + ("hipTextureTypeCubemapLayered", CONV_TEX, API_RUNTIME), + ), + ("cudaIpcEventHandle_t", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)), + ("cudaIpcEventHandle_st", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)), + ("cudaIpcMemHandle_t", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)), + ("cudaIpcMemHandle_st", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)), + ( + "cudaGraphicsCubeFace", + ("hipGraphicsCubeFace", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsMapFlags", + ("hipGraphicsMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsRegisterFlags", + ("hipGraphicsRegisterFlags", 
CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLDeviceList", + ("hipGLDeviceList", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaGLMapFlags", ("hipGLMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaD3D9DeviceList", + ("hipD3D9DeviceList", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9MapFlags", + ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9RegisterFlags", + ("hipD3D9RegisterFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10DeviceList", + ("hipd3d10DeviceList", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10MapFlags", + ("hipD3D10MapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10RegisterFlags", + ("hipD3D10RegisterFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11DeviceList", + ("hipd3d11DeviceList", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEglStreamConnection", + ("hipEglStreamConnection", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cublasHandle_t", ("hipblasHandle_t", CONV_TYPE, API_BLAS)), + ("cublasOperation_t", ("hipblasOperation_t", CONV_TYPE, API_BLAS)), + ("cublasStatus_t", ("hipblasStatus_t", CONV_TYPE, API_BLAS)), + ("cublasFillMode_t", ("hipblasFillMode_t", CONV_TYPE, API_BLAS)), + ("cublasDiagType_t", ("hipblasDiagType_t", CONV_TYPE, API_BLAS)), + ("cublasSideMode_t", ("hipblasSideMode_t", CONV_TYPE, API_BLAS)), + ("cublasPointerMode_t", ("hipblasPointerMode_t", CONV_TYPE, API_BLAS)), + ( + "cublasAtomicsMode_t", + ("hipblasAtomicsMode_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDataType_t", + ("hipblasDatatype_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED), + ), + ("curandStatus", ("hiprandStatus_t", CONV_TYPE, API_RAND)), + ("curandStatus_t", ("hiprandStatus_t", CONV_TYPE, API_RAND)), + ("curandRngType", ("hiprandRngType_t", CONV_TYPE, API_RAND)), + ("curandRngType_t", ("hiprandRngType_t", CONV_TYPE, API_RAND)), + ("curandGenerator_st", 
("hiprandGenerator_st", CONV_TYPE, API_RAND)), + ("curandGenerator_t", ("hiprandGenerator_t", CONV_TYPE, API_RAND)), + ( + "curandDirectionVectorSet", + ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDirectionVectorSet_t", + ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ("curandOrdering", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), + ( + "curandOrdering_t", + ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistribution_st", + ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2V_st", + ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistribution_t", + ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2V_t", + ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionShift_st", + ("hiprandDistributionShift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionShift_t", + ("hiprandDistributionShift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionM2Shift_st", + ("hiprandDistributionM2Shift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionM2Shift_t", + ("hiprandDistributionM2Shift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2_st", + ("hiprandHistogramM2_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2_t", + ("hiprandHistogramM2_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2K_st", + ("hiprandHistogramM2K_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2K_t", + ("hiprandHistogramM2K_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDiscreteDistribution_st", + ("hiprandDiscreteDistribution_st", CONV_TYPE, API_RAND), + ), + ( + "curandDiscreteDistribution_t", + ("hiprandDiscreteDistribution_t", CONV_TYPE, API_RAND), 
+ ), + ("curandMethod", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), + ("curandMethod_t", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), + ( + "curandDirectionVectors32_t", + ("hiprandDirectionVectors32_t", CONV_TYPE, API_RAND), + ), + ( + "curandDirectionVectors64_t", + ("hiprandDirectionVectors64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ("curandStateMtgp32_t", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)), + ("curandStateMtgp32", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)), + ( + "curandStateScrambledSobol64_t", + ("hiprandStateScrambledSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandStateSobol64_t", + ("hiprandStateSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandStateScrambledSobol32_t", + ("hiprandStateScrambledSobol32_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ("curandStateSobol32_t", ("hiprandStateSobol32_t", CONV_TYPE, API_RAND)), + ("curandStateMRG32k3a_t", ("hiprandStateMRG32k3a_t", CONV_TYPE, API_RAND)), + ( + "curandStatePhilox4_32_10_t", + ("hiprandStatePhilox4_32_10_t", CONV_TYPE, API_RAND), + ), + ("curandStateXORWOW_t", ("hiprandStateXORWOW_t", CONV_TYPE, API_RAND)), + ("curandState_t", ("hiprandState_t", CONV_TYPE, API_RAND)), + ("curandState", ("hiprandState_t", CONV_TYPE, API_RAND)), + ("CUuuid", ("hipUUID", CONV_TYPE, API_RUNTIME)), + ("cudaGraph_t", ("hipGraph_t", CONV_TYPE, API_RAND)), + ("cudaGraphExec_t", ("hipGraphExec_t", CONV_TYPE, API_RAND)), + ] +) + +CUDA_INCLUDE_MAP = collections.OrderedDict( + [ + # since pytorch uses "\b{pattern}\b" as the actual re pattern, + # patterns listed here have to begin and end with alnum chars + ( + "include " to differentiate + ("", ("", CONV_INCLUDE, API_RUNTIME)), + ("nvrtc.h", ("hip/hiprtc.h", CONV_INCLUDE, API_RTC)), + ("thrust/system/cuda", ("thrust/system/hip", CONV_INCLUDE, API_BLAS)), + ("cub/util_allocator.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/block/block_reduce.cuh", 
("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/cub.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_run_length_encode.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/block/block_load.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_radix_sort.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_scan.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_select.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("nvToolsExt.h", ("roctracer/roctx.h", CONV_INCLUDE, API_ROCTX)), + ("nvml.h", ("rocm_smi/rocm_smi.h", CONV_INCLUDE, API_ROCMSMI)), + ] +) + +CUDA_IDENTIFIER_MAP = collections.OrderedDict( + [ + ("__CUDACC__", ("__HIPCC__", CONV_DEF, API_RUNTIME)), + ( + "CUDA_ERROR_INVALID_CONTEXT", + ("hipErrorInvalidContext", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_CONTEXT_ALREADY_CURRENT", + ("hipErrorContextAlreadyCurrent", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_ARRAY_IS_MAPPED", + ("hipErrorArrayIsMapped", CONV_TYPE, API_DRIVER), + ), + ("CUDA_ERROR_ALREADY_MAPPED", ("hipErrorAlreadyMapped", CONV_TYPE, API_DRIVER)), + ( + "CUDA_ERROR_ALREADY_ACQUIRED", + ("hipErrorAlreadyAcquired", CONV_TYPE, API_DRIVER), + ), + ("CUDA_ERROR_NOT_MAPPED", ("hipErrorNotMapped", CONV_TYPE, API_DRIVER)), + ( + "CUDA_ERROR_NOT_MAPPED_AS_ARRAY", + ("hipErrorNotMappedAsArray", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_NOT_MAPPED_AS_POINTER", + ("hipErrorNotMappedAsPointer", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_CONTEXT_ALREADY_IN_USE", + ("hipErrorContextAlreadyInUse", CONV_TYPE, API_DRIVER), + ), + ("CUDA_ERROR_INVALID_SOURCE", ("hipErrorInvalidSource", CONV_TYPE, API_DRIVER)), + ("CUDA_ERROR_FILE_NOT_FOUND", ("hipErrorFileNotFound", CONV_TYPE, API_DRIVER)), + ("CUDA_ERROR_NOT_FOUND", ("hipErrorNotFound", CONV_TYPE, API_DRIVER)), + ( + 
"CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", + ( + "hipErrorLaunchIncompatibleTexturing", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", + ("hipErrorPrimaryContextActive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_CONTEXT_IS_DESTROYED", + ("hipErrorContextIsDestroyed", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NOT_PERMITTED", + ("hipErrorNotPermitted", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NOT_SUPPORTED", + ("hipErrorNotSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMissingConfiguration", + ("hipErrorMissingConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorPriorLaunchFailure", + ("hipErrorPriorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidDeviceFunction", + ("hipErrorInvalidDeviceFunction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidConfiguration", + ("hipErrorInvalidConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidPitchValue", + ("hipErrorInvalidPitchValue", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidSymbol", + ("hipErrorInvalidSymbol", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidHostPointer", + ("hipErrorInvalidHostPointer", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidDevicePointer", + ("hipErrorInvalidDevicePointer", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaErrorInvalidTexture", + ("hipErrorInvalidTexture", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidTextureBinding", + ("hipErrorInvalidTextureBinding", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidChannelDescriptor", + ( + "hipErrorInvalidChannelDescriptor", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaErrorInvalidMemcpyDirection", + ("hipErrorInvalidMemcpyDirection", CONV_TYPE, 
API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorAddressOfConstant", + ("hipErrorAddressOfConstant", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorTextureFetchFailed", + ("hipErrorTextureFetchFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorTextureNotBound", + ("hipErrorTextureNotBound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorSynchronizationError", + ("hipErrorSynchronizationError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidFilterSetting", + ("hipErrorInvalidFilterSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidNormSetting", + ("hipErrorInvalidNormSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMixedDeviceExecution", + ("hipErrorMixedDeviceExecution", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorNotYetImplemented", + ("hipErrorNotYetImplemented", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMemoryValueTooLarge", + ("hipErrorMemoryValueTooLarge", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInsufficientDriver", + ("hipErrorInsufficientDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorSetOnActiveProcess", + ("hipErrorSetOnActiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidSurface", + ("hipErrorInvalidSurface", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDuplicateVariableName", + ("hipErrorDuplicateVariableName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDuplicateTextureName", + ("hipErrorDuplicateTextureName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDuplicateSurfaceName", + ("hipErrorDuplicateSurfaceName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDevicesUnavailable", + ("hipErrorDevicesUnavailable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorIncompatibleDriverContext", + ( + 
"hipErrorIncompatibleDriverContext", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaErrorDeviceAlreadyInUse", + ("hipErrorDeviceAlreadyInUse", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchMaxDepthExceeded", + ("hipErrorLaunchMaxDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchFileScopedTex", + ("hipErrorLaunchFileScopedTex", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchFileScopedSurf", + ("hipErrorLaunchFileScopedSurf", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorSyncDepthExceeded", + ("hipErrorSyncDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchPendingCountExceeded", + ( + "hipErrorLaunchPendingCountExceeded", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaErrorNotPermitted", + ("hipErrorNotPermitted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorNotSupported", + ("hipErrorNotSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorStartupFailure", + ("hipErrorStartupFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorApiFailureBase", + ("hipErrorApiFailureBase", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_SUCCESS", ("hipSuccess", CONV_TYPE, API_DRIVER)), + ("cudaSuccess", ("hipSuccess", CONV_TYPE, API_RUNTIME)), + ("CUDA_ERROR_INVALID_VALUE", ("hipErrorInvalidValue", CONV_TYPE, API_DRIVER)), + ("cudaErrorInvalidValue", ("hipErrorInvalidValue", CONV_TYPE, API_RUNTIME)), + ( + "CUDA_ERROR_OUT_OF_MEMORY", + ("hipErrorMemoryAllocation", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorMemoryAllocation", + ("hipErrorMemoryAllocation", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_NOT_INITIALIZED", + ("hipErrorNotInitialized", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInitializationError", + ("hipErrorInitializationError", CONV_TYPE, API_RUNTIME), + ), + ("CUDA_ERROR_DEINITIALIZED", ("hipErrorDeinitialized", CONV_TYPE, 
API_DRIVER)), + ( + "cudaErrorCudartUnloading", + ("hipErrorDeinitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_DISABLED", + ("hipErrorProfilerDisabled", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerDisabled", + ("hipErrorProfilerDisabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_NOT_INITIALIZED", + ("hipErrorProfilerNotInitialized", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerNotInitialized", + ("hipErrorProfilerNotInitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_ALREADY_STARTED", + ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerAlreadyStarted", + ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_ALREADY_STOPPED", + ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerAlreadyStopped", + ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_ERROR_NO_DEVICE", ("hipErrorNoDevice", CONV_TYPE, API_DRIVER)), + ("cudaErrorNoDevice", ("hipErrorNoDevice", CONV_TYPE, API_RUNTIME)), + ("CUDA_ERROR_INVALID_DEVICE", ("hipErrorInvalidDevice", CONV_TYPE, API_DRIVER)), + ("cudaErrorInvalidDevice", ("hipErrorInvalidDevice", CONV_TYPE, API_RUNTIME)), + ("CUDA_ERROR_INVALID_IMAGE", ("hipErrorInvalidImage", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorInvalidKernelImage", + ("hipErrorInvalidImage", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_ERROR_MAP_FAILED", ("hipErrorMapFailed", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorMapBufferObjectFailed", + ("hipErrorMapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_ERROR_UNMAP_FAILED", ("hipErrorUnmapFailed", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorUnmapBufferObjectFailed", + ("hipErrorUnmapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NO_BINARY_FOR_GPU", + ("hipErrorNoBinaryForGpu", CONV_TYPE, 
API_DRIVER), + ), + ( + "cudaErrorNoKernelImageForDevice", + ("hipErrorNoBinaryForGpu", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_ECC_UNCORRECTABLE", + ("hipErrorECCNotCorrectable", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorECCUncorrectable", + ("hipErrorECCNotCorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_UNSUPPORTED_LIMIT", + ("hipErrorUnsupportedLimit", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorUnsupportedLimit", + ("hipErrorUnsupportedLimit", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", + ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorPeerAccessUnsupported", + ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_PTX", + ("hipErrorInvalidKernelFile", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInvalidPtx", + ("hipErrorInvalidKernelFile", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", + ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInvalidGraphicsContext", + ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NVLINK_UNCORRECTABLE", + ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorNvlinkUncorrectable", + ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", + ("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorSharedObjectSymbolNotFound", + ( + "hipErrorSharedObjectSymbolNotFound", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", + ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorSharedObjectInitFailed", + ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + 
"CUDA_ERROR_OPERATING_SYSTEM", + ("hipErrorOperatingSystem", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorOperatingSystem", + ("hipErrorOperatingSystem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_HANDLE", + ("hipErrorInvalidResourceHandle", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInvalidResourceHandle", + ("hipErrorInvalidResourceHandle", CONV_TYPE, API_RUNTIME), + ), + ("CUDA_ERROR_NOT_READY", ("hipErrorNotReady", CONV_TYPE, API_DRIVER)), + ("cudaErrorNotReady", ("hipErrorNotReady", CONV_TYPE, API_RUNTIME)), + ( + "CUDA_ERROR_ILLEGAL_ADDRESS", + ("hipErrorIllegalAddress", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorIllegalAddress", + ("hipErrorIllegalAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", + ("hipErrorLaunchOutOfResources", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorLaunchOutOfResources", + ("hipErrorLaunchOutOfResources", CONV_TYPE, API_RUNTIME), + ), + ("CUDA_ERROR_LAUNCH_TIMEOUT", ("hipErrorLaunchTimeOut", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorLaunchTimeout", + ("hipErrorLaunchTimeOut", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", + ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorPeerAccessAlreadyEnabled", + ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", + ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorPeerAccessNotEnabled", + ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_ASSERT", + ("hipErrorAssert", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorAssert", + ("hipErrorAssert", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_TOO_MANY_PEERS", + ("hipErrorTooManyPeers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorTooManyPeers", + ("hipErrorTooManyPeers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( 
+ "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", + ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorHostMemoryAlreadyRegistered", + ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", + ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorHostMemoryNotRegistered", + ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_HARDWARE_STACK_ERROR", + ("hipErrorHardwareStackError", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorHardwareStackError", + ("hipErrorHardwareStackError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_ILLEGAL_INSTRUCTION", + ("hipErrorIllegalInstruction", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorIllegalInstruction", + ("hipErrorIllegalInstruction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_MISALIGNED_ADDRESS", + ("hipErrorMisalignedAddress", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMisalignedAddress", + ("hipErrorMisalignedAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_ADDRESS_SPACE", + ("hipErrorInvalidAddressSpace", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidAddressSpace", + ("hipErrorInvalidAddressSpace", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_PC", + ("hipErrorInvalidPc", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidPc", + ("hipErrorInvalidPc", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_LAUNCH_FAILED", + ("hipErrorLaunchFailure", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchFailure", + ("hipErrorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_UNKNOWN", + ("hipErrorUnknown", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cudaErrorUnknown", ("hipErrorUnknown", CONV_TYPE, API_RUNTIME)), + ( + 
"CU_TR_ADDRESS_MODE_WRAP", + ("HIP_TR_ADDRESS_MODE_WRAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TR_ADDRESS_MODE_CLAMP", + ("HIP_TR_ADDRESS_MODE_CLAMP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TR_ADDRESS_MODE_MIRROR", + ("HIP_TR_ADDRESS_MODE_MIRROR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TR_ADDRESS_MODE_BORDER", + ("HIP_TR_ADDRESS_MODE_BORDER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_POSITIVE_X", + ("HIP_CUBEMAP_FACE_POSITIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_NEGATIVE_X", + ("HIP_CUBEMAP_FACE_NEGATIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_POSITIVE_Y", + ("HIP_CUBEMAP_FACE_POSITIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_NEGATIVE_Y", + ("HIP_CUBEMAP_FACE_NEGATIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_POSITIVE_Z", + ("HIP_CUBEMAP_FACE_POSITIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_NEGATIVE_Z", + ("HIP_CUBEMAP_FACE_NEGATIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_AD_FORMAT_UNSIGNED_INT8", + ("HIP_AD_FORMAT_UNSIGNED_INT8", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_UNSIGNED_INT16", + ("HIP_AD_FORMAT_UNSIGNED_INT16", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_UNSIGNED_INT32", + ("HIP_AD_FORMAT_UNSIGNED_INT32", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_SIGNED_INT8", + ("HIP_AD_FORMAT_SIGNED_INT8", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_SIGNED_INT16", + ("HIP_AD_FORMAT_SIGNED_INT16", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_SIGNED_INT32", + ("HIP_AD_FORMAT_SIGNED_INT32", CONV_TYPE, API_DRIVER), + ), + ("CU_AD_FORMAT_HALF", ("HIP_AD_FORMAT_HALF", CONV_TYPE, API_DRIVER)), + ("CU_AD_FORMAT_FLOAT", ("HIP_AD_FORMAT_FLOAT", CONV_TYPE, API_DRIVER)), + ( + "CU_COMPUTEMODE_DEFAULT", + ("hipComputeModeDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"CU_COMPUTEMODE_EXCLUSIVE", + ("hipComputeModeExclusive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_COMPUTEMODE_PROHIBITED", + ("hipComputeModeProhibited", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_COMPUTEMODE_EXCLUSIVE_PROCESS", + ("hipComputeModeExclusiveProcess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_SET_READ_MOSTLY", + ("hipMemAdviseSetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_UNSET_READ_MOSTLY", + ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_SET_PREFERRED_LOCATION", + ( + "hipMemAdviseSetPreferredLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION", + ( + "hipMemAdviseUnsetPreferredLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_MEM_ADVISE_SET_ACCESSED_BY", + ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_UNSET_ACCESSED_BY", + ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY", + ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION", + ( + "hipMemRangeAttributePreferredLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY", + ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION", + ( + "hipMemRangeAttributeLastPrefetchLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_CTX_SCHED_AUTO", + ("HIP_CTX_SCHED_AUTO", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_SPIN", + ("HIP_CTX_SCHED_SPIN", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_YIELD", + ("HIP_CTX_SCHED_YIELD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"CU_CTX_SCHED_BLOCKING_SYNC", + ("HIP_CTX_SCHED_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_BLOCKING_SYNC", + ("HIP_CTX_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_MASK", + ("HIP_CTX_SCHED_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_MAP_HOST", + ("HIP_CTX_MAP_HOST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_LMEM_RESIZE_TO_MAX", + ("HIP_CTX_LMEM_RESIZE_TO_MAX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_FLAGS_MASK", + ("HIP_CTX_FLAGS_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LAUNCH_PARAM_BUFFER_POINTER", + ("HIP_LAUNCH_PARAM_BUFFER_POINTER", CONV_TYPE, API_DRIVER), + ), + ( + "CU_LAUNCH_PARAM_BUFFER_SIZE", + ("HIP_LAUNCH_PARAM_BUFFER_SIZE", CONV_TYPE, API_DRIVER), + ), + ("CU_LAUNCH_PARAM_END", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER)), + ( + "CU_IPC_HANDLE_SIZE", + ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTALLOC_DEVICEMAP", + ("HIP_MEMHOSTALLOC_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTALLOC_PORTABLE", + ("HIP_MEMHOSTALLOC_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTALLOC_WRITECOMBINED", + ("HIP_MEMHOSTALLOC_WRITECOMBINED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTREGISTER_DEVICEMAP", + ("HIP_MEMHOSTREGISTER_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTREGISTER_IOMEMORY", + ("HIP_MEMHOSTREGISTER_IOMEMORY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTREGISTER_PORTABLE", + ("HIP_MEMHOSTREGISTER_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_PARAM_TR_DEFAULT", + ("HIP_PARAM_TR_DEFAULT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_LEGACY", + ("HIP_STREAM_LEGACY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_PER_THREAD", + ("HIP_STREAM_PER_THREAD", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "CU_TRSA_OVERRIDE_FORMAT", + ("HIP_TRSA_OVERRIDE_FORMAT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TRSF_NORMALIZED_COORDINATES", + ("HIP_TRSF_NORMALIZED_COORDINATES", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TRSF_READ_AS_INTEGER", + ("HIP_TRSF_READ_AS_INTEGER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_TRSF_SRGB", ("HIP_TRSF_SRGB", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUDA_ARRAY3D_2DARRAY", + ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_CUBEMAP", + ("HIP_ARRAY3D_CUBEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_DEPTH_TEXTURE", + ("HIP_ARRAY3D_DEPTH_TEXTURE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_LAYERED", + ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_SURFACE_LDST", + ("HIP_ARRAY3D_SURFACE_LDST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_TEXTURE_GATHER", + ("HIP_ARRAY3D_TEXTURE_GATHER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK", + ( + "hipDeviceAttributeMaxThreadsPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X", + ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y", + ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z", + ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X", + ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y", + ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z", + ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK", + ( + "hipDeviceAttributeMaxSharedMemoryPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK", + ( + "hipDeviceAttributeMaxSharedMemoryPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY", + ( + "hipDeviceAttributeTotalConstantMemory", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_WARP_SIZE", + ("hipDeviceAttributeWarpSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_PITCH", + ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK", + ( + "hipDeviceAttributeMaxRegistersPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK", + ( + "hipDeviceAttributeMaxRegistersPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CLOCK_RATE", + ("hipDeviceAttributeClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT", + ( + "hipDeviceAttributeTextureAlignment", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_GPU_OVERLAP", + ( + "hipDeviceAttributeAsyncEngineCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT", + ( + "hipDeviceAttributeMultiprocessorCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT", + ( + "hipDeviceAttributeKernelExecTimeout", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_INTEGRATED", + ("hipDeviceAttributeIntegrated", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY", + ( + "hipDeviceAttributeCanMapHostMemory", + CONV_TYPE, + API_DRIVER, 
+ HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_MODE", + ("hipDeviceAttributeComputeMode", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH", + ( + "hipDeviceAttributeMaxTexture3DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT", + ( + "hipDeviceAttributeMaxTexture3DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH", + ( + "hipDeviceAttributeMaxTexture3DDepth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DLayeredHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxTexture2DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DLayeredHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES", + ( + 
"hipDeviceAttributeMaxTexture2DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT", + ( + "hipDeviceAttributeSurfaceAlignment", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS", + ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_ECC_ENABLED", + ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_PCI_BUS_ID", + ("hipDeviceAttributePciBusId", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID", + ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_TCC_DRIVER", + ("hipDeviceAttributeTccDriver", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE", + ( + "hipDeviceAttributeMemoryClockRate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH", + ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE", + ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR", + ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT", + ( + "hipDeviceAttributeAsyncEngineCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING", + ( + "hipDeviceAttributeUnifiedAddressing", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxTexture1DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + 
"CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER", + ( + "hipDeviceAttributeCanTex2DGather", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DGatherWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DGatherHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE", + ( + "hipDeviceAttributeMaxTexture3DWidthAlternate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE", + ( + "hipDeviceAttributeMaxTexture3DHeightAlternate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE", + ( + "hipDeviceAttributeMaxTexture3DDepthAlternate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID", + ("hipDeviceAttributePciDomainId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT", + ( + "hipDeviceAttributeTexturePitchAlignment", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH", + ( + "hipDeviceAttributeMaxTextureCubemapWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH", + ( + "hipDeviceAttributeMaxSurface1DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + 
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH", + ( + "hipDeviceAttributeMaxSurface2DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT", + ( + "hipDeviceAttributeMaxSurface2DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH", + ( + "hipDeviceAttributeMaxSurface3DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT", + ( + "hipDeviceAttributeMaxSurface3DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH", + ( + "hipDeviceAttributeMaxSurface3DDepth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxSurface1DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxSurface1DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxSurface2DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT", + ( + "hipDeviceAttributeMaxSurface2DLayeredHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxSurface2DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH", + ( + "hipDeviceAttributeMaxSurfaceCubemapWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + 
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DLinearWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DLinearWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DLinearHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH", + ( + "hipDeviceAttributeMaxTexture2DLinearPitch", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DMipmappedWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DMipmappedHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR", + ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR", + ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DMipmappedWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED", + ( + "hipDeviceAttributeStreamPrioritiesSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED", + ( + "hipDeviceAttributeGlobalL1CacheSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + 
"CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED", + ( + "hipDeviceAttributeLocalL1CacheSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR", + ( + "hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", + CONV_TYPE, + API_DRIVER, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR", + ( + "hipDeviceAttributeMaxRegistersPerMultiprocessor", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY", + ("hipDeviceAttributeManagedMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD", + ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID", + ( + "hipDeviceAttributeMultiGpuBoardGroupId", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED", + ( + "hipDeviceAttributeHostNativeAtomicSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO", + ( + "hipDeviceAttributeSingleToDoublePrecisionPerfRatio", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS", + ( + "hipDeviceAttributePageableMemoryAccess", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS", + ( + "hipDeviceAttributeConcurrentManagedAccess", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED", + ( + "hipDeviceAttributeComputePreemptionSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM", + ( + "hipDeviceAttributeCanUseHostPointerForRegisteredMem", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX", + ("hipDeviceAttributeMax", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_CONTEXT", + ("hipPointerAttributeContext", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_MEMORY_TYPE", + ("hipPointerAttributeMemoryType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_DEVICE_POINTER", + ( + "hipPointerAttributeDevicePointer", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_POINTER_ATTRIBUTE_HOST_POINTER", + ("hipPointerAttributeHostPointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_P2P_TOKENS", + ("hipPointerAttributeP2pTokens", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_SYNC_MEMOPS", + ("hipPointerAttributeSyncMemops", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_BUFFER_ID", + ("hipPointerAttributeBufferId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_IS_MANAGED", + ("hipPointerAttributeIsManaged", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK", + ( + "hipFuncAttributeMaxThreadsPerBlocks", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES", + ("hipFuncAttributeSharedSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES", + ("hipFuncAttributeMaxDynamicSharedMemorySize", CONV_TYPE, API_RUNTIME), + ), + ( + "CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES", + ("hipFuncAttributeConstSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES", + ("hipFuncAttributeLocalSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_NUM_REGS", + ("hipFuncAttributeNumRegs", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_PTX_VERSION", + ("hipFuncAttributePtxVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_BINARY_VERSION", + ("hipFuncAttributeBinaryVersion", 
CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_CACHE_MODE_CA", + ("hipFuncAttributeCacheModeCA", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_MAX", + ("hipFuncAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE", + ("hipGraphicsMapFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY", + ("hipGraphicsMapFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + ("hipGraphicsMapFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_NONE", + ("hipGraphicsRegisterFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY", + ( + "hipGraphicsRegisterFlagsReadOnly", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD", + ( + "hipGraphicsRegisterFlagsWriteDiscard", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST", + ( + "hipGraphicsRegisterFlagsSurfaceLoadStore", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER", + ( + "hipGraphicsRegisterFlagsTextureGather", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_OCCUPANCY_DEFAULT", + ("hipOccupancyDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE", + ( + "hipOccupancyDisableCachingOverride", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_FUNC_CACHE_PREFER_NONE", + ("hipFuncCachePreferNone", CONV_CACHE, API_DRIVER), + ), + ( + "CU_FUNC_CACHE_PREFER_SHARED", + ("hipFuncCachePreferShared", CONV_CACHE, API_DRIVER), + ), + ("CU_FUNC_CACHE_PREFER_L1", ("hipFuncCachePreferL1", CONV_CACHE, API_DRIVER)), + ( + "CU_FUNC_CACHE_PREFER_EQUAL", + ("hipFuncCachePreferEqual", CONV_CACHE, API_DRIVER), 
+ ), + ( + "CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS", + ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUDA_IPC_HANDLE_SIZE", ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER)), + ( + "CU_JIT_CACHE_OPTION_NONE", + ("hipJitCacheModeOptionNone", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_CACHE_OPTION_CG", + ("hipJitCacheModeOptionCG", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_CACHE_OPTION_CA", + ("hipJitCacheModeOptionCA", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_PREFER_PTX", + ("hipJitFallbackPreferPtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_PREFER_BINARY", + ("hipJitFallbackPreferBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_JIT_MAX_REGISTERS", ("hipJitOptionMaxRegisters", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_THREADS_PER_BLOCK", + ("hipJitOptionThreadsPerBlock", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_WALL_TIME", ("hipJitOptionWallTime", CONV_JIT, API_DRIVER)), + ("CU_JIT_INFO_LOG_BUFFER", ("hipJitOptionInfoLogBuffer", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES", + ("hipJitOptionInfoLogBufferSizeBytes", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_ERROR_LOG_BUFFER", + ("hipJitOptionErrorLogBuffer", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES", + ("hipJitOptionErrorLogBufferSizeBytes", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_OPTIMIZATION_LEVEL", + ("hipJitOptionOptimizationLevel", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_TARGET_FROM_CUCONTEXT", + ("hipJitOptionTargetFromContext", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_TARGET", ("hipJitOptionTarget", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_FALLBACK_STRATEGY", + ("hipJitOptionFallbackStrategy", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_GENERATE_DEBUG_INFO", + ("hipJitOptionGenerateDebugInfo", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_LOG_VERBOSE", ("hipJitOptionLogVerbose", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_GENERATE_LINE_INFO", + ("hipJitOptionGenerateLineInfo", 
CONV_JIT, API_DRIVER), + ), + ("CU_JIT_CACHE_MODE", ("hipJitOptionCacheMode", CONV_JIT, API_DRIVER)), + ("CU_JIT_NEW_SM3X_OPT", ("hipJitOptionSm3xOpt", CONV_JIT, API_DRIVER)), + ("CU_JIT_FAST_COMPILE", ("hipJitOptionFastCompile", CONV_JIT, API_DRIVER)), + ("CU_JIT_NUM_OPTIONS", ("hipJitOptionNumOptions", CONV_JIT, API_DRIVER)), + ( + "CU_TARGET_COMPUTE_10", + ("hipJitTargetCompute10", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_11", + ("hipJitTargetCompute11", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_12", + ("hipJitTargetCompute12", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_13", + ("hipJitTargetCompute13", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_20", + ("hipJitTargetCompute20", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_21", + ("hipJitTargetCompute21", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_30", + ("hipJitTargetCompute30", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_32", + ("hipJitTargetCompute32", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_35", + ("hipJitTargetCompute35", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_37", + ("hipJitTargetCompute37", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_50", + ("hipJitTargetCompute50", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_52", + ("hipJitTargetCompute52", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_53", + ("hipJitTargetCompute53", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_60", + ("hipJitTargetCompute60", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_61", + ("hipJitTargetCompute61", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_62", + ("hipJitTargetCompute62", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"CU_JIT_INPUT_CUBIN", + ("hipJitInputTypeBin", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_PTX", + ("hipJitInputTypePtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_FATBINARY", + ("hipJitInputTypeFatBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_OBJECT", + ("hipJitInputTypeObject", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_LIBRARY", + ("hipJitInputTypeLibrary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_NUM_INPUT_TYPES", + ("hipJitInputTypeNumInputTypes", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_STACK_SIZE", + ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_PRINTF_FIFO_SIZE", + ("hipLimitPrintfFifoSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_MALLOC_HEAP_SIZE", + ("hipLimitMallocHeapSize", CONV_TYPE, API_DRIVER), + ), + ( + "CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH", + ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT", + ( + "hipLimitDevRuntimePendingLaunchCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_LIMIT_STACK_SIZE", + ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ATTACH_GLOBAL", + ("hipMemAttachGlobal", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ATTACH_HOST", + ("hipMemAttachHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ATTACH_SINGLE", + ("hipMemAttachSingle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_HOST", + ("hipMemTypeHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_DEVICE", + ("hipMemTypeDevice", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_ARRAY", + ("hipMemTypeArray", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_UNIFIED", + ("hipMemTypeUnified", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"CU_RESOURCE_TYPE_ARRAY", + ("hipResourceTypeArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_MIPMAPPED_ARRAY", + ("hipResourceTypeMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_LINEAR", + ("hipResourceTypeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_PITCH2D", + ("hipResourceTypePitch2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_RES_VIEW_FORMAT_NONE", ("hipResViewFormatNone", CONV_TEX, API_DRIVER)), + ( + "CU_RES_VIEW_FORMAT_UINT_1X8", + ("hipResViewFormatUnsignedChar1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_2X8", + ("hipResViewFormatUnsignedChar2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_4X8", + ("hipResViewFormatUnsignedChar4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_1X8", + ("hipResViewFormatSignedChar1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_2X8", + ("hipResViewFormatSignedChar2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_4X8", + ("hipResViewFormatSignedChar4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_1X16", + ("hipResViewFormatUnsignedShort1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_2X16", + ("hipResViewFormatUnsignedShort2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_4X16", + ("hipResViewFormatUnsignedShort4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_1X16", + ("hipResViewFormatSignedShort1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_2X16", + ("hipResViewFormatSignedShort2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_4X16", + ("hipResViewFormatSignedShort4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_1X32", + ("hipResViewFormatUnsignedInt1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_2X32", + ("hipResViewFormatUnsignedInt2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_4X32", + 
("hipResViewFormatUnsignedInt4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_1X32", + ("hipResViewFormatSignedInt1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_2X32", + ("hipResViewFormatSignedInt2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_4X32", + ("hipResViewFormatSignedInt4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_1X16", + ("hipResViewFormatHalf1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_2X16", + ("hipResViewFormatHalf2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_4X16", + ("hipResViewFormatHalf4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_1X32", + ("hipResViewFormatFloat1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_2X32", + ("hipResViewFormatFloat2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_4X32", + ("hipResViewFormatFloat4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC1", + ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC2", + ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC3", + ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC4", + ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SIGNED_BC4", + ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC5", + ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SIGNED_BC5", + ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC6H", + ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SIGNED_BC6H", + ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_DRIVER), + ), + ( + 
"CU_RES_VIEW_FORMAT_UNSIGNED_BC7", + ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_DRIVER), + ), + ( + "CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE", + ("hipSharedMemBankSizeDefault", CONV_TYPE, API_DRIVER), + ), + ( + "CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE", + ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_DRIVER), + ), + ( + "CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE", + ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_DRIVER), + ), + ("CU_STREAM_DEFAULT", ("hipStreamDefault", CONV_TYPE, API_DRIVER)), + ("CU_STREAM_NON_BLOCKING", ("hipStreamNonBlocking", CONV_TYPE, API_DRIVER)), + ( + "CU_STREAM_WAIT_VALUE_GEQ", + ("hipStreamWaitValueGeq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WAIT_VALUE_EQ", + ("hipStreamWaitValueEq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WAIT_VALUE_AND", + ("hipStreamWaitValueAnd", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WAIT_VALUE_FLUSH", + ("hipStreamWaitValueFlush", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WRITE_VALUE_DEFAULT", + ("hipStreamWriteValueDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER", + ( + "hipStreamWriteValueNoMemoryBarrier", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_STREAM_MEM_OP_WAIT_VALUE_32", + ("hipStreamBatchMemOpWaitValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_MEM_OP_WRITE_VALUE_32", + ("hipStreamBatchMemOpWriteValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES", + ( + "hipStreamBatchMemOpFlushRemoteWrites", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGetErrorName", + ("hipGetErrorName", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGetErrorString", + ("hipDrvGetErrorString", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuInit", ("hipInit", CONV_INIT, API_DRIVER)), + ("cuDriverGetVersion", ("hipDriverGetVersion", 
CONV_VERSION, API_DRIVER)), + ("cuCtxCreate", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)), + ("cuCtxCreate_v2", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)), + ("cuCtxDestroy", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)), + ("cuCtxDestroy_v2", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetApiVersion", ("hipCtxGetApiVersion", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetCacheConfig", ("hipCtxGetCacheConfig", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetCurrent", ("hipCtxGetCurrent", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetDevice", ("hipCtxGetDevice", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetFlags", ("hipCtxGetFlags", CONV_CONTEXT, API_DRIVER)), + ("cuDeviceGetUuid", ("hipDeviceGetUuid", CONV_CONTEXT, API_DRIVER)), + ( + "cuCtxGetLimit", + ("hipCtxGetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuCtxGetSharedMemConfig", + ("hipCtxGetSharedMemConfig", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuCtxGetStreamPriorityRange", + ("hipCtxGetStreamPriorityRange", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuCtxPopCurrent_v2", ("hipCtxPopCurrent", CONV_CONTEXT, API_DRIVER)), + ("cuCtxPushCurrent_v2", ("hipCtxPushCurrent", CONV_CONTEXT, API_DRIVER)), + ("cuCtxSetCacheConfig", ("hipCtxSetCacheConfig", CONV_CONTEXT, API_DRIVER)), + ("cuCtxSetCurrent", ("hipCtxSetCurrent", CONV_CONTEXT, API_DRIVER)), + ( + "cuCtxSetLimit", + ("hipCtxSetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuCtxSetSharedMemConfig", + ("hipCtxSetSharedMemConfig", CONV_CONTEXT, API_DRIVER), + ), + ("cuCtxSynchronize", ("hipCtxSynchronize", CONV_CONTEXT, API_DRIVER)), + ("cuCtxAttach", ("hipCtxAttach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)), + ("cuCtxDetach", ("hipCtxDetach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)), + ("cuCtxEnablePeerAccess", ("hipCtxEnablePeerAccess", CONV_PEER, API_DRIVER)), + ("cuCtxDisablePeerAccess", ("hipCtxDisablePeerAccess", CONV_PEER, API_DRIVER)), + ("cuDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_DRIVER)), + 
( + "cuDeviceGetP2PAttribute", + ("hipDeviceGetP2PAttribute", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuDevicePrimaryCtxGetState", + ("hipDevicePrimaryCtxGetState", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxRelease", + ("hipDevicePrimaryCtxRelease", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxReset", + ("hipDevicePrimaryCtxReset", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxRetain", + ("hipDevicePrimaryCtxRetain", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxSetFlags", + ("hipDevicePrimaryCtxSetFlags", CONV_CONTEXT, API_DRIVER), + ), + ("cuDeviceGet", ("hipDeviceGet", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetName", ("hipDeviceGetName", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetCount", ("hipGetDeviceCount", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_DRIVER)), + ("cuDeviceTotalMem_v2", ("hipDeviceTotalMem", CONV_DEVICE, API_DRIVER)), + ( + "cuDeviceComputeCapability", + ("hipDeviceComputeCapability", CONV_DEVICE, API_DRIVER), + ), + ("cuDeviceGetProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_DRIVER)), + ("cuLinkAddData", ("hipLinkAddData", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuLinkAddFile", ("hipLinkAddFile", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuLinkComplete", + ("hipLinkComplete", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuLinkCreate", ("hipLinkCreate", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuLinkDestroy", ("hipLinkDestroy", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuModuleGetFunction", ("hipModuleGetFunction", CONV_MODULE, API_DRIVER)), + ("cuModuleGetGlobal_v2", ("hipModuleGetGlobal", CONV_MODULE, API_DRIVER)), + ( + "cuModuleGetSurfRef", + ("hipModuleGetSurfRef", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + 
("cuModuleGetTexRef", ("hipModuleGetTexRef", CONV_MODULE, API_DRIVER)), + ("cuModuleLoad", ("hipModuleLoad", CONV_MODULE, API_DRIVER)), + ("cuModuleLoadData", ("hipModuleLoadData", CONV_MODULE, API_DRIVER)), + ("cuModuleLoadDataEx", ("hipModuleLoadDataEx", CONV_MODULE, API_DRIVER)), + ( + "cuModuleLoadFatBinary", + ("hipModuleLoadFatBinary", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuModuleUnload", ("hipModuleUnload", CONV_MODULE, API_DRIVER)), + ( + "CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK", + ( + "hipDeviceP2PAttributePerformanceRank", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED", + ( + "hipDeviceP2PAttributeAccessSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED", + ( + "hipDeviceP2PAttributeNativeAtomicSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("CU_EVENT_DEFAULT", ("hipEventDefault", CONV_EVENT, API_DRIVER)), + ("CU_EVENT_BLOCKING_SYNC", ("hipEventBlockingSync", CONV_EVENT, API_DRIVER)), + ("CU_EVENT_DISABLE_TIMING", ("hipEventDisableTiming", CONV_EVENT, API_DRIVER)), + ("CU_EVENT_INTERPROCESS", ("hipEventInterprocess", CONV_EVENT, API_DRIVER)), + ("cuEventCreate", ("hipEventCreate", CONV_EVENT, API_DRIVER)), + ("cuEventDestroy", ("hipEventDestroy", CONV_EVENT, API_DRIVER)), + ("cuEventDestroy_v2", ("hipEventDestroy", CONV_EVENT, API_DRIVER)), + ("cuEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_DRIVER)), + ("cuEventQuery", ("hipEventQuery", CONV_EVENT, API_DRIVER)), + ("cuEventRecord", ("hipEventRecord", CONV_EVENT, API_DRIVER)), + ("cuEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_DRIVER)), + ("cuFuncSetAttribute", ("hipFuncSetAttribute", CONV_EVENT, API_DRIVER)), + ( + "cuFuncGetAttribute", + ("hipFuncGetAttribute", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_MODULE, API_DRIVER)), + ( + 
"cuFuncSetSharedMemConfig", + ("hipFuncSetSharedMemConfig", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuLaunchKernel", ("hipModuleLaunchKernel", CONV_MODULE, API_DRIVER)), + ( + "cuFuncSetBlockShape", + ("hipFuncSetBlockShape", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuFuncSetSharedSize", + ("hipFuncSetSharedSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuLaunch", ("hipLaunch", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuLaunchGrid", ("hipLaunchGrid", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuLaunchGridAsync", + ("hipLaunchGridAsync", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuParamSetf", ("hipParamSetf", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuParamSeti", ("hipParamSeti", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuParamSetSize", + ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuParamSetSize", + ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuParamSetv", ("hipParamSetv", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuOccupancyMaxActiveBlocksPerMultiprocessor", + ( + "hipModuleOccupancyMaxActiveBlocksPerMultiprocessor", + CONV_OCCUPANCY, + API_DRIVER, + ), + ), + ( + "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + ( + "hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + CONV_OCCUPANCY, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuOccupancyMaxPotentialBlockSize", + ("hipModuleOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_DRIVER), + ), + ( + "cuOccupancyMaxPotentialBlockSizeWithFlags", + ( + "hipModuleOccupancyMaxPotentialBlockSizeWithFlags", + CONV_OCCUPANCY, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("cuStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_DRIVER)), + ( + "cuStreamAttachMemAsync", + ("hipStreamAttachMemAsync", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamCreate", + ("hipStreamCreate__", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"cuStreamCreateWithPriority", + ("hipStreamCreateWithPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)), + ("cuStreamDestroy_v2", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)), + ("cuStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_DRIVER)), + ( + "cuStreamGetPriority", + ("hipStreamGetPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuStreamQuery", ("hipStreamQuery", CONV_STREAM, API_DRIVER)), + ("cuStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_DRIVER)), + ("cuStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_DRIVER)), + ( + "cuStreamWaitValue32", + ("hipStreamWaitValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamWriteValue32", + ("hipStreamWriteValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamBatchMemOp", + ("hipStreamBatchMemOp", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuArray3DCreate", ("hipArray3DCreate", CONV_MEM, API_DRIVER)), + ( + "cuArray3DGetDescriptor", + ("hipArray3DGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuArrayCreate", ("hipArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuArrayDestroy", ("hipArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuArrayGetDescriptor", + ("hipArrayGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcCloseMemHandle", + ("hipIpcCloseMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcGetEventHandle", + ("hipIpcGetEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcGetMemHandle", + ("hipIpcGetMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcOpenEventHandle", + ("hipIpcOpenEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcOpenMemHandle", + ("hipIpcOpenMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemAlloc_v2", ("hipMalloc", CONV_MEM, API_DRIVER)), + ("cuMemAllocHost", ("hipMemAllocHost", 
CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemAllocManaged", + ("hipMemAllocManaged", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemAllocPitch", + ("hipMemAllocPitch__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpy", ("hipMemcpy__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpy2D", ("hipMemcpy2D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpy2DAsync", + ("hipMemcpy2DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemcpy2DUnaligned", + ("hipMemcpy2DUnaligned", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpy3D", ("hipMemcpy3D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpy3DAsync", + ("hipMemcpy3DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemcpy3DPeer", + ("hipMemcpy3DPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemcpy3DPeerAsync", + ("hipMemcpy3DPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyAsync", ("hipMemcpyAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyAtoA", ("hipMemcpyAtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyAtoD", ("hipMemcpyAtoD", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyAtoH", ("hipMemcpyAtoH", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpyAtoHAsync", + ("hipMemcpyAtoHAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyDtoA", ("hipMemcpyDtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyDtoD_v2", ("hipMemcpyDtoD", CONV_MEM, API_DRIVER)), + ("cuMemcpyDtoDAsync_v2", ("hipMemcpyDtoDAsync", CONV_MEM, API_DRIVER)), + ("cuMemcpyDtoH_v2", ("hipMemcpyDtoH", CONV_MEM, API_DRIVER)), + ("cuMemcpyDtoHAsync_v2", ("hipMemcpyDtoHAsync", CONV_MEM, API_DRIVER)), + ("cuMemcpyHtoA", ("hipMemcpyHtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpyHtoAAsync", + ("hipMemcpyHtoAAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyHtoD_v2", ("hipMemcpyHtoD", CONV_MEM, API_DRIVER)), + ("cuMemcpyHtoDAsync_v2", ("hipMemcpyHtoDAsync", 
CONV_MEM, API_DRIVER)), + ( + "cuMemcpyPeerAsync", + ("hipMemcpyPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyPeer", ("hipMemcpyPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemFree", ("hipFree", CONV_MEM, API_DRIVER)), + ("cuMemFree_v2", ("hipFree", CONV_MEM, API_DRIVER)), + ("cuMemFreeHost", ("hipHostFree", CONV_MEM, API_DRIVER)), + ( + "cuMemGetAddressRange", + ("hipMemGetAddressRange", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemGetInfo_v2", ("hipMemGetInfo", CONV_MEM, API_DRIVER)), + ("cuMemHostAlloc", ("hipHostMalloc", CONV_MEM, API_DRIVER)), + ( + "cuMemHostGetDevicePointer", + ("hipMemHostGetDevicePointer", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemHostGetFlags", + ("hipMemHostGetFlags", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemHostRegister_v2", ("hipHostRegister", CONV_MEM, API_DRIVER)), + ("cuMemHostUnregister", ("hipHostUnregister", CONV_MEM, API_DRIVER)), + ("cuMemsetD16_v2", ("hipMemsetD16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD16Async", + ("hipMemsetD16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD2D16_v2", ("hipMemsetD2D16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD2D16Async", + ("hipMemsetD2D16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD2D32_v2", ("hipMemsetD2D32", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD2D32Async", + ("hipMemsetD2D32Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD2D8_v2", ("hipMemsetD2D8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD2D8Async", + ("hipMemsetD2D8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD32_v2", ("hipMemset", CONV_MEM, API_DRIVER)), + ("cuMemsetD32Async", ("hipMemsetAsync", CONV_MEM, API_DRIVER)), + ("cuMemsetD8_v2", ("hipMemsetD8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD8Async", + ("hipMemsetD8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMipmappedArrayCreate", + 
("hipMipmappedArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMipmappedArrayDestroy", + ("hipMipmappedArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMipmappedArrayGetLevel", + ("hipMipmappedArrayGetLevel", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemPrefetchAsync", + ("hipMemPrefetchAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemAdvise", ("hipMemAdvise", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemRangeGetAttribute", + ("hipMemRangeGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemRangeGetAttributes", + ("hipMemRangeGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuPointerGetAttribute", + ("hipPointerGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemGetAddressRange_v2", + ("hipMemGetAddressRange", CONV_MEM, API_DRIVER), + ), + ( + "cuPointerGetAttributes", + ("hipPointerGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuPointerSetAttribute", + ("hipPointerSetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_TR_FILTER_MODE_POINT", ("hipFilterModePoint", CONV_TEX, API_DRIVER)), + ( + "CU_TR_FILTER_MODE_LINEAR", + ("hipFilterModeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetAddress", + ("hipTexRefGetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetAddressMode", + ("hipTexRefGetAddressMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetArray", + ("hipTexRefGetArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetBorderColor", + ("hipTexRefGetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetFilterMode", + ("hipTexRefGetFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetFlags", + ("hipTexRefGetFlags", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetFormat", + ("hipTexRefGetFormat", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"cuTexRefGetMaxAnisotropy", + ("hipTexRefGetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmapFilterMode", + ("hipTexRefGetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmapLevelBias", + ("hipTexRefGetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmapLevelClamp", + ("hipTexRefGetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmappedArray", + ("hipTexRefGetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetAddress", + ("hipTexRefSetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetAddress2D", + ("hipTexRefSetAddress2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuTexRefSetAddressMode", ("hipTexRefSetAddressMode", CONV_TEX, API_DRIVER)), + ("cuTexRefSetArray", ("hipTexRefSetArray", CONV_TEX, API_DRIVER)), + ( + "cuTexRefSetBorderColor", + ("hipTexRefSetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuTexRefSetFilterMode", ("hipTexRefSetFilterMode", CONV_TEX, API_DRIVER)), + ("cuTexRefSetFlags", ("hipTexRefSetFlags", CONV_TEX, API_DRIVER)), + ("cuTexRefSetFormat", ("hipTexRefSetFormat", CONV_TEX, API_DRIVER)), + ( + "cuTexRefSetMaxAnisotropy", + ("hipTexRefSetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmapFilterMode", + ("hipTexRefSetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmapLevelBias", + ("hipTexRefSetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmapLevelClamp", + ("hipTexRefSetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmappedArray", + ("hipTexRefSetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuTexRefCreate", ("hipTexRefCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuTexRefDestroy", + ("hipTexRefDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"cuSurfRefGetArray", + ("hipSurfRefGetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfRefSetArray", + ("hipSurfRefSetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectCreate", + ("hipTexObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectDestroy", + ("hipTexObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectGetResourceDesc", + ("hipTexObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectGetResourceViewDesc", + ("hipTexObjectGetResourceViewDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectGetTextureDesc", + ("hipTexObjectGetTextureDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfObjectCreate", + ("hipSurfObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfObjectDestroy", + ("hipSurfObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfObjectGetResourceDesc", + ("hipSurfObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsMapResources", + ("hipGraphicsMapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsResourceGetMappedMipmappedArray", + ( + "hipGraphicsResourceGetMappedMipmappedArray", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsResourceGetMappedPointer", + ( + "hipGraphicsResourceGetMappedPointer", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsResourceSetMapFlags", + ( + "hipGraphicsResourceSetMapFlags", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsSubResourceGetMappedArray", + ( + "hipGraphicsSubResourceGetMappedArray", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsUnmapResources", + ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsUnregisterResource", + ( + "hipGraphicsUnregisterResource", + CONV_GRAPHICS, + API_DRIVER, + 
HIP_UNSUPPORTED, + ), + ), + ( + "cuProfilerInitialize", + ("hipProfilerInitialize", CONV_OTHER, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuProfilerStart", ("hipProfilerStart", CONV_OTHER, API_DRIVER)), + ("cuProfilerStop", ("hipProfilerStop", CONV_OTHER, API_DRIVER)), + ( + "CU_GL_DEVICE_LIST_ALL", + ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GL_DEVICE_LIST_CURRENT_FRAME", + ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GL_DEVICE_LIST_NEXT_FRAME", + ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuGLGetDevices", ("hipGLGetDevices", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuGraphicsGLRegisterBuffer", + ("hipGraphicsGLRegisterBuffer", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsGLRegisterImage", + ("hipGraphicsGLRegisterImage", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CU_GL_MAP_RESOURCE_FLAGS_NONE", + ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", + CONV_GL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + CONV_GL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("cuGLCtxCreate", ("hipGLCtxCreate", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ("cuGLInit", ("hipGLInit", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuGLMapBufferObject", + ("hipGLMapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLMapBufferObjectAsync", + ("hipGLMapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLRegisterBufferObject", + ("hipGLRegisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLSetBufferObjectMapFlags", + ("hipGLSetBufferObjectMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), 
+ ( + "cuGLUnmapBufferObject", + ("hipGLUnmapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLUnmapBufferObjectAsync", + ("hipGLUnmapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLUnregisterBufferObject", + ("hipGLUnregisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_DEVICE_LIST_ALL", + ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_DEVICE_LIST_CURRENT_FRAME", + ( + "HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D9_DEVICE_LIST_NEXT_FRAME", + ("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9CtxCreate", + ("hipD3D9CtxCreate", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9CtxCreateOnDevice", + ("hipD3D9CtxCreateOnDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9GetDevice", + ("hipD3D9GetDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9GetDevices", + ("hipD3D9GetDevices", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9GetDirect3DDevice", + ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsD3D9RegisterResource", + ("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_MAPRESOURCE_FLAGS_NONE", + ("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_MAPRESOURCE_FLAGS_READONLY", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D9_REGISTER_FLAGS_NONE", + ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_REGISTER_FLAGS_ARRAY", + ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + 
( + "cuD3D9MapResources", + ("hipD3D9MapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9RegisterResource", + ("hipD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedArray", + ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedPitch", + ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedPointer", + ("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedSize", + ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetSurfaceDimensions", + ( + "hipD3D9ResourceGetSurfaceDimensions", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D9ResourceSetMapFlags", + ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9UnmapResources", + ("hipD3D9UnmapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9UnregisterResource", + ("hipD3D9UnregisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D10_DEVICE_LIST_ALL", + ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D10_DEVICE_LIST_CURRENT_FRAME", + ( + "HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_DEVICE_LIST_NEXT_FRAME", + ( + "HIP_D3D10_DEVICE_LIST_NEXT_FRAME", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D10GetDevice", + ("hipD3D10GetDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10GetDevices", + ("hipD3D10GetDevices", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsD3D10RegisterResource", + ( + "hipGraphicsD3D10RegisterResource", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_MAPRESOURCE_FLAGS_NONE", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_NONE", + CONV_D3D10, + 
API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_MAPRESOURCE_FLAGS_READONLY", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_REGISTER_FLAGS_NONE", + ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D10_REGISTER_FLAGS_ARRAY", + ("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10CtxCreate", + ("hipD3D10CtxCreate", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10CtxCreateOnDevice", + ("hipD3D10CtxCreateOnDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10GetDirect3DDevice", + ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10MapResources", + ("hipD3D10MapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10RegisterResource", + ("hipD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetMappedArray", + ("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetMappedPitch", + ("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetMappedPointer", + ( + "hipD3D10ResourceGetMappedPointer", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D10ResourceGetMappedSize", + ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetSurfaceDimensions", + ( + "hipD3D10ResourceGetSurfaceDimensions", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD310ResourceSetMapFlags", + ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10UnmapResources", + ("hipD3D10UnmapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), 
+ ), + ( + "cuD3D10UnregisterResource", + ("hipD3D10UnregisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D11_DEVICE_LIST_ALL", + ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D11_DEVICE_LIST_CURRENT_FRAME", + ( + "HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D11, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D11_DEVICE_LIST_NEXT_FRAME", + ( + "HIP_D3D11_DEVICE_LIST_NEXT_FRAME", + CONV_D3D11, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D11GetDevice", + ("hipD3D11GetDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D11GetDevices", + ("hipD3D11GetDevices", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsD3D11RegisterResource", + ( + "hipGraphicsD3D11RegisterResource", + CONV_D3D11, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D11CtxCreate", + ("hipD3D11CtxCreate", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D11CtxCreateOnDevice", + ("hipD3D11CtxCreateOnDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D11GetDirect3DDevice", + ("hipD3D11GetDirect3DDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsVDPAURegisterOutputSurface", + ( + "hipGraphicsVDPAURegisterOutputSurface", + CONV_VDPAU, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsVDPAURegisterVideoSurface", + ( + "hipGraphicsVDPAURegisterVideoSurface", + CONV_VDPAU, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuVDPAUGetDevice", + ("hipVDPAUGetDevice", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuVDPAUCtxCreate", + ("hipVDPAUCtxCreate", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerAcquireFrame", + ("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerConnect", + ("hipEGLStreamConsumerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerConnectWithFlags", + ( + 
"hipEGLStreamConsumerConnectWithFlags", + CONV_EGL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuEGLStreamConsumerDisconnect", + ("hipEGLStreamConsumerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerReleaseFrame", + ("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerConnect", + ("hipEGLStreamProducerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerDisconnect", + ("hipEGLStreamProducerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerPresentFrame", + ("hipEGLStreamProducerPresentFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerReturnFrame", + ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsEGLRegisterImage", + ("hipGraphicsEGLRegisterImage", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsResourceGetMappedEglFrame", + ( + "hipGraphicsResourceGetMappedEglFrame", + CONV_EGL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("cudaDataType_t", ("hipDataType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("cudaDataType", ("hipDataType", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_16BF", ("HIP_R_16BF", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_16BF", ("HIP_C_16BF", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_16F", ("HIP_R_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_16F", ("HIP_C_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_32F", ("HIP_R_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_32F", ("HIP_C_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_64F", ("HIP_R_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_64F", ("HIP_C_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_8I", ("HIP_R_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_8I", ("HIP_C_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + 
("CUDA_R_8U", ("HIP_R_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_8U", ("HIP_C_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_32I", ("HIP_R_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_32I", ("HIP_C_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_32U", ("HIP_R_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_32U", ("HIP_C_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "MAJOR_VERSION", + ("hipLibraryMajorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "MINOR_VERSION", + ("hipLibraryMinorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "PATCH_LEVEL", + ("hipLibraryPatchVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAttachGlobal", + ("hipMemAttachGlobal", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAttachHost", + ("hipMemAttachHost", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAttachSingle", + ("hipMemAttachSingle", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaOccupancyDefault", + ("hipOccupancyDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaOccupancyDisableCachingOverride", + ( + "hipOccupancyDisableCachingOverride", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaGetLastError", ("hipGetLastError", CONV_ERROR, API_RUNTIME)), + ("cudaPeekAtLastError", ("hipPeekAtLastError", CONV_ERROR, API_RUNTIME)), + ("cudaGetErrorName", ("hipGetErrorName", CONV_ERROR, API_RUNTIME)), + ("cudaGetErrorString", ("hipGetErrorString", CONV_ERROR, API_RUNTIME)), + ("cudaMemcpy3DParms", ("hipMemcpy3DParms", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpy3DPeerParms", + ("hipMemcpy3DPeerParms", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpy", ("hipMemcpy", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyToArray", ("hipMemcpyToArray", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyToSymbol", ("hipMemcpyToSymbol", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyToSymbolAsync", ("hipMemcpyToSymbolAsync", 
CONV_MEM, API_RUNTIME)), + ("cudaMemcpyAsync", ("hipMemcpyAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpy2D", ("hipMemcpy2D", CONV_MEM, API_RUNTIME)), + ("cudaMemcpy2DAsync", ("hipMemcpy2DAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpy2DToArray", ("hipMemcpy2DToArray", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpy2DArrayToArray", + ("hipMemcpy2DArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy2DFromArray", + ("hipMemcpy2DFromArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy2DFromArrayAsync", + ("hipMemcpy2DFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy2DToArrayAsync", + ("hipMemcpy2DToArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpy3D", ("hipMemcpy3D", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpy3DAsync", + ("hipMemcpy3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy3DPeer", + ("hipMemcpy3DPeer", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy3DPeerAsync", + ("hipMemcpy3DPeerAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpyArrayToArray", + ("hipMemcpyArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpyFromArrayAsync", + ("hipMemcpyFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpyFromSymbol", ("hipMemcpyFromSymbol", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpyFromSymbolAsync", + ("hipMemcpyFromSymbolAsync", CONV_MEM, API_RUNTIME), + ), + ("cudaMemAdvise", ("hipMemAdvise", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaMemRangeGetAttribute", + ("hipMemRangeGetAttribute", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeGetAttributes", + ("hipMemRangeGetAttributes", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseSetReadMostly", + ("hipMemAdviseSetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseUnsetReadMostly", + ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_RUNTIME, 
HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseSetPreferredLocation", + ( + "hipMemAdviseSetPreferredLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaMemAdviseUnsetPreferredLocation", + ( + "hipMemAdviseUnsetPreferredLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaMemAdviseSetAccessedBy", + ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseUnsetAccessedBy", + ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeAttributeReadMostly", + ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeAttributePreferredLocation", + ( + "hipMemRangeAttributePreferredLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaMemRangeAttributeAccessedBy", + ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeAttributeLastPrefetchLocation", + ( + "hipMemRangeAttributeLastPrefetchLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaMemcpyHostToHost", ("hipMemcpyHostToHost", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyHostToDevice", ("hipMemcpyHostToDevice", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyDeviceToHost", ("hipMemcpyDeviceToHost", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpyDeviceToDevice", + ("hipMemcpyDeviceToDevice", CONV_MEM, API_RUNTIME), + ), + ("cudaMemcpyDefault", ("hipMemcpyDefault", CONV_MEM, API_RUNTIME)), + ("cudaMemset", ("hipMemset", CONV_MEM, API_RUNTIME)), + ("cudaMemsetAsync", ("hipMemsetAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemset2D", ("hipMemset2D", CONV_MEM, API_RUNTIME)), + ( + "cudaMemset2DAsync", + ("hipMemset2DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemset3D", ("hipMemset3D", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaMemset3DAsync", + ("hipMemset3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemGetInfo", ("hipMemGetInfo", 
CONV_MEM, API_RUNTIME)), + ( + "cudaArrayGetInfo", + ("hipArrayGetInfo", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFreeMipmappedArray", + ("hipFreeMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetMipmappedArrayLevel", + ("hipGetMipmappedArrayLevel", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSymbolAddress", + ("hipGetSymbolAddress", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSymbolSize", + ("hipGetSymbolSize", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemPrefetchAsync", + ("hipMemPrefetchAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMallocHost", ("hipHostMalloc", CONV_MEM, API_RUNTIME)), + ("cudaMallocArray", ("hipMallocArray", CONV_MEM, API_RUNTIME)), + ("cudaMalloc", ("hipMalloc", CONV_MEM, API_RUNTIME)), + ("cudaMalloc3D", ("hipMalloc3D", CONV_MEM, API_RUNTIME)), + ("cudaMalloc3DArray", ("hipMalloc3DArray", CONV_MEM, API_RUNTIME)), + ( + "cudaMallocManaged", + ("hipMallocManaged", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMallocMipmappedArray", + ("hipMallocMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMallocPitch", ("hipMallocPitch", CONV_MEM, API_RUNTIME)), + ("cudaFreeHost", ("hipHostFree", CONV_MEM, API_RUNTIME)), + ("cudaFreeArray", ("hipFreeArray", CONV_MEM, API_RUNTIME)), + ("cudaFree", ("hipFree", CONV_MEM, API_RUNTIME)), + ("cudaHostRegister", ("hipHostRegister", CONV_MEM, API_RUNTIME)), + ("cudaHostUnregister", ("hipHostUnregister", CONV_MEM, API_RUNTIME)), + ("cudaHostAlloc", ("hipHostMalloc", CONV_MEM, API_RUNTIME)), + ("cudaMemoryTypeHost", ("hipMemoryTypeHost", CONV_MEM, API_RUNTIME)), + ("cudaMemoryTypeDevice", ("hipMemoryTypeDevice", CONV_MEM, API_RUNTIME)), + ("make_cudaExtent", ("make_hipExtent", CONV_MEM, API_RUNTIME)), + ("make_cudaPitchedPtr", ("make_hipPitchedPtr", CONV_MEM, API_RUNTIME)), + ("make_cudaPos", ("make_hipPos", CONV_MEM, API_RUNTIME)), + ("cudaHostAllocDefault", 
("hipHostMallocDefault", CONV_MEM, API_RUNTIME)), + ("cudaHostAllocPortable", ("hipHostMallocPortable", CONV_MEM, API_RUNTIME)), + ("cudaHostAllocMapped", ("hipHostMallocMapped", CONV_MEM, API_RUNTIME)), + ( + "cudaHostAllocWriteCombined", + ("hipHostMallocWriteCombined", CONV_MEM, API_RUNTIME), + ), + ("cudaHostGetFlags", ("hipHostGetFlags", CONV_MEM, API_RUNTIME)), + ("cudaHostRegisterDefault", ("hipHostRegisterDefault", CONV_MEM, API_RUNTIME)), + ( + "cudaHostRegisterPortable", + ("hipHostRegisterPortable", CONV_MEM, API_RUNTIME), + ), + ("cudaHostRegisterMapped", ("hipHostRegisterMapped", CONV_MEM, API_RUNTIME)), + ( + "cudaHostRegisterIoMemory", + ("hipHostRegisterIoMemory", CONV_MEM, API_RUNTIME), + ), + # ("warpSize", ("hipWarpSize", CONV_SPECIAL_FUNC, API_RUNTIME), (HIP actually uses warpSize...)), + ("cudaEventCreate", ("hipEventCreate", CONV_EVENT, API_RUNTIME)), + ( + "cudaEventCreateWithFlags", + ("hipEventCreateWithFlags", CONV_EVENT, API_RUNTIME), + ), + ("cudaEventDestroy", ("hipEventDestroy", CONV_EVENT, API_RUNTIME)), + ("cudaEventRecord", ("hipEventRecord", CONV_EVENT, API_RUNTIME)), + ("cudaEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_RUNTIME)), + ("cudaEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_RUNTIME)), + ("cudaEventQuery", ("hipEventQuery", CONV_EVENT, API_RUNTIME)), + ("cudaEventDefault", ("hipEventDefault", CONV_EVENT, API_RUNTIME)), + ("cudaEventBlockingSync", ("hipEventBlockingSync", CONV_EVENT, API_RUNTIME)), + ("cudaEventDisableTiming", ("hipEventDisableTiming", CONV_EVENT, API_RUNTIME)), + ("cudaEventInterprocess", ("hipEventInterprocess", CONV_EVENT, API_RUNTIME)), + ("cudaStreamCreate", ("hipStreamCreate", CONV_STREAM, API_RUNTIME)), + ( + "cudaStreamCreateWithFlags", + ("hipStreamCreateWithFlags", CONV_STREAM, API_RUNTIME), + ), + ( + "cudaStreamCreateWithPriority", + ("hipStreamCreateWithPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaStreamDestroy", ("hipStreamDestroy", 
CONV_STREAM, API_RUNTIME)), + ("cudaStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_RUNTIME)), + ("cudaStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_RUNTIME)), + ("cudaStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_RUNTIME)), + ("cudaStreamQuery", ("hipStreamQuery", CONV_STREAM, API_RUNTIME)), + ("cudaStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_RUNTIME)), + ( + "cudaStreamAttachMemAsync", + ("hipStreamAttachMemAsync", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaStreamGetPriority", + ("hipStreamGetPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaCpuDeviceId", ("hipCpuDeviceId", CONV_TYPE, API_RUNTIME)), + ("cudaStreamDefault", ("hipStreamDefault", CONV_TYPE, API_RUNTIME)), + ("cudaStreamNonBlocking", ("hipStreamNonBlocking", CONV_TYPE, API_RUNTIME)), + ("cudaStreamGetCaptureInfo", ("hipStreamGetCaptureInfo", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureStatus", ("hipStreamCaptureStatus", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureStatusActive", ("hipStreamCaptureStatusActive", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureMode", ("hipStreamCaptureMode", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureModeGlobal", ("hipStreamCaptureModeGlobal", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureModeRelaxed", ("hipStreamCaptureModeRelaxed", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureModeThreadLocal", ("hipStreamCaptureModeThreadLocal", CONV_TYPE, API_RUNTIME)), + ("cudaStreamBeginCapture", ("hipStreamBeginCapture", CONV_TYPE, API_RUNTIME)), + ("cudaStreamEndCapture", ("hipStreamEndCapture", CONV_TYPE, API_RUNTIME)), + ("cudaGraphInstantiate", ("hipGraphInstantiate", CONV_TYPE, API_RUNTIME)), + ("cudaGraphDestroy", ("hipGraphDestroy", CONV_TYPE, API_RUNTIME)), + ("cudaGraphExecDestroy", ("hipGraphExecDestroy", CONV_TYPE, API_RUNTIME)), + ("cudaGraphLaunch", ("hipGraphLaunch", CONV_TYPE, API_RUNTIME)), + ("cudaGraphGetNodes", ("hipGraphGetNodes", CONV_TYPE, API_RUNTIME)), + 
("cudaGraphDebugDotPrint", ("hipGraphDebugDotPrint", CONV_TYPE, API_RUNTIME)), + ("cudaThreadExchangeStreamCaptureMode", ("hipThreadExchangeStreamCaptureMode", CONV_TYPE, API_RUNTIME)), + ("cudaStreamIsCapturing", ("hipStreamIsCapturing", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceSynchronize", ("hipDeviceSynchronize", CONV_DEVICE, API_RUNTIME)), + ("cudaDeviceReset", ("hipDeviceReset", CONV_DEVICE, API_RUNTIME)), + ("cudaSetDevice", ("hipSetDevice", CONV_DEVICE, API_RUNTIME)), + ("cudaGetDevice", ("hipGetDevice", CONV_DEVICE, API_RUNTIME)), + ("cudaGetDeviceCount", ("hipGetDeviceCount", CONV_DEVICE, API_RUNTIME)), + ("cudaChooseDevice", ("hipChooseDevice", CONV_DEVICE, API_RUNTIME)), + ("cudaThreadExit", ("hipDeviceReset", CONV_THREAD, API_RUNTIME)), + ( + "cudaThreadGetCacheConfig", + ("hipDeviceGetCacheConfig", CONV_THREAD, API_RUNTIME), + ), + ( + "cudaThreadGetLimit", + ("hipThreadGetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaThreadSetCacheConfig", + ("hipDeviceSetCacheConfig", CONV_THREAD, API_RUNTIME), + ), + ( + "cudaThreadSetLimit", + ("hipThreadSetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaThreadSynchronize", ("hipDeviceSynchronize", CONV_THREAD, API_RUNTIME)), + ("cudaDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_RUNTIME)), + ( + "cudaDevAttrMaxThreadsPerBlock", + ("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxBlockDimX", + ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxBlockDimY", + ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxBlockDimZ", + ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxGridDimX", + ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxGridDimY", + ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxGridDimZ", + ("hipDeviceAttributeMaxGridDimZ", 
CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxSharedMemoryPerBlock", + ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrTotalConstantMemory", + ("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_RUNTIME), + ), + ("cudaDevAttrWarpSize", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_RUNTIME)), + ( + "cudaDevAttrMaxPitch", + ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrMaxRegistersPerBlock", + ("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrClockRate", + ("hipDeviceAttributeClockRate", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrTextureAlignment", + ( + "hipDeviceAttributeTextureAlignment", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrGpuOverlap", + ("hipDeviceAttributeGpuOverlap", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrMultiProcessorCount", + ("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrKernelExecTimeout", + ( + "hipDeviceAttributeKernelExecTimeout", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrIntegrated", + ("hipDeviceAttributeIntegrated", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrCanMapHostMemory", + ( + "hipDeviceAttributeCanMapHostMemory", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrComputeMode", + ("hipDeviceAttributeComputeMode", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxTexture1DWidth", + ( + "hipDeviceAttributeMaxTexture1DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DWidth", + ( + "hipDeviceAttributeMaxTexture2DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DHeight", + ( + "hipDeviceAttributeMaxTexture2DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DWidth", + ( + 
"hipDeviceAttributeMaxTexture3DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DHeight", + ( + "hipDeviceAttributeMaxTexture3DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DDepth", + ( + "hipDeviceAttributeMaxTexture3DDepth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLayeredWidth", + ( + "hipDeviceAttributeMaxTexture2DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLayeredHeight", + ( + "hipDeviceAttributeMaxTexture2DLayeredHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLayeredLayers", + ( + "hipDeviceAttributeMaxTexture2DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrSurfaceAlignment", + ( + "hipDeviceAttributeSurfaceAlignment", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrConcurrentKernels", + ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrEccEnabled", + ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDevAttrPciBusId", ("hipDeviceAttributePciBusId", CONV_TYPE, API_RUNTIME)), + ( + "cudaDevAttrPciDeviceId", + ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrTccDriver", + ("hipDeviceAttributeTccDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrMemoryClockRate", + ("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrGlobalMemoryBusWidth", + ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrL2CacheSize", + ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxThreadsPerMultiProcessor", + ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrAsyncEngineCount", + ( + 
"hipDeviceAttributeAsyncEngineCount", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrUnifiedAddressing", + ( + "hipDeviceAttributeUnifiedAddressing", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture1DLayeredWidth", + ( + "hipDeviceAttributeMaxTexture1DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture1DLayeredLayers", + ( + "hipDeviceAttributeMaxTexture1DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DGatherWidth", + ( + "hipDeviceAttributeMaxTexture2DGatherWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DGatherHeight", + ( + "hipDeviceAttributeMaxTexture2DGatherHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DWidthAlt", + ( + "hipDeviceAttributeMaxTexture3DWidthAlternate", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DHeightAlt", + ( + "hipDeviceAttributeMaxTexture3DHeightAlternate", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DDepthAlt", + ( + "hipDeviceAttributeMaxTexture3DDepthAlternate", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrPciDomainId", + ("hipDeviceAttributePciDomainId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrTexturePitchAlignment", + ( + "hipDeviceAttributeTexturePitchAlignment", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTextureCubemapWidth", + ( + "hipDeviceAttributeMaxTextureCubemapWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTextureCubemapLayeredWidth", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTextureCubemapLayeredLayers", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredLayers", + 
CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface1DWidth", + ( + "hipDeviceAttributeMaxSurface1DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DWidth", + ( + "hipDeviceAttributeMaxSurface2DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DHeight", + ( + "hipDeviceAttributeMaxSurface2DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface3DWidth", + ( + "hipDeviceAttributeMaxSurface3DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface3DHeight", + ( + "hipDeviceAttributeMaxSurface3DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface3DDepth", + ( + "hipDeviceAttributeMaxSurface3DDepth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface1DLayeredWidth", + ( + "hipDeviceAttributeMaxSurface1DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface1DLayeredLayers", + ( + "hipDeviceAttributeMaxSurface1DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DLayeredWidth", + ( + "hipDeviceAttributeMaxSurface2DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DLayeredHeight", + ( + "hipDeviceAttributeMaxSurface2DLayeredHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DLayeredLayers", + ( + "hipDeviceAttributeMaxSurface2DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurfaceCubemapWidth", + ( + "hipDeviceAttributeMaxSurfaceCubemapWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurfaceCubemapLayeredWidth", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + 
"cudaDevAttrMaxSurfaceCubemapLayeredLayers", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture1DLinearWidth", + ( + "hipDeviceAttributeMaxTexture1DLinearWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLinearWidth", + ( + "hipDeviceAttributeMaxTexture2DLinearWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLinearHeight", + ( + "hipDeviceAttributeMaxTexture2DLinearHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLinearPitch", + ( + "hipDeviceAttributeMaxTexture2DLinearPitch", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DMipmappedWidth", + ( + "hipDeviceAttributeMaxTexture2DMipmappedWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DMipmappedHeight", + ( + "hipDeviceAttributeMaxTexture2DMipmappedHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrComputeCapabilityMajor", + ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrComputeCapabilityMinor", + ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxTexture1DMipmappedWidth", + ( + "hipDeviceAttributeMaxTexture1DMipmappedWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrStreamPrioritiesSupported", + ( + "hipDeviceAttributeStreamPrioritiesSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrGlobalL1CacheSupported", + ( + "hipDeviceAttributeGlobalL1CacheSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrLocalL1CacheSupported", + ( + "hipDeviceAttributeLocalL1CacheSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSharedMemoryPerMultiprocessor", + ( + 
"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", + CONV_TYPE, + API_RUNTIME, + ), + ), + ( + "cudaDevAttrMaxRegistersPerMultiprocessor", + ( + "hipDeviceAttributeMaxRegistersPerMultiprocessor", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrManagedMemory", + ( + "hipDeviceAttributeManagedMemory", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrIsMultiGpuBoard", + ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMultiGpuBoardGroupID", + ( + "hipDeviceAttributeMultiGpuBoardGroupID", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrHostNativeAtomicSupported", + ( + "hipDeviceAttributeHostNativeAtomicSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrSingleToDoublePrecisionPerfRatio", + ( + "hipDeviceAttributeSingleToDoublePrecisionPerfRatio", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrPageableMemoryAccess", + ( + "hipDeviceAttributePageableMemoryAccess", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrConcurrentManagedAccess", + ( + "hipDeviceAttributeConcurrentManagedAccess", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrComputePreemptionSupported", + ( + "hipDeviceAttributeComputePreemptionSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrCanUseHostPointerForRegisteredMem", + ( + "hipDeviceAttributeCanUseHostPointerForRegisteredMem", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaPointerGetAttributes", + ("hipPointerGetAttributes", CONV_MEM, API_RUNTIME), + ), + ( + "cudaHostGetDevicePointer", + ("hipHostGetDevicePointer", CONV_MEM, API_RUNTIME), + ), + ( + "cudaGetDeviceProperties", + ("hipGetDeviceProperties", CONV_DEVICE, API_RUNTIME), + ), + ("cudaDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_RUNTIME)), + ( + "cudaDeviceGetByPCIBusId", + 
("hipDeviceGetByPCIBusId", CONV_DEVICE, API_RUNTIME), + ), + ( + "cudaDeviceGetStreamPriorityRange", + ( + "hipDeviceGetStreamPriorityRange", + CONV_DEVICE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaSetValidDevices", + ("hipSetValidDevices", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevP2PAttrPerformanceRank", + ( + "hipDeviceP2PAttributePerformanceRank", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevP2PAttrAccessSupported", + ( + "hipDeviceP2PAttributeAccessSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevP2PAttrNativeAtomicSupported", + ( + "hipDeviceP2PAttributeNativeAtomicSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDeviceGetP2PAttribute", + ("hipDeviceGetP2PAttribute", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeDefault", + ("hipComputeModeDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeExclusive", + ("hipComputeModeExclusive", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeProhibited", + ("hipComputeModeProhibited", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeExclusiveProcess", + ("hipComputeModeExclusiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetDeviceFlags", + ("hipGetDeviceFlags", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaSetDeviceFlags", ("hipSetDeviceFlags", CONV_DEVICE, API_RUNTIME)), + ("cudaDeviceScheduleAuto", ("hipDeviceScheduleAuto", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceScheduleSpin", ("hipDeviceScheduleSpin", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceScheduleYield", ("hipDeviceScheduleYield", CONV_TYPE, API_RUNTIME)), + ( + "cudaDeviceBlockingSync", + ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDeviceScheduleBlockingSync", + ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDeviceScheduleMask", + ("hipDeviceScheduleMask", 
CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDeviceMapHost", ("hipDeviceMapHost", CONV_TYPE, API_RUNTIME)), + ( + "cudaDeviceLmemResizeToMax", + ("hipDeviceLmemResizeToMax", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDeviceMask", ("hipDeviceMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaDeviceSetCacheConfig", + ("hipDeviceSetCacheConfig", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaDeviceGetCacheConfig", + ("hipDeviceGetCacheConfig", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaFuncAttributes", + ("hipFuncAttributes", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaFuncAttributeMaxDynamicSharedMemorySize", + ("hipFuncAttributeMaxDynamicSharedMemorySize", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaFuncAttributePreferredSharedMemoryCarveout", + ("hipFuncAttributePreferredSharedMemoryCarveout", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaFuncSetAttribute", + ("hipFuncSetAttribute", CONV_EXEC, API_RUNTIME), + ), + ("cudaFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_CACHE, API_RUNTIME)), + ( + "cudaFuncCachePreferNone", + ("hipFuncCachePreferNone", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaFuncCachePreferShared", + ("hipFuncCachePreferShared", CONV_CACHE, API_RUNTIME), + ), + ("cudaFuncCachePreferL1", ("hipFuncCachePreferL1", CONV_CACHE, API_RUNTIME)), + ( + "cudaFuncCachePreferEqual", + ("hipFuncCachePreferEqual", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaFuncGetAttributes", + ("hipFuncGetAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFuncSetSharedMemConfig", + ("hipFuncSetSharedMemConfig", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetParameterBuffer", + ("hipGetParameterBuffer", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaSetDoubleForDevice", + ("hipSetDoubleForDevice", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaSetDoubleForHost", + ("hipSetDoubleForHost", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaConfigureCall", + ("hipConfigureCall", CONV_EXEC, API_RUNTIME, 
HIP_UNSUPPORTED), + ), + ("cudaLaunch", ("hipLaunch", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaLaunchCooperativeKernel", + ("hipLaunchCooperativeKernel", CONV_EXEC, API_RUNTIME), + ), + ( + "cudaSetupArgument", + ("hipSetupArgument", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_RUNTIME)), + ( + "cudaRuntimeGetVersion", + ("hipRuntimeGetVersion", CONV_VERSION, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaOccupancyMaxPotentialBlockSize", + ("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_RUNTIME), + ), + ( + "cudaOccupancyMaxPotentialBlockSizeWithFlags", + ( + "hipOccupancyMaxPotentialBlockSizeWithFlags", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaOccupancyMaxActiveBlocksPerMultiprocessor", + ( + "hipOccupancyMaxActiveBlocksPerMultiprocessor", + CONV_OCCUPANCY, + API_RUNTIME, + ), + ), + ( + "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + ( + "hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaOccupancyMaxPotentialBlockSizeVariableSMem", + ( + "hipOccupancyMaxPotentialBlockSizeVariableSMem", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", + ( + "hipOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_RUNTIME)), + ( + "cudaDeviceDisablePeerAccess", + ("hipDeviceDisablePeerAccess", CONV_PEER, API_RUNTIME), + ), + ( + "cudaDeviceEnablePeerAccess", + ("hipDeviceEnablePeerAccess", CONV_PEER, API_RUNTIME), + ), + ("cudaMemcpyPeerAsync", ("hipMemcpyPeerAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyPeer", ("hipMemcpyPeer", CONV_MEM, API_RUNTIME)), + ( + "cudaIpcMemLazyEnablePeerAccess", + ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_RUNTIME), + ), + 
( + "cudaDeviceSetSharedMemConfig", + ("hipDeviceSetSharedMemConfig", CONV_DEVICE, API_RUNTIME), + ), + ( + "cudaDeviceGetSharedMemConfig", + ("hipDeviceGetSharedMemConfig", CONV_DEVICE, API_RUNTIME), + ), + ( + "cudaSharedMemBankSizeDefault", + ("hipSharedMemBankSizeDefault", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaSharedMemBankSizeFourByte", + ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaSharedMemBankSizeEightByte", + ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaLimitStackSize", + ("hipLimitStackSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaLimitPrintfFifoSize", + ("hipLimitPrintfFifoSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaLimitMallocHeapSize", ("hipLimitMallocHeapSize", CONV_TYPE, API_RUNTIME)), + ( + "cudaLimitDevRuntimeSyncDepth", + ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaLimitDevRuntimePendingLaunchCount", + ( + "hipLimitDevRuntimePendingLaunchCount", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaDeviceGetLimit", ("hipDeviceGetLimit", CONV_DEVICE, API_RUNTIME)), + ( + "cudaProfilerInitialize", + ("hipProfilerInitialize", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaProfilerStart", ("hipProfilerStart", CONV_OTHER, API_RUNTIME)), + ("cudaProfilerStop", ("hipProfilerStop", CONV_OTHER, API_RUNTIME)), + ( + "cudaKeyValuePair", + ("hipKeyValuePair", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaCSV", ("hipCSV", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)), + ("cudaReadModeElementType", ("hipReadModeElementType", CONV_TEX, API_RUNTIME)), + ( + "cudaReadModeNormalizedFloat", + ("hipReadModeNormalizedFloat", CONV_TEX, API_RUNTIME), + ), + ("cudaFilterModePoint", ("hipFilterModePoint", CONV_TEX, API_RUNTIME)), + ("cudaFilterModeLinear", ("hipFilterModeLinear", CONV_TEX, API_RUNTIME)), + ("cudaBindTexture", ("hipBindTexture", CONV_TEX, API_RUNTIME)), + ("cudaUnbindTexture", 
("hipUnbindTexture", CONV_TEX, API_RUNTIME)), + ("cudaBindTexture2D", ("hipBindTexture2D", CONV_TEX, API_RUNTIME)), + ("cudaBindTextureToArray", ("hipBindTextureToArray", CONV_TEX, API_RUNTIME)), + ( + "cudaBindTextureToMipmappedArray", + ("hipBindTextureToMipmappedArray", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureAlignmentOffset", + ("hipGetTextureAlignmentOffset", CONV_TEX, API_RUNTIME), + ), + ("cudaGetTextureReference", ("hipGetTextureReference", CONV_TEX, API_RUNTIME)), + ( + "cudaChannelFormatKindSigned", + ("hipChannelFormatKindSigned", CONV_TEX, API_RUNTIME), + ), + ( + "cudaChannelFormatKindUnsigned", + ("hipChannelFormatKindUnsigned", CONV_TEX, API_RUNTIME), + ), + ( + "cudaChannelFormatKindFloat", + ("hipChannelFormatKindFloat", CONV_TEX, API_RUNTIME), + ), + ( + "cudaChannelFormatKindNone", + ("hipChannelFormatKindNone", CONV_TEX, API_RUNTIME), + ), + ("cudaCreateChannelDesc", ("hipCreateChannelDesc", CONV_TEX, API_RUNTIME)), + ("cudaGetChannelDesc", ("hipGetChannelDesc", CONV_TEX, API_RUNTIME)), + ("cudaResourceTypeArray", ("hipResourceTypeArray", CONV_TEX, API_RUNTIME)), + ( + "cudaResourceTypeMipmappedArray", + ("hipResourceTypeMipmappedArray", CONV_TEX, API_RUNTIME), + ), + ("cudaResourceTypeLinear", ("hipResourceTypeLinear", CONV_TEX, API_RUNTIME)), + ("cudaResourceTypePitch2D", ("hipResourceTypePitch2D", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatNone", ("hipResViewFormatNone", CONV_TEX, API_RUNTIME)), + ( + "cudaResViewFormatUnsignedChar1", + ("hipResViewFormatUnsignedChar1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedChar2", + ("hipResViewFormatUnsignedChar2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedChar4", + ("hipResViewFormatUnsignedChar4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedChar1", + ("hipResViewFormatSignedChar1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedChar2", + ("hipResViewFormatSignedChar2", CONV_TEX, API_RUNTIME), + ), + ( + 
"cudaResViewFormatSignedChar4", + ("hipResViewFormatSignedChar4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedShort1", + ("hipResViewFormatUnsignedShort1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedShort2", + ("hipResViewFormatUnsignedShort2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedShort4", + ("hipResViewFormatUnsignedShort4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedShort1", + ("hipResViewFormatSignedShort1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedShort2", + ("hipResViewFormatSignedShort2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedShort4", + ("hipResViewFormatSignedShort4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedInt1", + ("hipResViewFormatUnsignedInt1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedInt2", + ("hipResViewFormatUnsignedInt2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedInt4", + ("hipResViewFormatUnsignedInt4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedInt1", + ("hipResViewFormatSignedInt1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedInt2", + ("hipResViewFormatSignedInt2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedInt4", + ("hipResViewFormatSignedInt4", CONV_TEX, API_RUNTIME), + ), + ("cudaResViewFormatHalf1", ("hipResViewFormatHalf1", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatHalf2", ("hipResViewFormatHalf2", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatHalf4", ("hipResViewFormatHalf4", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatFloat1", ("hipResViewFormatFloat1", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatFloat2", ("hipResViewFormatFloat2", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatFloat4", ("hipResViewFormatFloat4", CONV_TEX, API_RUNTIME)), + ( + "cudaResViewFormatUnsignedBlockCompressed1", + ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed2", + 
("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed3", + ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed4", + ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedBlockCompressed4", + ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed5", + ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedBlockCompressed5", + ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed6H", + ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedBlockCompressed6H", + ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed7", + ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_RUNTIME), + ), + ("cudaAddressModeWrap", ("hipAddressModeWrap", CONV_TEX, API_RUNTIME)), + ("cudaAddressModeClamp", ("hipAddressModeClamp", CONV_TEX, API_RUNTIME)), + ("cudaAddressModeMirror", ("hipAddressModeMirror", CONV_TEX, API_RUNTIME)), + ("cudaAddressModeBorder", ("hipAddressModeBorder", CONV_TEX, API_RUNTIME)), + ("cudaCreateTextureObject", ("hipCreateTextureObject", CONV_TEX, API_RUNTIME)), + ( + "cudaDestroyTextureObject", + ("hipDestroyTextureObject", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureObjectResourceDesc", + ("hipGetTextureObjectResourceDesc", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureObjectResourceViewDesc", + ("hipGetTextureObjectResourceViewDesc", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureObjectTextureDesc", + ("hipGetTextureObjectTextureDesc", CONV_TEX, API_RUNTIME), + ), + ( + "cudaBindSurfaceToArray", + ("hipBindSurfaceToArray", CONV_SURFACE, API_RUNTIME, 
HIP_UNSUPPORTED), + ), + ( + "cudaGetSurfaceReference", + ("hipGetSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaBoundaryModeZero", + ("hipBoundaryModeZero", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaBoundaryModeClamp", + ("hipBoundaryModeClamp", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaBoundaryModeTrap", + ("hipBoundaryModeTrap", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFormatModeForced", + ("hipFormatModeForced", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFormatModeAuto", + ("hipFormatModeAuto", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaCreateSurfaceObject", + ("hipCreateSurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDestroySurfaceObject", + ("hipDestroySurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSurfaceObjectResourceDesc", + ( + "hipGetSurfaceObjectResourceDesc", + CONV_SURFACE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_DEVICE, API_RUNTIME)), + ( + "cudaGLGetDevices", + ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterBuffer", + ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterImage", + ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaWGLGetDevice", + ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsMapResources", + ("hipGraphicsMapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + 
"cudaGraphicsResourceGetMappedMipmappedArray", + ( + "hipGraphicsResourceGetMappedMipmappedArray", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsResourceGetMappedPointer", + ( + "hipGraphicsResourceGetMappedPointer", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsResourceSetMapFlags", + ( + "hipGraphicsResourceSetMapFlags", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsSubResourceGetMappedArray", + ( + "hipGraphicsSubResourceGetMappedArray", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsUnmapResources", + ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsUnregisterResource", + ( + "hipGraphicsUnregisterResource", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFacePositiveX", + ( + "hipGraphicsCubeFacePositiveX", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFaceNegativeX", + ( + "hipGraphicsCubeFaceNegativeX", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFacePositiveY", + ( + "hipGraphicsCubeFacePositiveY", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFaceNegativeY", + ( + "hipGraphicsCubeFaceNegativeY", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFacePositiveZ", + ( + "hipGraphicsCubeFacePositiveZ", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFaceNegativeZ", + ( + "hipGraphicsCubeFaceNegativeZ", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsMapFlagsNone", + ("hipGraphicsMapFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsMapFlagsReadOnly", + ( + "hipGraphicsMapFlagsReadOnly", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsMapFlagsWriteDiscard", + ( + 
"hipGraphicsMapFlagsWriteDiscard", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsNone", + ( + "hipGraphicsRegisterFlagsNone", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsReadOnly", + ( + "hipGraphicsRegisterFlagsReadOnly", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsWriteDiscard", + ( + "hipGraphicsRegisterFlagsWriteDiscard", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsSurfaceLoadStore", + ( + "hipGraphicsRegisterFlagsSurfaceLoadStore", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsTextureGather", + ( + "hipGraphicsRegisterFlagsTextureGather", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGLDeviceListAll", + ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLDeviceListCurrentFrame", + ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLDeviceListNextFrame", + ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLGetDevices", + ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterBuffer", + ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterImage", + ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaWGLGetDevice", + ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLMapFlagsNone", + ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLMapFlagsReadOnly", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", + CONV_GL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGLMapFlagsWriteDiscard", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + CONV_GL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + 
"cudaGLMapBufferObject", + ("hipGLMapBufferObject__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLMapBufferObjectAsync", + ("hipGLMapBufferObjectAsync__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLRegisterBufferObject", + ("hipGLRegisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLSetBufferObjectMapFlags", + ("hipGLSetBufferObjectMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLSetGLDevice", + ("hipGLSetGLDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLUnmapBufferObject", + ("hipGLUnmapBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLUnmapBufferObjectAsync", + ("hipGLUnmapBufferObjectAsync", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLUnregisterBufferObject", + ("hipGLUnregisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9DeviceListAll", + ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9DeviceListCurrentFrame", + ( + "HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9DeviceListNextFrame", + ( + "HIP_D3D9_DEVICE_LIST_NEXT_FRAME", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9GetDevice", + ("hipD3D9GetDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9GetDevices", + ("hipD3D9GetDevices", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9GetDirect3DDevice", + ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9SetDirect3DDevice", + ("hipD3D9SetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D9RegisterResource", + ( + "hipGraphicsD3D9RegisterResource", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9MapFlags", + ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9MapFlagsNone", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_NONE", + 
CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9MapFlagsReadOnly", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9MapFlagsWriteDiscard", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9RegisterFlagsNone", + ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9RegisterFlagsArray", + ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9MapResources", + ("hipD3D9MapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9RegisterResource", + ("hipD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetMappedArray", + ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetMappedPitch", + ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetMappedPointer", + ( + "hipD3D9ResourceGetMappedPointer", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9ResourceGetMappedSize", + ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetSurfaceDimensions", + ( + "hipD3D9ResourceGetSurfaceDimensions", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9ResourceSetMapFlags", + ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9UnmapResources", + ("hipD3D9UnmapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9UnregisterResource", + ("hipD3D9UnregisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10DeviceListAll", + ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10DeviceListCurrentFrame", + ( + "HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D10, + 
API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10DeviceListNextFrame", + ( + "HIP_D3D10_DEVICE_LIST_NEXT_FRAME", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10GetDevice", + ("hipD3D10GetDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10GetDevices", + ("hipD3D10GetDevices", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D10RegisterResource", + ( + "hipGraphicsD3D10RegisterResource", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10MapFlagsNone", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_NONE", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10MapFlagsReadOnly", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10MapFlagsWriteDiscard", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10RegisterFlagsNone", + ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10RegisterFlagsArray", + ( + "HIP_D3D10_REGISTER_FLAGS_ARRAY", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10GetDirect3DDevice", + ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10MapResources", + ("hipD3D10MapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10RegisterResource", + ("hipD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10ResourceGetMappedArray", + ( + "hipD3D10ResourceGetMappedArray", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceGetMappedPitch", + ( + "hipD3D10ResourceGetMappedPitch", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceGetMappedPointer", + ( + "hipD3D10ResourceGetMappedPointer", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceGetMappedSize", + 
("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10ResourceGetSurfaceDimensions", + ( + "hipD3D10ResourceGetSurfaceDimensions", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceSetMapFlags", + ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10SetDirect3DDevice", + ("hipD3D10SetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10UnmapResources", + ("hipD3D10UnmapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10UnregisterResource", + ("hipD3D10UnregisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11DeviceListAll", + ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11DeviceListCurrentFrame", + ( + "HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D11DeviceListNextFrame", + ( + "HIP_D3D11_DEVICE_LIST_NEXT_FRAME", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D11GetDevice", + ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11GetDevices", + ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D11RegisterResource", + ( + "hipGraphicsD3D11RegisterResource", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D11GetDevice", + ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11GetDevices", + ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D11RegisterResource", + ( + "hipGraphicsD3D11RegisterResource", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsVDPAURegisterOutputSurface", + ( + "hipGraphicsVDPAURegisterOutputSurface", + CONV_VDPAU, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsVDPAURegisterVideoSurface", + ( + 
"hipGraphicsVDPAURegisterVideoSurface", + CONV_VDPAU, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaVDPAUGetDevice", + ("hipVDPAUGetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaVDPAUSetVDPAUDevice", + ("hipVDPAUSetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamConsumerAcquireFrame", + ( + "hipEGLStreamConsumerAcquireFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamConsumerConnect", + ("hipEGLStreamConsumerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamConsumerConnectWithFlags", + ( + "hipEGLStreamConsumerConnectWithFlags", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamConsumerReleaseFrame", + ( + "hipEGLStreamConsumerReleaseFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamProducerConnect", + ("hipEGLStreamProducerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamProducerDisconnect", + ("hipEGLStreamProducerDisconnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamProducerPresentFrame", + ( + "hipEGLStreamProducerPresentFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamProducerReturnFrame", + ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsEGLRegisterImage", + ("hipGraphicsEGLRegisterImage", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsResourceGetMappedEglFrame", + ( + "hipGraphicsResourceGetMappedEglFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cublasInit", ("hipblasInit", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasShutdown", + ("hipblasShutdown", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetVersion", + ("hipblasGetVersion", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetError", + ("hipblasGetError", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + 
("cublasAlloc", ("hipblasAlloc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasFree", ("hipblasFree", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSetKernelStream", + ("hipblasSetKernelStream", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetAtomicsMode", + ("hipblasGetAtomicsMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSetAtomicsMode", + ("hipblasSetAtomicsMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetMathMode", + ("hipblasGetMathMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSetMathMode", + ("hipblasSetMathMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("CUBLAS_OP_N", ("HIPBLAS_OP_N", CONV_NUMERIC_LITERAL, API_BLAS)), + ( + "CUBLAS_OP_T", + ("HIPBLAS_OP_T", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_OP_C", + ("HIPBLAS_OP_C", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_SUCCESS", + ("HIPBLAS_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_NOT_INITIALIZED", + ("HIPBLAS_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_ALLOC_FAILED", + ("HIPBLAS_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_INVALID_VALUE", + ("HIPBLAS_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_MAPPING_ERROR", + ("HIPBLAS_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_EXECUTION_FAILED", + ("HIPBLAS_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_INTERNAL_ERROR", + ("HIPBLAS_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_NOT_SUPPORTED", + ("HIPBLAS_STATUS_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_ARCH_MISMATCH", + ("HIPBLAS_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_FILL_MODE_LOWER", + ("HIPBLAS_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + 
"CUBLAS_FILL_MODE_UPPER", + ("HIPBLAS_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_DIAG_NON_UNIT", + ("HIPBLAS_DIAG_NON_UNIT", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ("CUBLAS_DIAG_UNIT", ("HIPBLAS_DIAG_UNIT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUBLAS_SIDE_LEFT", ("HIPBLAS_SIDE_LEFT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUBLAS_SIDE_RIGHT", ("HIPBLAS_SIDE_RIGHT", CONV_NUMERIC_LITERAL, API_BLAS)), + ( + "CUBLAS_POINTER_MODE_HOST", + ("HIPBLAS_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_POINTER_MODE_DEVICE", + ("HIPBLAS_POINTER_MODE_DEVICE", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_ATOMICS_NOT_ALLOWED", + ( + "HIPBLAS_ATOMICS_NOT_ALLOWED", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_ATOMICS_ALLOWED", + ( + "HIPBLAS_ATOMICS_ALLOWED", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_DATA_FLOAT", + ( + "HIPBLAS_DATA_FLOAT", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_DATA_DOUBLE", + ( + "HIPBLAS_DATA_DOUBLE", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_DATA_HALF", + ("HIPBLAS_DATA_HALF", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "CUBLAS_DATA_INT8", + ("HIPBLAS_DATA_INT8", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED), + ), + ("CUBLAS_GEMM_DEFAULT", ("HIPBLAS_GEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUBLAS_GEMM_DEFAULT_TENSOR_OP", ("HIPBLAS_GEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("cublasCreate", ("hipblasCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasDestroy", ("hipblasDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasSetVector", ("hipblasSetVector", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetVector", ("hipblasGetVector", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSetVectorAsync", + ("hipblasSetVectorAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetVectorAsync", + ("hipblasGetVectorAsync", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSetMatrix", ("hipblasSetMatrix", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetMatrix", ("hipblasGetMatrix", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasGetMatrixAsync", + ("hipblasGetMatrixAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSetMatrixAsync", + ("hipblasSetMatrixAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasXerbla", ("hipblasXerbla", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSnrm2", ("hipblasSnrm2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDnrm2", ("hipblasDnrm2", CONV_MATH_FUNC, API_BLAS)), + ("cublasScnrm2", ("hipblasScnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDznrm2", ("hipblasDznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasNrm2Ex", + ("hipblasNrm2Ex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSdot", ("hipblasSdot", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSdotBatched", + ("hipblasSdotBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDdot", ("hipblasDdot", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDdotBatched", + ("hipblasDdotBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasCdotu", ("hipblasCdotu", CONV_MATH_FUNC, API_BLAS)), + ("cublasCdotc", ("hipblasCdotc", CONV_MATH_FUNC, API_BLAS)), + ("cublasZdotu", ("hipblasZdotu", CONV_MATH_FUNC, API_BLAS)), + ("cublasZdotc", ("hipblasZdotc", CONV_MATH_FUNC, API_BLAS)), + ("cublasSscal", ("hipblasSscal", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSscalBatched", + ("hipblasSscalBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDscal", ("hipblasDscal", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDscalBatched", + ("hipblasDscalBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasCscal", ("hipblasCscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsscal", ("hipblasCsscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZscal", ("hipblasZscal", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED)), + ("cublasZdscal", ("hipblasZdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSaxpy", ("hipblasSaxpy", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSaxpyBatched", + ("hipblasSaxpyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDaxpy", ("hipblasDaxpy", CONV_MATH_FUNC, API_BLAS)), + ("cublasCaxpy", ("hipblasCaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZaxpy", ("hipblasZaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasScopy", ("hipblasScopy", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasScopyBatched", + ("hipblasScopyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDcopy", ("hipblasDcopy", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDcopyBatched", + ("hipblasDcopyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasCcopy", ("hipblasCcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZcopy", ("hipblasZcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSswap", ("hipblasSswap", CONV_MATH_FUNC, API_BLAS)), + ("cublasDswap", ("hipblasDswap", CONV_MATH_FUNC, API_BLAS)), + ("cublasCswap", ("hipblasCswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZswap", ("hipblasZswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIsamax", ("hipblasIsamax", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamax", ("hipblasIdamax", CONV_MATH_FUNC, API_BLAS)), + ("cublasIcamax", ("hipblasIcamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIzamax", ("hipblasIzamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIsamin", ("hipblasIsamin", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamin", ("hipblasIdamin", CONV_MATH_FUNC, API_BLAS)), + ("cublasIcamin", ("hipblasIcamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIzamin", ("hipblasIzamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSasum", ("hipblasSasum", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSasumBatched", + ("hipblasSasumBatched", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED), + ), + ("cublasDasum", ("hipblasDasum", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDasumBatched", + ("hipblasDasumBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasScasum", ("hipblasScasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDzasum", ("hipblasDzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrot", ("hipblasSrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrot", ("hipblasDrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCrot", ("hipblasCrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsrot", ("hipblasCsrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZrot", ("hipblasZrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZdrot", ("hipblasZdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrotg", ("hipblasSrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrotg", ("hipblasDrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCrotg", ("hipblasCrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZrotg", ("hipblasZrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrotm", ("hipblasSrotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrotm", ("hipblasDrotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrotmg", ("hipblasSrotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrotmg", ("hipblasDrotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSgemv", ("hipblasSgemv", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSgemvBatched", + ("hipblasSgemvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDgemv", ("hipblasDgemv", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgemv", ("hipblasCgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgemv", ("hipblasZgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSgbmv", ("hipblasSgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDgbmv", ("hipblasDgbmv", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED)), + ("cublasCgbmv", ("hipblasCgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgbmv", ("hipblasZgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrmv", ("hipblasStrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrmv", ("hipblasDtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrmv", ("hipblasCtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrmv", ("hipblasZtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStbmv", ("hipblasStbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtbmv", ("hipblasDtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtbmv", ("hipblasCtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtbmv", ("hipblasZtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStpmv", ("hipblasStpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtpmv", ("hipblasDtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtpmv", ("hipblasCtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtpmv", ("hipblasZtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrsv", ("hipblasStrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrsv", ("hipblasDtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrsv", ("hipblasCtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrsv", ("hipblasZtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStpsv", ("hipblasStpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtpsv", ("hipblasDtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtpsv", ("hipblasCtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtpsv", ("hipblasZtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStbsv", ("hipblasStbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtbsv", ("hipblasDtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtbsv", ("hipblasCtbsv", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED)), + ("cublasZtbsv", ("hipblasZtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsymv", ("hipblasSsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsymv", ("hipblasDsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsymv", ("hipblasCsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsymv", ("hipblasZsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChemv", ("hipblasChemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhemv", ("hipblasZhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsbmv", ("hipblasSsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsbmv", ("hipblasDsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChbmv", ("hipblasChbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhbmv", ("hipblasZhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspmv", ("hipblasSspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspmv", ("hipblasDspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpmv", ("hipblasChpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpmv", ("hipblasZhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSger", ("hipblasSger", CONV_MATH_FUNC, API_BLAS)), + ("cublasDger", ("hipblasDger", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgeru", ("hipblasCgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCgerc", ("hipblasCgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgeru", ("hipblasZgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgerc", ("hipblasZgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyr", ("hipblasSsyr", CONV_MATH_FUNC, API_BLAS)), + ("cublasDsyr", ("hipblasDsyr", CONV_MATH_FUNC, API_BLAS)), + ("cublasCher", ("hipblasCher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher", ("hipblasZher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspr", ("hipblasSspr", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED)), + ("cublasDspr", ("hipblasDspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpr", ("hipblasChpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpr", ("hipblasZhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyr2", ("hipblasSsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyr2", ("hipblasDsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCher2", ("hipblasCher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher2", ("hipblasZher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspr2", ("hipblasSspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspr2", ("hipblasDspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpr2", ("hipblasChpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpr2", ("hipblasZhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSgemmBatched", + ("hipblasSgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgemmBatched", + ("hipblasDgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasHgemmBatched", + ("hipblasHgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgemmStridedBatched", + ("hipblasSgemmStridedBatched", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasDgemmStridedBatched", + ("hipblasDgemmStridedBatched", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasHgemmStridedBatched", + ("hipblasHgemmStridedBatched", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasCgemmBatched", + ("hipblasCgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemm3mBatched", + ("hipblasCgemm3mBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemmBatched", + ("hipblasZgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemmStridedBatched", + ( + "hipblasCgemmStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cublasCgemm3mStridedBatched", + ( + 
"hipblasCgemm3mStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cublasZgemmStridedBatched", + ( + "hipblasZgemmStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cublasHgemmStridedBatched", + ( + "hipblasHgemmStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ("cublasSgemm", ("hipblasSgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgemm", ("hipblasDgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgemm", ("hipblasCgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasZgemm", ("hipblasZgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasHgemm", ("hipblasHgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasSsyrk", ("hipblasSsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyrk", ("hipblasDsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyrk", ("hipblasCsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyrk", ("hipblasZsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCherk", ("hipblasCherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZherk", ("hipblasZherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyr2k", ("hipblasSsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyr2k", ("hipblasDsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyr2k", ("hipblasCsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyr2k", ("hipblasZyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyrkx", ("hipblasSsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyrkx", ("hipblasDsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyrkx", ("hipblasCsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyrkx", ("hipblasZsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCher2k", ("hipblasCher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher2k", ("hipblasZher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + 
("cublasCherkx", ("hipblasCherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZherkx", ("hipblasZherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsymm", ("hipblasSsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsymm", ("hipblasDsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsymm", ("hipblasCsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsymm", ("hipblasZsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChemm", ("hipblasChemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhemm", ("hipblasZhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrsm", ("hipblasStrsm", CONV_MATH_FUNC, API_BLAS)), + ("cublasDtrsm", ("hipblasDtrsm", CONV_MATH_FUNC, API_BLAS)), + ("cublasCtrsm", ("hipblasCtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrsm", ("hipblasZtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasStrsmBatched", + ("hipblasStrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsmBatched", + ("hipblasDtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsmBatched", + ("hipblasCtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsmBatched", + ("hipblasZtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasStrmm", ("hipblasStrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrmm", ("hipblasDtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrmm", ("hipblasCtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrmm", ("hipblasZtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSgeam", ("hipblasSgeam", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgeam", ("hipblasDgeam", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgeam", ("hipblasCgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgeam", ("hipblasZgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSgetrfBatched", + 
("hipblasSgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgetrfBatched", + ("hipblasDgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgetrfBatched", + ("hipblasCgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgetrfBatched", + ("hipblasZgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgetriBatched", + ("hipblasSgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgetriBatched", + ("hipblasDgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgetriBatched", + ("hipblasCgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgetriBatched", + ("hipblasZgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgetrsBatched", + ("hipblasSgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgetrsBatched", + ("hipblasDgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgetrsBatched", + ("hipblasCgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgetrsBatched", + ("hipblasZgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrsmBatched", + ("hipblasStrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsmBatched", + ("hipblasDtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsmBatched", + ("hipblasCtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsmBatched", + ("hipblasZtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSmatinvBatched", + ("hipblasSmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDmatinvBatched", + ("hipblasDmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCmatinvBatched", + ("hipblasCmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZmatinvBatched", + 
("hipblasZmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgeqrfBatched", + ("hipblasSgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgeqrfBatched", + ("hipblasDgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgeqrfBatched", + ("hipblasCgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgeqrfBatched", + ("hipblasZgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgelsBatched", + ("hipblasSgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgelsBatched", + ("hipblasDgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgelsBatched", + ("hipblasCgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgelsBatched", + ("hipblasZgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSdgmm", ("hipblasSdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDdgmm", ("hipblasDdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCdgmm", ("hipblasCdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZdgmm", ("hipblasZdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStpttr", ("hipblasStpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtpttr", ("hipblasDtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtpttr", ("hipblasCtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtpttr", ("hipblasZtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrttp", ("hipblasStrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrttp", ("hipblasDtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrttp", ("hipblasCtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrttp", ("hipblasZtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCreate_v2", ("hipblasCreate_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDestroy_v2", ("hipblasDestroy_v2", 
CONV_MATH_FUNC, API_BLAS)), + ( + "cublasGetVersion_v2", + ("hipblasGetVersion_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSetStream", ("hipblasSetStream", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetStream", ("hipblasGetStream", CONV_MATH_FUNC, API_BLAS)), + ("cublasSetStream_v2", ("hipblasSetStream_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetStream_v2", ("hipblasGetStream_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasGetPointerMode", + ("hipblasGetPointerMode", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasSetPointerMode", + ("hipblasSetPointerMode", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasGetPointerMode_v2", + ("hipblasGetPointerMode_v2", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasSetPointerMode_v2", + ("hipblasSetPointerMode_v2", CONV_MATH_FUNC, API_BLAS), + ), + ("cublasSgemv_v2", ("hipblasSgemv_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgemv_v2", ("hipblasDgemv_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCgemv_v2", + ("hipblasCgemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemv_v2", + ("hipblasZgemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgbmv_v2", + ("hipblasSgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgbmv_v2", + ("hipblasDgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgbmv_v2", + ("hipblasCgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgbmv_v2", + ("hipblasZgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrmv_v2", + ("hipblasStrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrmv_v2", + ("hipblasDtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrmv_v2", + ("hipblasCtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrmv_v2", + ("hipblasZtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStbmv_v2", + ("hipblasStbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + 
"cublasDtbmv_v2", + ("hipblasDtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtbmv_v2", + ("hipblasCtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtbmv_v2", + ("hipblasZtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStpmv_v2", + ("hipblasStpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtpmv_v2", + ("hipblasDtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtpmv_v2", + ("hipblasCtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtpmv_v2", + ("hipblasZtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrsv_v2", + ("hipblasStrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsv_v2", + ("hipblasDtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsv_v2", + ("hipblasCtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsv_v2", + ("hipblasZtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStpsv_v2", + ("hipblasStpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtpsv_v2", + ("hipblasDtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtpsv_v2", + ("hipblasCtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtpsv_v2", + ("hipblasZtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStbsv_v2", + ("hipblasStbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtbsv_v2", + ("hipblasDtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtbsv_v2", + ("hipblasCtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtbsv_v2", + ("hipblasZtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsymv_v2", + ("hipblasSsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsymv_v2", + ("hipblasDsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + 
"cublasCsymv_v2", + ("hipblasCsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsymv_v2", + ("hipblasZsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChemv_v2", + ("hipblasChemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhemv_v2", + ("hipblasZhemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsbmv_v2", + ("hipblasSsbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsbmv_v2", + ("hipblasDsbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChbmv_v2", + ("hipblasChbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhbmv_v2", + ("hipblasZhbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSspmv_v2", + ("hipblasSspmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDspmv_v2", + ("hipblasDspmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChpmv_v2", + ("hipblasChpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhpmv_v2", + ("hipblasZhpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSger_v2", ("hipblasSger_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDger_v2", ("hipblasDger_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCgeru_v2", + ("hipblasCgeru_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgerc_v2", + ("hipblasCergc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgeru_v2", + ("hipblasZgeru_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgerc_v2", + ("hipblasZgerc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSsyr_v2", ("hipblasSsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyr_v2", ("hipblasDsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyr_v2", ("hipblasCsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyr_v2", ("hipblasZsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + 
("cublasCher_v2", ("hipblasCher_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher_v2", ("hipblasZher_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspr_v2", ("hipblasSspr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspr_v2", ("hipblasDspr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpr_v2", ("hipblasChpr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpr_v2", ("hipblasZhpr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSsyr2_v2", + ("hipblasSsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsyr2_v2", + ("hipblasDsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyr2_v2", + ("hipblasCsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsyr2_v2", + ("hipblasZsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCher2_v2", + ("hipblasCher2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZher2_v2", + ("hipblasZher2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSspr2_v2", + ("hipblasSspr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDspr2_v2", + ("hipblasDspr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChpr2_v2", + ("hipblasChpr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhpr2_v2", + ("hipblasZhpr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSgemm_v2", ("hipblasSgemm_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgemm_v2", ("hipblasDgemm_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCgemm_v2", + ("hipblasCgemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemm3m", + ("hipblasCgemm3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemm3mEx", + ("hipblasCgemm3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemm_v2", + ("hipblasZgemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemm3m", + 
("hipblasZgemm3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgemmEx", + ("hipblasSgemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasGemmEx", ("hipblasGemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasGemmBatchedEx", + ("hipblasGemmBatchedEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGemmStridedBatchedEx", + ("hipblasGemmStridedBatchedEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemmEx", + ("hipblasCgemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasUint8gemmBias", + ("hipblasUint8gemmBias", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsyrk_v2", + ("hipblasSsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsyrk_v2", + ("hipblasDsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyrk_v2", + ("hipblasCsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsyrk_v2", + ("hipblasZsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyrkEx", + ("hipblasCsyrkEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyrk3mEx", + ("hipblasCsyrk3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCherk_v2", + ("hipblasCherk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCherkEx", + ("hipblasCherkEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCherk3mEx", + ("hipblasCherk3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZherk_v2", + ("hipblasZherk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsyr2k_v2", + ("hipblasSsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsyr2k_v2", + ("hipblasDsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyr2k_v2", + ("hipblasCsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsyr2k_v2", + ("hipblasZsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + 
), + ( + "cublasCher2k_v2", + ("hipblasCher2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZher2k_v2", + ("hipblasZher2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsymm_v2", + ("hipblasSsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsymm_v2", + ("hipblasDsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsymm_v2", + ("hipblasCsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsymm_v2", + ("hipblasZsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChemm_v2", + ("hipblasChemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhemm_v2", + ("hipblasZhemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrsm_v2", + ("hipblasStrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsm_v2", + ("hipblasDtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsm_v2", + ("hipblasCtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsm_v2", + ("hipblasZtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrmm_v2", + ("hipblasStrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrmm_v2", + ("hipblasDtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrmm_v2", + ("hipblasCtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrmm_v2", + ("hipblasZtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSnrm2_v2", ("hipblasSnrm2_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDnrm2_v2", ("hipblasDnrm2_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasScnrm2_v2", + ("hipblasScnrm2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDznrm2_v2", + ("hipblasDznrm2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDotEx", ("hipblasDotEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDotcEx", ("hipblasDotcEx", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED)), + ("cublasSdot_v2", ("hipblasSdot_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDdot_v2", ("hipblasDdot_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCdotu_v2", + ("hipblasCdotu_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCdotc_v2", + ("hipblasCdotc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZdotu_v2", + ("hipblasZdotu_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZdotc_v2", + ("hipblasZdotc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasScalEx", ("hipblasScalEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSscal_v2", ("hipblasSscal_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDscal_v2", ("hipblasDscal_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCscal_v2", + ("hipblasCscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsscal_v2", + ("hipblasCsscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZscal_v2", + ("hipblasZcsal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZdscal_v2", + ("hipblasZdscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasAxpyEx", ("hipblasAxpyEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSaxpy_v2", ("hipblasSaxpy_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDaxpy_v2", ("hipblasDaxpy_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCaxpy_v2", + ("hipblasCaxpy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZaxpy_v2", + ("hipblasZaxpy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasScopy_v2", ("hipblasScopy_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDcopy_v2", ("hipblasDcopy_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCcopy_v2", + ("hipblasCcopy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZcopy_v2", + ("hipblasZcopy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSswap_v2", ("hipblasSswap_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDswap_v2", ("hipblasDswap_v2", 
CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCswap_v2", + ("hipblasCswap_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZswap_v2", + ("hipblasZswap_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasIsamax_v2", ("hipblasIsamax_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamax_v2", ("hipblasIdamax_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasIcamax_v2", + ("hipblasIcamax_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasIzamax_v2", + ("hipblasIzamax_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasIsamin_v2", ("hipblasIsamin_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamin_v2", ("hipblasIdamin_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasIcamin_v2", + ("hipblasIcamin_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasIzamin_v2", + ("hipblasIzamin_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSasum_v2", ("hipblasSasum_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDasum_v2", ("hipblasDasum_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasScasum_v2", + ("hipblasScasum_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDzasum_v2", + ("hipblasDzasum_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSrot_v2", ("hipblasSrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrot_v2", ("hipblasDrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCrot_v2", ("hipblasCrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasCsrot_v2", + ("hipblasCsrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasZrot_v2", ("hipblasZrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasZdrot_v2", + ("hipblasZdrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSrotg_v2", + ("hipblasSrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDrotg_v2", + ("hipblasDrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCrotg_v2", + ("hipblasCrotg_v2", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZrotg_v2", + ("hipblasZrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSrotm_v2", + ("hipblasSrotm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDrotm_v2", + ("hipblasDrotm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSrotmg_v2", + ("hipblasSrotmg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDrotmg_v2", + ("hipblasDrotmg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "CURAND_STATUS_SUCCESS", + ("HIPRAND_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_VERSION_MISMATCH", + ("HIPRAND_STATUS_VERSION_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_NOT_INITIALIZED", + ("HIPRAND_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_ALLOCATION_FAILED", + ("HIPRAND_STATUS_ALLOCATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_TYPE_ERROR", + ("HIPRAND_STATUS_TYPE_ERROR", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_OUT_OF_RANGE", + ("HIPRAND_STATUS_OUT_OF_RANGE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_LENGTH_NOT_MULTIPLE", + ("HIPRAND_STATUS_LENGTH_NOT_MULTIPLE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED", + ( + "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED", + CONV_NUMERIC_LITERAL, + API_RAND, + ), + ), + ( + "CURAND_STATUS_LAUNCH_FAILURE", + ("HIPRAND_STATUS_LAUNCH_FAILURE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_PREEXISTING_FAILURE", + ("HIPRAND_STATUS_PREEXISTING_FAILURE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_INITIALIZATION_FAILED", + ("HIPRAND_STATUS_INITIALIZATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_ARCH_MISMATCH", + ("HIPRAND_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_INTERNAL_ERROR", + ("HIPRAND_STATUS_INTERNAL_ERROR", 
CONV_NUMERIC_LITERAL, API_RAND), + ), + ("CURAND_RNG_TEST", ("HIPRAND_RNG_TEST", CONV_NUMERIC_LITERAL, API_RAND)), + ( + "mtgp32dc_params_fast_11213", + ("mtgp32dc_params_fast_11213", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_DEFAULT", + ("HIPRAND_RNG_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_XORWOW", + ("HIPRAND_RNG_PSEUDO_XORWOW", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_MRG32K3A", + ("HIPRAND_RNG_PSEUDO_MRG32K3A", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_MTGP32", + ("HIPRAND_RNG_PSEUDO_MTGP32", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_MT19937", + ("HIPRAND_RNG_PSEUDO_MT19937", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_PHILOX4_32_10", + ("HIPRAND_RNG_PSEUDO_PHILOX4_32_10", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_DEFAULT", + ("HIPRAND_RNG_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SOBOL32", + ("HIPRAND_RNG_QUASI_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SCRAMBLED_SOBOL32", + ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SOBOL64", + ("HIPRAND_RNG_QUASI_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SCRAMBLED_SOBOL64", + ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "curand_ORDERING_PSEUDO_BEST", + ( + "HIPRAND_ORDERING_PSEUDO_BEST", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_ORDERING_PSEUDO_DEFAULT", + ( + "HIPRAND_ORDERING_PSEUDO_DEFAULT", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_ORDERING_PSEUDO_SEEDED", + ( + "HIPRAND_ORDERING_PSEUDO_SEEDED", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_ORDERING_QUASI_DEFAULT", + ( + "HIPRAND_ORDERING_QUASI_DEFAULT", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + 
), + ( + "curand_DIRECTION_VECTORS_32_JOEKUO6", + ( + "HIPRAND_DIRECTION_VECTORS_32_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", + ( + "HIPRAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_DIRECTION_VECTORS_64_JOEKUO6", + ( + "HIPRAND_DIRECTION_VECTORS_64_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", + ( + "HIPRAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_CHOOSE_BEST", + ("HIPRAND_CHOOSE_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_ITR", + ("HIPRAND_ITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_KNUTH", + ("HIPRAND_KNUTH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_HITR", + ("HIPRAND_HITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ("curand_M1", ("HIPRAND_M1", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)), + ("curand_M2", ("HIPRAND_M2", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)), + ( + "curand_BINARY_SEARCH", + ("HIPRAND_BINARY_SEARCH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_DISCRETE_GAUSS", + ("HIPRAND_DISCRETE_GAUSS", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_REJECTION", + ("HIPRAND_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_DEVICE_API", + ("HIPRAND_DEVICE_API", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_FAST_REJECTION", + ("HIPRAND_FAST_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_3RD", + ("HIPRAND_3RD", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_DEFINITION", + ("HIPRAND_DEFINITION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + 
"curand_POISSON", + ("HIPRAND_POISSON", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ("curandCreateGenerator", ("hiprandCreateGenerator", CONV_MATH_FUNC, API_RAND)), + ( + "curandCreateGeneratorHost", + ("hiprandCreateGeneratorHost", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandCreatePoissonDistribution", + ("hiprandCreatePoissonDistribution", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandDestroyDistribution", + ("hiprandDestroyDistribution", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandDestroyGenerator", + ("hiprandDestroyGenerator", CONV_MATH_FUNC, API_RAND), + ), + ("curandGenerate", ("hiprandGenerate", CONV_MATH_FUNC, API_RAND)), + ( + "curandGenerateLogNormal", + ("hiprandGenerateLogNormal", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandGenerateLogNormalDouble", + ("hiprandGenerateLogNormalDouble", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandGenerateLongLong", + ("hiprandGenerateLongLong", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ("curandGenerateNormal", ("hiprandGenerateNormal", CONV_MATH_FUNC, API_RAND)), + ( + "curandGenerateNormalDouble", + ("hiprandGenerateNormalDouble", CONV_MATH_FUNC, API_RAND), + ), + ("curandGeneratePoisson", ("hiprandGeneratePoisson", CONV_MATH_FUNC, API_RAND)), + ("curandGenerateSeeds", ("hiprandGenerateSeeds", CONV_MATH_FUNC, API_RAND)), + ("curandGenerateUniform", ("hiprandGenerateUniform", CONV_MATH_FUNC, API_RAND)), + ( + "curandGenerateUniformDouble", + ("hiprandGenerateUniformDouble", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandGetDirectionVectors32", + ("hiprandGetDirectionVectors32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandGetDirectionVectors64", + ("hiprandGetDirectionVectors64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandGetProperty", + ("hiprandGetProperty", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandGetScrambleConstants32", + ( + "hiprandGetScrambleConstants32", + CONV_MATH_FUNC, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + 
"curandGetScrambleConstants64", + ( + "hiprandGetScrambleConstants64", + CONV_MATH_FUNC, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ("curandGetVersion", ("hiprandGetVersion", CONV_MATH_FUNC, API_RAND)), + ( + "curandSetGeneratorOffset", + ("hiprandSetGeneratorOffset", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandSetGeneratorOrdering", + ("hiprandSetGeneratorOrdering", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandSetPseudoRandomGeneratorSeed", + ("hiprandSetPseudoRandomGeneratorSeed", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandSetQuasiRandomGeneratorDimensions", + ("hiprandSetQuasiRandomGeneratorDimensions", CONV_MATH_FUNC, API_RAND), + ), + ("curandSetStream", ("hiprandSetStream", CONV_MATH_FUNC, API_RAND)), + ("curand", ("hiprand", CONV_DEVICE_FUNC, API_RAND)), + ("curand4", ("hiprand4", CONV_DEVICE_FUNC, API_RAND)), + ("curand_init", ("hiprand_init", CONV_DEVICE_FUNC, API_RAND)), + ("curand_log_normal", ("hiprand_log_normal", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_log_normal_double", + ("hiprand_log_normal_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_log_normal2", ("hiprand_log_normal2", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_log_normal2_double", + ("hiprand_log_normal2_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_log_normal4", ("hiprand_log_normal4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_log_normal4_double", + ("hiprand_log_normal4_double", CONV_DEVICE_FUNC, API_RAND), + ), + ( + "curand_mtgp32_single", + ("hiprand_mtgp32_single", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_mtgp32_single_specific", + ( + "hiprand_mtgp32_single_specific", + CONV_DEVICE_FUNC, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_mtgp32_specific", + ("hiprand_mtgp32_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ("curand_normal", ("hiprand_normal", CONV_DEVICE_FUNC, API_RAND)), + ( + "curandMakeMTGP32Constants", + ("hiprandMakeMTGP32Constants", CONV_DEVICE_FUNC, API_RAND), + ), + ( + 
"curandMakeMTGP32KernelState", + ("hiprandMakeMTGP32KernelState", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_normal_double", ("hiprand_normal_double", CONV_DEVICE_FUNC, API_RAND)), + ("curand_normal2", ("hiprand_normal2", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_normal2_double", + ("hiprand_normal2_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_normal4", ("hiprand_normal4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_normal4_double", + ("hiprand_normal4_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_uniform", ("hiprand_uniform", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_uniform_double", + ("hiprand_uniform_double", CONV_DEVICE_FUNC, API_RAND), + ), + ( + "curand_uniform2_double", + ("hiprand_uniform2_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_uniform4", ("hiprand_uniform4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_uniform4_double", + ("hiprand_uniform4_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_discrete", ("hiprand_discrete", CONV_DEVICE_FUNC, API_RAND)), + ("curand_discrete4", ("hiprand_discrete4", CONV_DEVICE_FUNC, API_RAND)), + ("curand_poisson", ("hiprand_poisson", CONV_DEVICE_FUNC, API_RAND)), + ("curand_poisson4", ("hiprand_poisson4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_Philox4x32_10", + ("hiprand_Philox4x32_10", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ("mtgp32_kernel_params", ("mtgp32_kernel_params_t", CONV_MATH_FUNC, API_RAND)), + ("CUFFT_FORWARD", ("HIPFFT_FORWARD", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUFFT_INVERSE", ("HIPFFT_BACKWARD", CONV_NUMERIC_LITERAL, API_BLAS)), + ( + "CUFFT_COMPATIBILITY_DEFAULT", + ( + "HIPFFT_COMPATIBILITY_DEFAULT", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ("cuComplex", ("hipblasComplex", CONV_TYPE, API_BLAS)), + ("cuDoubleComplex", ("hipblasDoubleComplex", CONV_TYPE, API_BLAS)), + ("cufftResult_t", ("hipfftResult_t", CONV_TYPE, API_FFT)), + ("cufftResult", ("hipfftResult", CONV_TYPE, API_FFT)), + ("CUFFT_SUCCESS", ("HIPFFT_SUCCESS", 
CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_INVALID_PLAN", ("HIPFFT_INVALID_PLAN", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_ALLOC_FAILED", ("HIPFFT_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_INVALID_TYPE", ("HIPFFT_INVALID_TYPE", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "CUFFT_INVALID_VALUE", + ("HIPFFT_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_INTERNAL_ERROR", + ("HIPFFT_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_FFT), + ), + ("CUFFT_EXEC_FAILED", ("HIPFFT_EXEC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_SETUP_FAILED", ("HIPFFT_SETUP_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_INVALID_SIZE", ("HIPFFT_INVALID_SIZE", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "CUFFT_UNALIGNED_DATA", + ("HIPFFT_UNALIGNED_DATA", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_INCOMPLETE_PARAMETER_LIST", + ("HIPFFT_INCOMPLETE_PARAMETER_LIST", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_INVALID_DEVICE", + ("HIPFFT_INVALID_DEVICE", CONV_NUMERIC_LITERAL, API_FFT), + ), + ("CUFFT_PARSE_ERROR", ("HIPFFT_PARSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_NO_WORKSPACE", ("HIPFFT_NO_WORKSPACE", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "CUFFT_NOT_IMPLEMENTED", + ("HIPFFT_NOT_IMPLEMENTED", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_LICENSE_ERROR", + ("HIPFFT_LICENSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED), + ), + ( + "CUFFT_NOT_SUPPORTED", + ("HIPFFT_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_FFT), + ), + ("cufftType_t", ("hipfftType_t", CONV_TYPE, API_FFT)), + ("cufftType", ("hipfftType", CONV_TYPE, API_FFT)), + ("CUFFT_R2C", ("HIPFFT_R2C", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_C2R", ("HIPFFT_C2R", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_C2C", ("HIPFFT_C2C", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_D2Z", ("HIPFFT_D2Z", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_Z2D", ("HIPFFT_Z2D", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_Z2Z", ("HIPFFT_Z2Z", CONV_NUMERIC_LITERAL, API_FFT)), + ( + 
"cufftCompatibility_t", + ("hipfftCompatibility_t", CONV_TYPE, API_FFT, HIP_UNSUPPORTED), + ), + ( + "cufftCompatibility", + ("hipfftCompatibility", CONV_TYPE, API_FFT, HIP_UNSUPPORTED), + ), + ( + "CUFFT_COMPATIBILITY_FFTW_PADDING", + ( + "HIPFFT_COMPATIBILITY_FFTW_PADDING", + CONV_NUMERIC_LITERAL, + API_FFT, + HIP_UNSUPPORTED, + ), + ), + ("cufftReal", ("hipfftReal", CONV_TYPE, API_FFT)), + ("cufftDoubleReal", ("hipfftDoubleReal", CONV_TYPE, API_FFT)), + ("cufftComplex", ("hipfftComplex", CONV_TYPE, API_FFT)), + ("cufftDoubleComplex", ("hipfftDoubleComplex", CONV_TYPE, API_FFT)), + ("cufftHandle", ("hipfftHandle", CONV_TYPE, API_FFT)), + ("cufftPlan1d", ("hipfftPlan1d", CONV_MATH_FUNC, API_FFT)), + ("cufftPlan2d", ("hipfftPlan2d", CONV_MATH_FUNC, API_FFT)), + ("cufftPlan3d", ("hipfftPlan3d", CONV_MATH_FUNC, API_FFT)), + ("cufftPlanMany", ("hipfftPlanMany", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlan1d", ("hipfftMakePlan1d", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlan2d", ("hipfftMakePlan2d", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlan3d", ("hipfftMakePlan3d", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlanMany", ("hipfftMakePlanMany", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlanMany64", ("hipfftMakePlanMany64", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSizeMany64", ("hipfftGetSizeMany64", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimate1d", ("hipfftEstimate1d", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimate2d", ("hipfftEstimate2d", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimate3d", ("hipfftEstimate3d", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimateMany", ("hipfftEstimateMany", CONV_MATH_FUNC, API_FFT)), + ("cufftCreate", ("hipfftCreate", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize1d", ("hipfftGetSize1d", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize2d", ("hipfftGetSize2d", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize3d", ("hipfftGetSize3d", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSizeMany", ("hipfftGetSizeMany", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize", ("hipfftGetSize", 
CONV_MATH_FUNC, API_FFT)), + ("cufftSetWorkArea", ("hipfftSetWorkArea", CONV_MATH_FUNC, API_FFT)), + ( + "cufftSetAutoAllocation", + ("hipfftSetAutoAllocation", CONV_MATH_FUNC, API_FFT), + ), + ("cufftExecC2C", ("hipfftExecC2C", CONV_MATH_FUNC, API_FFT)), + ("cufftExecR2C", ("hipfftExecR2C", CONV_MATH_FUNC, API_FFT)), + ("cufftExecC2R", ("hipfftExecC2R", CONV_MATH_FUNC, API_FFT)), + ("cufftExecZ2Z", ("hipfftExecZ2Z", CONV_MATH_FUNC, API_FFT)), + ("cufftExecD2Z", ("hipfftExecD2Z", CONV_MATH_FUNC, API_FFT)), + ("cufftExecZ2D", ("hipfftExecZ2D", CONV_MATH_FUNC, API_FFT)), + ("cufftSetStream", ("hipfftSetStream", CONV_MATH_FUNC, API_FFT)), + ("cufftDestroy", ("hipfftDestroy", CONV_MATH_FUNC, API_FFT)), + ("cufftGetVersion", ("hipfftGetVersion", CONV_MATH_FUNC, API_FFT)), + ( + "cufftGetProperty", + ("hipfftGetProperty", CONV_MATH_FUNC, API_FFT, HIP_UNSUPPORTED), + ), + ("nvrtcResult", ("hiprtcResult", CONV_TYPE, API_RTC)), + ("NVRTC_SUCCESS", ("HIPRTC_SUCCESS", CONV_TYPE, API_RTC)), + ( + "NVRTC_ERROR_OUT_OF_MEMORY", + ("HIPRTC_ERROR_OUT_OF_MEMORY", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_PROGRAM_CREATION_FAILURE", + ("HIPRTC_ERROR_PROGRAM_CREATION_FAILURE", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_INVALID_INPUT", + ("HIPRTC_ERROR_INVALID_INPUT", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_INVALID_PROGRAM", + ("HIPRTC_ERROR_INVALID_PROGRAM", CONV_TYPE, API_RTC), + ), + ("NVRTC_ERROR_COMPILATION", ("HIPRTC_ERROR_COMPILATION", CONV_TYPE, API_RTC)), + ( + "NVRTC_ERROR_BUILTIN_OPERATION_FAILURE", + ("HIPRTC_ERROR_BUILTIN_OPERATION_FAILURE", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", + ("HIPRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID", + ("HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_INTERNAL_ERROR", + ("HIPRTC_ERROR_INTERNAL_ERROR", CONV_TYPE, API_RTC), + ), + ("nvrtcGetErrorString", 
("hiprtcGetErrorString", CONV_JIT, API_RTC)), + ("nvrtcVersion", ("hiprtcVersion", CONV_JIT, API_RTC)), + ("nvrtcProgram", ("hiprtcProgram", CONV_TYPE, API_RTC)), + ("nvrtcAddNameExpression", ("hiprtcAddNameExpression", CONV_JIT, API_RTC)), + ("nvrtcCompileProgram", ("hiprtcCompileProgram", CONV_JIT, API_RTC)), + ("nvrtcCreateProgram", ("hiprtcCreateProgram", CONV_JIT, API_RTC)), + ("nvrtcDestroyProgram", ("hiprtcDestroyProgram", CONV_JIT, API_RTC)), + ("nvrtcGetLoweredName", ("hiprtcGetLoweredName", CONV_JIT, API_RTC)), + ("nvrtcGetProgramLog", ("hiprtcGetProgramLog", CONV_JIT, API_RTC)), + ("nvrtcGetProgramLogSize", ("hiprtcGetProgramLogSize", CONV_JIT, API_RTC)), + ("nvrtcGetPTX", ("hiprtcGetCode", CONV_JIT, API_RTC)), + ("nvrtcGetPTXSize", ("hiprtcGetCodeSize", CONV_JIT, API_RTC)), + ("thrust::cuda", ("thrust::hip", CONV_MATH_FUNC, API_BLAS)), + ( + "cudaCpuDeviceId", + ("hipCpuDeviceId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + # The caffe2 directory does a string match; pytorch does a word-boundary match. + # Patterns such as 'cub::' will not match for pytorch. + # We list all current uses of cub symbols for this reason. 
+ ("cub::", ("hipcub::", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::ArgMax", ("hipcub::ArgMax", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::ArgMin", ("hipcub::ArgMin", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::BLOCK_REDUCE_WARP_REDUCTIONS", ("hipcub::BLOCK_REDUCE_WARP_REDUCTIONS", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::BlockReduce", ("hipcub::BlockReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::BlockScan", ("hipcub::BlockScan", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::CachingDeviceAllocator", ("hipcub::CachingDeviceAllocator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::CountingInputIterator", ("hipcub::CountingInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceRadixSort", ("hipcub::DeviceRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceReduce", ("hipcub::DeviceReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceRunLengthEncode", ("hipcub::DeviceRunLengthEncode", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceScan", ("hipcub::DeviceScan", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceSegmentedRadixSort", ("hipcub::DeviceSegmentedRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceSegmentedReduce", ("hipcub::DeviceSegmentedReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceSelect", ("hipcub::DeviceSelect", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::KeyValuePair", ("hipcub::KeyValuePair", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::Max", ("hipcub::Max", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::Min", ("hipcub::Min", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::Sum", ("hipcub::Sum", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::ArgIndexInputIterator", ("hipcub::ArgIndexInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::TransformInputIterator", ("hipcub::TransformInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::WarpReduce", ("hipcub::WarpReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("nvtxMark", ("roctxMark", CONV_OTHER, API_ROCTX)), + ("nvtxMarkA", ("roctxMarkA", CONV_OTHER, API_ROCTX)), + 
("nvtxRangePushA", ("roctxRangePushA", CONV_OTHER, API_ROCTX)), + ("nvtxRangePop", ("roctxRangePop", CONV_OTHER, API_ROCTX)), + ("nvtxRangeStartA", ("roctxRangeStartA", CONV_OTHER, API_ROCTX)), + ("nvtxRangeEnd", ("roctxRangeStop", CONV_OTHER, API_ROCTX)), + ("nvmlReturn_t", ("rsmi_status_t", CONV_OTHER, API_ROCMSMI)), + ("NVML_SUCCESS", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)), + ("NVML_P2P_CAPS_INDEX_READ", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)), + ("NVML_P2P_STATUS_OK", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)), + ("NVML_ERROR_INSUFFICIENT_SIZE", ("RSMI_STATUS_INSUFFICIENT_SIZE", CONV_OTHER, API_ROCMSMI)), + ("nvmlDevice_t", ("uint32_t", CONV_OTHER, API_ROCMSMI)), + ("nvmlGpuP2PStatus_t", ("bool", CONV_OTHER, API_ROCMSMI)), + ("nvmlProcessInfo_t", ("rsmi_process_info_t", CONV_OTHER, API_ROCMSMI)), + ("nvmlGpuP2PCapsIndex_t", ("uint32_t", CONV_OTHER, API_ROCMSMI)), + ] +) + +CUDA_SPECIAL_MAP = collections.OrderedDict( + [ + # SPARSE + ("cusparseStatus_t", ("hipsparseStatus_t", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseHandle_t", ("hipsparseHandle_t", CONV_MATH_FUNC, API_SPECIAL)), + ("cuComplex", ("hipComplex", CONV_TYPE, API_SPECIAL)), + ("cuDoubleComplex", ("hipDoubleComplex", CONV_TYPE, API_SPECIAL)), + ( + "CUSPARSE_POINTER_MODE_HOST", + ("HIPSPARSE_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ("cusparseOperation_t", ("hipsparseOperation_t", CONV_TYPE, API_SPECIAL)), + ( + "cusparseCreateMatDescr", + ("hipsparseCreateMatDescr", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseCreate", ("hipsparseCreate", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseDestroyMatDescr", + ("hipsparseDestroyMatDescr", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseDestroy", ("hipsparseDestroy", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXcoo2csr", ("hipsparseXcoo2csr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseMatDescr_t", ("hipsparseMatDescr_t", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDiagType_t", ("hipsparseDiagType_t", 
CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_DIAG_TYPE_UNIT", ("HIPSPARSE_DIAG_TYPE_UNIT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_DIAG_TYPE_NON_UNIT", ("HIPSPARSE_DIAG_TYPE_NON_UNIT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseSetMatDiagType", ("hipsparseSetMatDiagType", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseFillMode_t", ("hipsparseFillMode_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_FILL_MODE_UPPER", ("HIPSPARSE_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_FILL_MODE_LOWER", ("HIPSPARSE_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseSetMatFillMode", ("hipsparseSetMatFillMode", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDirection_t", ("hipsparseDirection_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_DIRECTION_ROW", ("HIPSPARSE_DIRECTION_ROW", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_DIRECTION_COLUMN", ("HIPSPARSE_DIRECTION_COLUMN", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseSolvePolicy_t", ("hipsparseSolvePolicy_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_SOLVE_POLICY_NO_LEVEL", ("HIPSPARSE_SOLVE_POLICY_NO_LEVEL", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SOLVE_POLICY_USE_LEVEL", ("HIPSPARSE_SOLVE_POLICY_USE_LEVEL", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseCreateBsrsv2Info", ("hipsparseCreateBsrsv2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateBsrsm2Info", ("hipsparseCreateBsrsm2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyBsrsv2Info", ("hipsparseDestroyBsrsv2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyBsrsm2Info", ("hipsparseDestroyBsrsm2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrmm", ("hipsparseSbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrmm", ("hipsparseDbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrmm", ("hipsparseCbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrmm", ("hipsparseZbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrmv", ("hipsparseSbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrmv", 
("hipsparseDbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrmv", ("hipsparseCbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrmv", ("hipsparseZbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsv2_bufferSize", ("hipsparseSbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsv2_bufferSize", ("hipsparseDbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsv2_bufferSize", ("hipsparseCbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsv2_bufferSize", ("hipsparseZbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsv2_analysis", ("hipsparseSbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsv2_analysis", ("hipsparseDbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsv2_analysis", ("hipsparseCbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsv2_analysis", ("hipsparseZbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsv2_solve", ("hipsparseSbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsv2_solve", ("hipsparseDbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsv2_solve", ("hipsparseCbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsv2_solve", ("hipsparseZbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsm2_bufferSize", ("hipsparseSbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsm2_bufferSize", ("hipsparseDbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsm2_bufferSize", ("hipsparseCbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsm2_bufferSize", ("hipsparseZbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsm2_analysis", ("hipsparseSbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsm2_analysis", ("hipsparseDbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsm2_analysis", ("hipsparseCbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsm2_analysis", 
("hipsparseZbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsm2_solve", ("hipsparseSbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsm2_solve", ("hipsparseDbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsm2_solve", ("hipsparseCbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsm2_solve", ("hipsparseZbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrmm2", ("hipsparseScsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrmm2", ("hipsparseDcsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCcsrmm2", ("hipsparseCcsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZcsrmm2", ("hipsparseZcsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrmm", ("hipsparseScsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrmm", ("hipsparseDcsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseXcsrsort_bufferSizeExt", + ("hipsparseXcsrsort_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseCreateCsrgemm2Info", ("hipsparseCreateCsrgemm2Info", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseDestroyCsrgemm2Info", + ("hipsparseDestroyCsrgemm2Info", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseXcsrgemm2Nnz", ("hipsparseXcsrgemm2Nnz", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgemm2_bufferSizeExt", ("hipsparseDcsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgemm2_bufferSizeExt", ("hipsparseScsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgemm2", ("hipsparseDcsrgemm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgemm2", ("hipsparseScsrgemm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSetPointerMode", ("hipsparseSetPointerMode", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXcsrgeam2Nnz", ("hipsparseXcsrgeam2Nnz", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgeam2_bufferSizeExt", ("hipsparseScsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgeam2_bufferSizeExt", ("hipsparseDcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + 
("cusparseCcsrgeam2_bufferSizeExt", ("hipsparseCcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZcsrgeam2_bufferSizeExt", ("hipsparseZcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgeam2", ("hipsparseScsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgeam2", ("hipsparseDcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCcsrgeam2", ("hipsparseCcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZcsrgeam2", ("hipsparseZcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXcsrsort", ("hipsparseXcsrsort", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXbsrsm2_zeroPivot", ("hipsparseXbsrsm2_zeroPivot", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXbsrsv2_zeroPivot", ("hipsparseXbsrsv2_zeroPivot", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseXcoosort_bufferSizeExt", + ("hipsparseXcoosort_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL), + ), + ( + "cusparseXcoosortByRow", + ("hipsparseXcoosortByRow", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseSetStream", ("hipsparseSetStream", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseCreateIdentityPermutation", + ("hipsparseCreateIdentityPermutation", CONV_MATH_FUNC, API_SPECIAL), + ), + ( + "cusparseSetMatIndexBase", + ("hipsparseSetMatIndexBase", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseSetMatType", ("hipsparseSetMatType", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMV", ("hipsparseSpMV", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMV_bufferSize", ("hipsparseSpMV_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMM", ("hipsparseSpMM", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMM_bufferSize", ("hipsparseSpMM_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateDnMat", ("hipsparseCreateDnMat", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCsrSetStridedBatch", ("hipsparseCsrSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateDnVec", 
("hipsparseCreateDnVec", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyDnMat", ("hipsparseDestroyDnMat", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyDnVec", ("hipsparseDestroyDnVec", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroySpMat", ("hipsparseDestroySpMat", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_destroyDescr", ("hipsparseSpGEMM_destroyDescr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateCoo", ("hipsparseCreateCoo", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_createDescr", ("hipsparseSpGEMM_createDescr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_copy", ("hipsparseSpGEMM_copy", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSDDMM_bufferSize", ("hipsparseSDDMM_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSDDMM_preprocess", ("hipsparseSDDMM_preprocess", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSDDMM", ("hipsparseSDDMM", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_compute", ("hipsparseSpGEMM_compute", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_workEstimation", ("hipsparseSpGEMM_workEstimation", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMatGetSize", ("hipsparseSpMatGetSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCsrSetPointers", ("hipsparseCsrSetPointers", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMVAlg_t", ("hipsparseSpMVAlg_t", CONV_TYPE, API_SPECIAL)), + ("cusparseSpMMAlg_t", ("hipsparseSpMMAlg_t", CONV_TYPE, API_SPECIAL)), + ("cusparseIndexType_t", ("hipsparseIndexType_t", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseMatDescr", ("hipsparseMatDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseDnMatDescr", ("hipsparseDnMatDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseDnVecDescr", ("hipsparseDnVecDescr", 
CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseSpMatDescr", ("hipsparseSpMatDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseSpGEMMDescr", ("hipsparseSpGEMMDescr", CONV_TYPE, API_SPECIAL)), + ("cusparseDnMatDescr_t", ("hipsparseDnMatDescr_t", CONV_TYPE, API_SPECIAL)), + ("cusparseDnVecDescr_t", ("hipsparseDnVecDescr_t", CONV_TYPE, API_SPECIAL)), + ("cusparseSpMatDescr_t", ("hipsparseSpMatDescr_t", CONV_TYPE, API_SPECIAL)), + ("cusparseSpGEMMDescr_t", ("hipsparseSpGEMMDescr_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_INDEX_32I", ("HIPSPARSE_INDEX_32I", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_INDEX_64I", ("HIPSPARSE_INDEX_64I", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_ORDER_COL", ("HIPSPARSE_ORDER_COLUMN", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_MV_ALG_DEFAULT", ("HIPSPARSE_MV_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_MM_ALG_DEFAULT", ("HIPSPARSE_MM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPMM_COO_ALG1", ("HIPSPARSE_SPMM_COO_ALG1", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPMM_COO_ALG2", ("HIPSPARSE_SPMM_COO_ALG2", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_COOMV_ALG", ("HIPSPARSE_COOMV_ALG", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPMM_CSR_ALG1", ("HIPSPARSE_CSRMM_ALG1", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPGEMM_DEFAULT", ("HIPSPARSE_SPGEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SDDMM_ALG_DEFAULT", ("HIPSPARSE_SDDMM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ( + "CUSPARSE_STATUS_SUCCESS", + ("HIPSPARSE_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_NOT_INITIALIZED", + ("HIPSPARSE_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_ALLOC_FAILED", + ("HIPSPARSE_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_INVALID_VALUE", + ("HIPSPARSE_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + 
( + "CUSPARSE_STATUS_MAPPING_ERROR", + ("HIPSPARSE_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_EXECUTION_FAILED", + ("HIPSPARSE_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_INTERNAL_ERROR", + ("HIPSPARSE_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", + ( + "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", + CONV_NUMERIC_LITERAL, + API_SPECIAL, + ), + ), + ( + "CUSPARSE_STATUS_ARCH_MISMATCH", + ("HIPSPARSE_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_ZERO_PIVOT", + ("HIPSPARSE_STATUS_ZERO_PIVOT", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_OPERATION_TRANSPOSE", + ("HIPSPARSE_OPERATION_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_OPERATION_NON_TRANSPOSE", + ("HIPSPARSE_OPERATION_NON_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE", + ( + "HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE", + CONV_NUMERIC_LITERAL, + API_SPECIAL, + ), + ), + ( + "CUSPARSE_INDEX_BASE_ZERO", + ("HIPSPARSE_INDEX_BASE_ZERO", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_INDEX_BASE_ONE", + ("HIPSPARSE_INDEX_BASE_ONE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_MATRIX_TYPE_GENERAL", + ("HIPSPARSE_MATRIX_TYPE_GENERAL", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + # SOLVER + ("cublasOperation_t", ("hipsolverOperation_t", CONV_TYPE, API_SPECIAL)), + ("CUBLAS_OP_N", ("HIPSOLVER_OP_N", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ( + "CUBLAS_OP_T", + ("HIPSOLVER_OP_T", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUBLAS_OP_C", + ("HIPSOLVER_OP_C", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ("cublasFillMode_t", ("hipsolverFillMode_t", CONV_TYPE, API_SPECIAL)), + ( + "CUBLAS_FILL_MODE_LOWER", + ("HIPSOLVER_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUBLAS_FILL_MODE_UPPER", + 
("HIPSOLVER_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ("cublasSideMode_t", ("hipsolverSideMode_t", CONV_TYPE, API_SPECIAL)), + ("CUBLAS_SIDE_LEFT", ("HIPSOLVER_SIDE_LEFT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUBLAS_SIDE_RIGHT", ("HIPSOLVER_SIDE_RIGHT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + + ("cusolverEigMode_t", ("hipsolverEigMode_t", CONV_TYPE, API_SPECIAL)), + ("CUSOLVER_EIG_MODE_VECTOR", ("HIPSOLVER_EIG_MODE_VECTOR", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSOLVER_EIG_MODE_NOVECTOR", ("HIPSOLVER_EIG_MODE_NOVECTOR", CONV_NUMERIC_LITERAL, API_SPECIAL)), + + ("syevjInfo_t", ("hipsolverSyevjInfo_t", CONV_TYPE, API_SPECIAL)), + ("cusolverDnCreateSyevjInfo", ("hipsolverDnCreateSyevjInfo", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnXsyevjSetSortEig", ("hipsolverDnXsyevjSetSortEig", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnDestroySyevjInfo", ("hipsolverDnDestroySyevjInfo", CONV_MATH_FUNC, API_SPECIAL)), + + ("gesvdjInfo_t", ("hipsolverGesvdjInfo_t", CONV_TYPE, API_SPECIAL)), + ("cusolverDnCreateGesvdjInfo", ("hipsolverDnCreateGesvdjInfo", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnXgesvdjSetSortEig", ("hipsolverDnXgesvdjSetSortEig", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnDestroyGesvdjInfo", ("hipsolverDnDestroyGesvdjInfo", CONV_MATH_FUNC, API_SPECIAL)), + + ("cusolverDnHandle_t", ("hipsolverDnHandle_t", CONV_TYPE, API_SPECIAL)), + ("cusolverDnCreate", ("hipsolverDnCreate", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnSetStream", ("hipsolverDnSetStream", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnDestroy", ("hipsolverDnDestroy", CONV_MATH_FUNC, API_SPECIAL)), + + # from aten/src/ATen/native/hip/linalg/HIPSolver.cpp + ('cusolverDnParams_t', ('hipsolverDnParams_t', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgeqrf', ('hipsolverDnCgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgeqrf_bufferSize', ('hipsolverDnCgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvd', ('hipsolverDnCgesvd', CONV_MATH_FUNC, 
API_SPECIAL)), + ('cusolverDnCgesvd_bufferSize', ('hipsolverDnCgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdj', ('hipsolverDnCgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdjBatched', ('hipsolverDnCgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdjBatched_bufferSize', ('hipsolverDnCgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdj_bufferSize', ('hipsolverDnCgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgetrf', ('hipsolverDnCgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgetrf_bufferSize', ('hipsolverDnCgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgetrs', ('hipsolverDnCgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevd', ('hipsolverDnCheevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevd_bufferSize', ('hipsolverDnCheevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevj', ('hipsolverDnCheevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevjBatched', ('hipsolverDnCheevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevjBatched_bufferSize', ('hipsolverDnCheevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevj_bufferSize', ('hipsolverDnCheevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrf', ('hipsolverDnCpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrfBatched', ('hipsolverDnCpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrf_bufferSize', ('hipsolverDnCpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrs', ('hipsolverDnCpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrsBatched', ('hipsolverDnCpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCungqr', ('hipsolverDnCungqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCungqr_bufferSize', ('hipsolverDnCungqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCunmqr', ('hipsolverDnCunmqr', CONV_MATH_FUNC, API_SPECIAL)), + 
('cusolverDnCunmqr_bufferSize', ('hipsolverDnCunmqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgeqrf', ('hipsolverDnDgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgeqrf_bufferSize', ('hipsolverDnDgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvd', ('hipsolverDnDgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvd_bufferSize', ('hipsolverDnDgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdj', ('hipsolverDnDgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdjBatched', ('hipsolverDnDgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdjBatched_bufferSize', ('hipsolverDnDgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdj_bufferSize', ('hipsolverDnDgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgetrf', ('hipsolverDnDgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgetrf_bufferSize', ('hipsolverDnDgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgetrs', ('hipsolverDnDgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDorgqr', ('hipsolverDnDorgqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDorgqr_bufferSize', ('hipsolverDnDorgqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDormqr', ('hipsolverDnDormqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDormqr_bufferSize', ('hipsolverDnDormqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrf', ('hipsolverDnDpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrfBatched', ('hipsolverDnDpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrf_bufferSize', ('hipsolverDnDpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrs', ('hipsolverDnDpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrsBatched', ('hipsolverDnDpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevd', ('hipsolverDnDsyevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevd_bufferSize', ('hipsolverDnDsyevd_bufferSize', 
CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevj', ('hipsolverDnDsyevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevjBatched', ('hipsolverDnDsyevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevjBatched_bufferSize', ('hipsolverDnDsyevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevj_bufferSize', ('hipsolverDnDsyevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgeqrf', ('hipsolverDnSgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgeqrf_bufferSize', ('hipsolverDnSgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvd', ('hipsolverDnSgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvd_bufferSize', ('hipsolverDnSgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdj', ('hipsolverDnSgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdjBatched', ('hipsolverDnSgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdjBatched_bufferSize', ('hipsolverDnSgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdj_bufferSize', ('hipsolverDnSgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgetrf', ('hipsolverDnSgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgetrf_bufferSize', ('hipsolverDnSgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgetrs', ('hipsolverDnSgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSorgqr', ('hipsolverDnSorgqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSorgqr_bufferSize', ('hipsolverDnSorgqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSormqr', ('hipsolverDnSormqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSormqr_bufferSize', ('hipsolverDnSormqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrf', ('hipsolverDnSpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrfBatched', ('hipsolverDnSpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrf_bufferSize', ('hipsolverDnSpotrf_bufferSize', CONV_MATH_FUNC, 
API_SPECIAL)), + ('cusolverDnSpotrs', ('hipsolverDnSpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrsBatched', ('hipsolverDnSpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevd', ('hipsolverDnSsyevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevd_bufferSize', ('hipsolverDnSsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevj', ('hipsolverDnSsyevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevjBatched', ('hipsolverDnSsyevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevjBatched_bufferSize', ('hipsolverDnSsyevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevj_bufferSize', ('hipsolverDnSsyevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXgeqrf', ('hipsolverDnXgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXgeqrf_bufferSize', ('hipsolverDnXgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXpotrf', ('hipsolverDnXpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXpotrf_bufferSize', ('hipsolverDnXpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXpotrs', ('hipsolverDnXpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXsyevd', ('hipsolverDnXsyevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXsyevd_bufferSize', ('hipsolverDnXsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgeqrf', ('hipsolverDnZgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgeqrf_bufferSize', ('hipsolverDnZgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvd', ('hipsolverDnZgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvd_bufferSize', ('hipsolverDnZgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdj', ('hipsolverDnZgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdjBatched', ('hipsolverDnZgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdjBatched_bufferSize', ('hipsolverDnZgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdj_bufferSize', 
('hipsolverDnZgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgetrf', ('hipsolverDnZgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgetrf_bufferSize', ('hipsolverDnZgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgetrs', ('hipsolverDnZgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevd', ('hipsolverDnZheevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevd_bufferSize', ('hipsolverDnZheevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevj', ('hipsolverDnZheevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevjBatched', ('hipsolverDnZheevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevjBatched_bufferSize', ('hipsolverDnZheevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevj_bufferSize', ('hipsolverDnZheevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrf', ('hipsolverDnZpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrfBatched', ('hipsolverDnZpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrf_bufferSize', ('hipsolverDnZpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrs', ('hipsolverDnZpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrsBatched', ('hipsolverDnZpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZungqr', ('hipsolverDnZungqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZungqr_bufferSize', ('hipsolverDnZungqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZunmqr', ('hipsolverDnZunmqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZunmqr_bufferSize', ('hipsolverDnZunmqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + + # sytrf + ('cusolverDnDsytrf_bufferSize', ('hipsolverDnDsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsytrf_bufferSize', ('hipsolverDnSsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZsytrf_bufferSize', ('hipsolverDnZsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCsytrf_bufferSize', 
('hipsolverDnCsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsytrf', ('hipsolverDnDsytrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsytrf', ('hipsolverDnSsytrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZsytrf', ('hipsolverDnZsytrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCsytrf', ('hipsolverDnCsytrf', CONV_MATH_FUNC, API_SPECIAL)), + + # gesdva strided + ( + 'cusolverDnSgesvdaStridedBatched_bufferSize', + ('hipsolverDnSgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ( + 'cusolverDnDgesvdaStridedBatched_bufferSize', + ('hipsolverDnDgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ( + 'cusolverDnCgesvdaStridedBatched_bufferSize', + ('hipsolverDnCgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ( + 'cusolverDnZgesvdaStridedBatched_bufferSize', + ('hipsolverDnZgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ('cusolverDnSgesvdaStridedBatched', ('hipsolverDnSgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdaStridedBatched', ('hipsolverDnDgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdaStridedBatched', ('hipsolverDnCgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdaStridedBatched', ('hipsolverDnZgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + + # gesvdj SetXXX + ('cusolverDnXgesvdjSetTolerance', ('hipsolverDnXgesvdjSetTolerance', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXgesvdjSetMaxSweeps', ('hipsolverDnXgesvdjSetMaxSweeps', CONV_MATH_FUNC, API_SPECIAL)), + ] +) + +PYTORCH_SPECIFIC_MAPPINGS = collections.OrderedDict( + [ + ("USE_CUDA", ("USE_ROCM", API_PYTORCH)), + ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)), + ("cudaHostAllocator", ("hipHostAllocator", API_PYTORCH)), + ("cudaDeviceAllocator", ("hipDeviceAllocator", API_PYTORCH)), + ("define MAX_NUM_BLOCKS 200", ("define MAX_NUM_BLOCKS 64", API_PYTORCH)), + ("cuda::CUDAGuard", 
("hip::HIPGuardMasqueradingAsCUDA", API_PYTORCH)), + ("CUDAGuard", ("HIPGuardMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::OptionalCUDAGuard", + ("hip::OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ("OptionalCUDAGuard", ("OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::CUDAStreamGuard", + ("hip::HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ("CUDAStreamGuard", ("HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::OptionalCUDAStreamGuard", + ("hip::OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "OptionalCUDAStreamGuard", + ("OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::CUDAMultiStreamGuard", + ("hip::HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "CUDAMultiStreamGuard", + ("HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + # Only get needs to be transformed this way; all the other ones can go + # straight to the normal versions hip::HIPCachingAllocator + ( + "cuda::CUDACachingAllocator::get", + ("hip::HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH), + ), + ( + "CUDACachingAllocator::get", + ("HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH), + ), + ( + "cuda::CUDACachingAllocator::recordStream", + ( + "hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ( + "CUDACachingAllocator::recordStream", + ( + "HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ( + "cuda::CUDAAllocator::recordStream", + ( + "hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ( + "CUDAAllocator::recordStream", + ( + "HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ("cuda::CUDAStream", ("hip::HIPStreamMasqueradingAsCUDA", API_PYTORCH)), + ("CUDAStream", ("HIPStreamMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::getStreamFromPool", + 
("hip::getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH), + ), + ("getStreamFromPool", ("getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::getDefaultCUDAStream", + ("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::getStreamFromExternal", + ("hip::getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH), + ), + ("getStreamFromExternal", ("getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::getDefaultCUDAStream", + ("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "getDefaultCUDAStream", + ("getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::getCurrentCUDAStream", + ("hip::getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "getCurrentCUDAStream", + ("getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::setCurrentCUDAStream", + ("hip::setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "setCurrentCUDAStream", + ("setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "ATen/cudnn/Handle.h", + ("ATen/miopen/Handle.h", API_PYTORCH), + ), + # TODO: Undo this special-case; see the header for motivation behind this + # hack. It's VERY important this is only applied to PyTorch HIPify. 
+ ( + "c10/cuda/CUDAGuard.h", + ("ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h", API_PYTORCH), + ), + ( + "c10/cuda/CUDACachingAllocator.h", + ("ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h", API_PYTORCH), + ), + ( + "c10/cuda/CUDAStream.h", + ("ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h", API_PYTORCH), + ), + ("gloo/cuda.h", ("gloo/hip.h", API_PYTORCH)), + ( + "gloo/cuda_allreduce_halving_doubling.h", + ("gloo/hip_allreduce_halving_doubling.h", API_PYTORCH), + ), + ( + "gloo/cuda_allreduce_halving_doubling_pipelined.h", + ("gloo/hip_allreduce_halving_doubling_pipelined.h", API_PYTORCH), + ), + ("gloo/cuda_allreduce_ring.h", ("gloo/hip_allreduce_ring.h", API_PYTORCH)), + ( + "gloo/cuda_broadcast_one_to_all.h", + ("gloo/hip_broadcast_one_to_all.h", API_PYTORCH), + ), + ( + "gloo::CudaAllreduceHalvingDoublingPipelined", + ("gloo::HipAllreduceHalvingDoublingPipelined", API_PYTORCH), + ), + ("gloo::CudaBroadcastOneToAll", ("gloo::HipBroadcastOneToAll", API_PYTORCH)), + ("gloo::CudaHostWorkspace", ("gloo::HipHostWorkspace", API_PYTORCH)), + ("gloo::CudaDeviceWorkspace", ("gloo::HipDeviceWorkspace", API_PYTORCH)), + ("CUDNN_RNN_RELU", ("miopenRNNRELU", API_PYTORCH)), + ("CUDNN_RNN_TANH", ("miopenRNNTANH", API_PYTORCH)), + ("CUDNN_LSTM", ("miopenLSTM", API_PYTORCH)), + ("CUDNN_GRU", ("miopenGRU", API_PYTORCH)), + ("cudnnRNNMode_t", ("miopenRNNMode_t", API_PYTORCH)), + ("magma_queue_create_from_cuda", ("magma_queue_create_from_hip", API_PYTORCH)), + ] +) + +CAFFE2_SPECIFIC_MAPPINGS = collections.OrderedDict( + [ + ("cuda_stream", ("hip_stream", API_CAFFE2)), + # if the header is a native hip folder (under hip directory), + # there is no need to add a hip path to it; the trie in hipify script + # takes this mapping order to forbid further replacement + ("/hip/", ("/hip/", API_CAFFE2)), + ("/context_gpu", ("/hip/context_gpu", API_CAFFE2)), + ("/common_gpu", ("/hip/common_gpu", API_CAFFE2)), + ("/cuda_nccl_gpu", ("/hip/hip_nccl_gpu", API_CAFFE2)), + 
("/mixed_utils", ("/hip/mixed_utils", API_CAFFE2)), + ("/operator_fallback_gpu", ("/hip/operator_fallback_gpu", API_CAFFE2)), + ( + "/spatial_batch_norm_op_impl", + ("/hip/spatial_batch_norm_op_impl", API_CAFFE2), + ), + ( + "/recurrent_network_executor_gpu", + ("/hip/recurrent_network_executor_gpu", API_CAFFE2), + ), + ( + "/generate_proposals_op_util_nms_gpu", + ("/hip/generate_proposals_op_util_nms_gpu", API_CAFFE2), + ), + ("/max_pool_with_index_gpu", ("/hip/max_pool_with_index_gpu", API_CAFFE2)), + ("/THCCachingAllocator_gpu", ("/hip/THCCachingAllocator_gpu", API_CAFFE2)), + ("/top_k_heap_selection", ("/hip/top_k_heap_selection", API_CAFFE2)), + ("/top_k_radix_selection", ("/hip/top_k_radix_selection", API_CAFFE2)), + ("/GpuAtomics", ("/hip/GpuAtomics", API_CAFFE2)), + ("/GpuDefs", ("/hip/GpuDefs", API_CAFFE2)), + ("/GpuScanUtils", ("/hip/GpuScanUtils", API_CAFFE2)), + ("/GpuBitonicSort", ("/hip/GpuBitonicSort", API_CAFFE2)), + ("/math/reduce.cuh", ("/math/hip/reduce.cuh", API_CAFFE2)), + ("/sgd/adagrad_fused_op_gpu.cuh", ("/sgd/hip/adagrad_fused_op_gpu.cuh", API_CAFFE2)), + ("/operators/segment_reduction_op_gpu.cuh", ("/operators/hip/segment_reduction_op_gpu.cuh", API_CAFFE2)), + ("/gather_op.cuh", ("/hip/gather_op.cuh", API_CAFFE2)), + ("caffe2/core/common_cudnn.h", ("caffe2/core/hip/common_miopen.h", API_CAFFE2)), + ("REGISTER_CUDA_OPERATOR", ("REGISTER_HIP_OPERATOR", API_CAFFE2)), + ("CUDA_1D_KERNEL_LOOP", ("HIP_1D_KERNEL_LOOP", API_CAFFE2)), + ("CUDAContext", ("HIPContext", API_CAFFE2)), + ("CAFFE_CUDA_NUM_THREADS", ("CAFFE_HIP_NUM_THREADS", API_CAFFE2)), + ("HasCudaGPU", ("HasHipGPU", API_CAFFE2)), + ("__expf", ("expf", API_CAFFE2)), + ("CUBLAS_ENFORCE", ("HIPBLAS_ENFORCE", API_CAFFE2)), + ("CUBLAS_CHECK", ("HIPBLAS_CHECK", API_CAFFE2)), + ("cublas_handle", ("hipblas_handle", API_CAFFE2)), + ("CURAND_ENFORCE", ("HIPRAND_ENFORCE", API_CAFFE2)), + ("CURAND_CHECK", ("HIPRAND_CHECK", API_CAFFE2)), + ("curandGenerateUniform", ("hiprandGenerateUniform", 
API_CAFFE2)), + ("curand_generator", ("hiprand_generator", API_CAFFE2)), + ("CaffeCudaGetDevice", ("CaffeHipGetDevice", API_CAFFE2)), + # do not rename CUDA_KERNEL_ASSERT, lazyInitCUDA in caffe2 sources + # the ordered dict guarantees this pattern will match first, before "CUDA" + ("CUDA_KERNEL_ASSERT", ("CUDA_KERNEL_ASSERT", API_CAFFE2)), + ("lazyInitCUDA", ("lazyInitCUDA", API_CAFFE2)), + ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_CAFFE2)), + ("CUDA", ("HIP", API_CAFFE2)), + ("Cuda", ("Hip", API_CAFFE2)), + ("cuda_", ("hip_", API_CAFFE2)), + ("_cuda", ("_hip", API_CAFFE2)), + ("CUDNN", ("MIOPEN", API_CAFFE2)), + ("CuDNN", ("MIOPEN", API_CAFFE2)), + ("cudnn", ("miopen", API_CAFFE2)), + ("namespace cuda", ("namespace hip", API_CAFFE2)), + ("cuda::CUDAGuard", ("hip::HIPGuard", API_CAFFE2)), + ("cuda::OptionalCUDAGuard", ("hip::OptionalHIPGuard", API_CAFFE2)), + ("cuda::CUDAStreamGuard", ("hip::HIPStreamGuard", API_CAFFE2)), + ("cuda::OptionalCUDAStreamGuard", ("hip::OptionalHIPStreamGuard", API_CAFFE2)), + ("c10/cuda/CUDAGuard.h", ("c10/hip/HIPGuard.h", API_CAFFE2)), + ("gloo/cuda", ("gloo/hip", API_CAFFE2)), + ] +) + +# We must tread very carefully here. Blanket conversions like are done +# in CAFFE2_SPECIFIC_MAPPINGS are not presently supported on PyTorch, +# because a regex for CUDA will also match a filename like CUDAGuard.h, +# but the HIPIFY script doesn't presently move the file and so the substitution +# will be invalid. Instead, we specifically list out every identifier +# and file from c10/cuda which may be used externally, and do substitutions this +# way. 
+# +# NB: if you want a transformation to ONLY apply to the c10/ directory, +# put it as API_CAFFE2 +C10_MAPPINGS = collections.OrderedDict( + [ + ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)), + ("cuda::compat::", ("hip::compat::", API_C10)), + ("c10/cuda/CUDAAlgorithm.h", ("c10/hip/HIPAlgorithm.h", API_C10)), + ("c10/cuda/CUDADeviceAssertion.h", ("c10/hip/HIPDeviceAssertion.h", API_C10)), + ("c10/cuda/CUDADeviceAssertionHost.h", ("c10/hip/HIPDeviceAssertionHost.h", API_C10)), + ("c10/cuda/CUDAException.h", ("c10/hip/HIPException.h", API_C10)), + ("c10/cuda/CUDAMacros.h", ("c10/hip/HIPMacros.h", API_C10)), + ("c10/cuda/CUDAMathCompat.h", ("c10/hip/HIPMathCompat.h", API_C10)), + ("c10/cuda/CUDAFunctions.h", ("c10/hip/HIPFunctions.h", API_C10)), + ("c10/cuda/CUDAMiscFunctions.h", ("c10/hip/HIPMiscFunctions.h", API_C10)), + ("c10/cuda/CUDAStream.h", ("c10/hip/HIPStream.h", API_C10)), + ("c10/cuda/CUDAGraphsC10Utils.h", ("c10/hip/HIPGraphsC10Utils.h", API_C10)), + ("c10/cuda/CUDAAllocatorConfig.h", ("c10/hip/HIPAllocatorConfig.h", API_C10)), + ("c10/cuda/CUDACachingAllocator.h", ("c10/hip/HIPCachingAllocator.h", API_C10)), + ("c10/cuda/impl/CUDATest.h", ("c10/hip/impl/HIPTest.h", API_C10)), + ("c10/cuda/impl/CUDAGuardImpl.h", ("c10/hip/impl/HIPGuardImpl.h", API_C10)), + ( + "c10/cuda/impl/cuda_cmake_macros.h", + ("c10/hip/impl/hip_cmake_macros.h", API_C10), + ), + ("C10_CUDA_CHECK", ("C10_HIP_CHECK", API_C10)), + ("C10_CUDA_CHECK_WARN", ("C10_HIP_CHECK_WARN", API_C10)), + ("C10_CUDA_ERROR_HANDLED", ("C10_HIP_ERROR_HANDLED", API_C10)), + ("C10_CUDA_IGNORE_ERROR", ("C10_HIP_IGNORE_ERROR", API_C10)), + ("C10_CUDA_CLEAR_ERROR", ("C10_HIP_CLEAR_ERROR", API_C10)), + ("c10::cuda", ("c10::hip", API_C10)), + ("cuda::CUDAStream", ("hip::HIPStream", API_C10)), + ("CUDAStream", ("HIPStream", API_C10)), + # This substitution is not permissible, because there's another copy of this + # function in torch/cuda.h + # ("cuda::device_count", ("hip::device_count", API_C10)), + 
("cuda::current_device", ("hip::current_device", API_C10)), + ("cuda::set_device", ("hip::set_device", API_C10)), + ("cuda::device_synchronize", ("hip::device_synchronize", API_C10)), + ("cuda::getStreamFromPool", ("hip::getStreamFromPool", API_C10)), + ("getStreamFromPool", ("getStreamFromPool", API_C10)), + ("cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStream", API_C10)), + ("getDefaultCUDAStream", ("getDefaultHIPStream", API_C10)), + ("cuda::getCurrentCUDAStream", ("hip::getCurrentHIPStream", API_C10)), + ("getCurrentCUDAStream", ("getCurrentHIPStream", API_C10)), + ("cuda::get_cuda_check_prefix", ("hip::get_cuda_check_prefix", API_C10)), + ("cuda::setCurrentCUDAStream", ("hip::setCurrentHIPStream", API_C10)), + ("setCurrentCUDAStream", ("setCurrentHIPStream", API_C10)), + ("cuda::CUDACachingAllocator", ("hip::HIPCachingAllocator", API_C10)), + ("CUDACachingAllocator", ("HIPCachingAllocator", API_C10)), + ("cuda::CUDAAllocatorConfig", ("hip::HIPAllocatorConfig", API_C10)), + ("CUDAAllocatorConfig", ("HIPAllocatorConfig", API_C10)), + ("pinned_use_cuda_host_register", ("pinned_use_hip_host_register", API_C10)), + ("c10::cuda::CUDAAllocator", ("c10::hip::HIPAllocator", API_C10)), + ("cuda::CUDAAllocator", ("hip::HIPAllocator", API_C10)), + ("CUDAAllocator", ("HIPAllocator", API_C10)), + ("C10_CUDA_KERNEL_LAUNCH_CHECK", ("C10_HIP_KERNEL_LAUNCH_CHECK", API_C10)) + ] +) + +# NB: C10 mappings are more specific than Caffe2 mappings, so run them +# first +CUDA_TO_HIP_MAPPINGS = [ + CUDA_IDENTIFIER_MAP, + CUDA_TYPE_NAME_MAP, + CUDA_INCLUDE_MAP, + CUDA_SPECIAL_MAP, + C10_MAPPINGS, + PYTORCH_SPECIFIC_MAPPINGS, + CAFFE2_SPECIFIC_MAPPINGS, +] diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py new file mode 100644 index 0000000000000000000000000000000000000000..4d7bf7233cfccce21f37a3b7320693df51c9b931 --- /dev/null +++ 
b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py @@ -0,0 +1,1129 @@ +#!/usr/bin/env python3 +""" The Python Hipify script. +## +# Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved. +# 2017-2018 Advanced Micro Devices, Inc. and +# Facebook Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +""" +import argparse +import fnmatch +import re +import shutil +import sys +import os + +from . 
import constants +from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS +from .cuda_to_hip_mappings import MATH_TRANSPILATIONS + +from typing import Dict, List, Iterator, Optional +from collections.abc import Mapping, Iterable +from enum import Enum + +class CurrentState(Enum): + INITIALIZED = 1 + DONE = 2 + +class HipifyResult: + def __init__(self, current_state, hipified_path): + self.current_state = current_state + self.hipified_path = hipified_path + self.status = "" + + def __str__(self): + return ("HipifyResult:: current_state: {}, hipified_path : {}, status: {}".format(self.current_state, + self.hipified_path, self.status)) + +HipifyFinalResult = Dict[str, HipifyResult] +HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n" +HIPIFY_FINAL_RESULT: HipifyFinalResult = {} + +# Hardcode the PyTorch template map +"""This dictionary provides the mapping from PyTorch kernel template types +to their actual types.""" +PYTORCH_TEMPLATE_MAP = {"Dtype": "scalar_t", "T": "scalar_t"} + +__all__ = ['InputError', 'openf', 'bcolors', 'GeneratedFileCleaner', 'match_extensions', 'matched_files_iter', + 'preprocess_file_and_save_result', 'compute_stats', 'add_dim3', 'processKernelLaunches', 'find_closure_group', + 'find_bracket_group', 'find_parentheses_group', 'replace_math_functions', 'hip_header_magic', 'replace_extern_shared', + 'get_hip_file_path', 'is_out_of_place', 'is_pytorch_file', 'is_cusparse_file', 'is_special_file', 'is_caffe2_gpu_file', + 'is_caffe2_gpu_file', 'Trie', 'preprocessor', 'file_specific_replacement', 'file_add_header', + 'fix_static_global_kernels', 'extract_arguments', 'str2bool', 'CurrentState', 'HipifyResult', 'hipify'] + + +class InputError(Exception): + # Exception raised for errors in the input. 
+ + def __init__(self, message): + super().__init__(message) + self.message = message + + def __str__(self): + return f"Input error: {self.message}" + + +def openf(filename, mode): + return open(filename, mode, errors='ignore') + + +# Color coding for printing +class bcolors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + +# To the programmer, the output of hipify most likely are intermediates. +# This class allows users of hipify to ask for a cleanup by running the +# hipify and compilation in a with instantiating this context manager class +# with keep_intermediates=False. +# The main usecase is the cpp_extensions, specifically the load method. +# It is a good idea to keep intermediates (in case of errors or to +# not recompile unchanged files), but in cases where you don't want to +# keep them (e.g. in the CI), this can be used to remove files. +class GeneratedFileCleaner: + """Context Manager to clean up generated files""" + def __init__(self, keep_intermediates=False): + self.keep_intermediates = keep_intermediates + self.files_to_clean = set() + self.dirs_to_clean = [] + + def __enter__(self): + return self + + def open(self, fn, *args, **kwargs): + if not os.path.exists(fn): + self.files_to_clean.add(os.path.abspath(fn)) + return open(fn, *args, **kwargs) + + def makedirs(self, dn, exist_ok=False): + parent, n = os.path.split(dn) + if not n: + parent, n = os.path.split(parent) + if parent and n and not os.path.exists(parent): + self.makedirs(parent, exist_ok=True) + if not os.path.isdir(dn) or not exist_ok: + os.mkdir(dn) + self.dirs_to_clean.append(os.path.abspath(dn)) + + def __exit__(self, type, value, traceback): + if not self.keep_intermediates: + for f in self.files_to_clean: + os.unlink(f) + for d in self.dirs_to_clean[::-1]: + os.rmdir(d) + + +def match_extensions(filename: str, extensions: Iterable) -> bool: + """Helper 
method to see if filename ends with certain extension""" + return any(filename.endswith(e) for e in extensions) + + +def _fnmatch(filepath, patterns): + return any(fnmatch.fnmatch(filepath, pattern) for pattern in patterns) + + +def matched_files_iter( + root_path: str, + includes: Iterable = (), + ignores: Iterable = (), + extensions: Iterable = (), + out_of_place_only: bool = False, + is_pytorch_extension: bool = False) -> Iterator[str]: + + exact_matches = set(includes) + + # This is a very rough heuristic; really, we want to avoid scanning + # any file which is not checked into source control, but this script + # needs to work even if you're in a Git or Hg checkout, so easier to + # just block the biggest time sinks that won't matter in the + # end. + for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True): + rel_dirpath = os.path.relpath(abs_dirpath, root_path) + if rel_dirpath == '.': + # Blah blah blah O(n) blah blah + if ".git" in dirs: + dirs.remove(".git") + if "build" in dirs: + dirs.remove("build") + if "third_party" in dirs: + dirs.remove("third_party") + dirs.append("third_party/nvfuser") + for filename in filenames: + filepath = os.path.join(abs_dirpath, filename) + rel_filepath = os.path.join(rel_dirpath, filename) + # We respect extensions, UNLESS you wrote the entire + # filename verbatim, in which case we always accept it + if ( + _fnmatch(filepath, includes) + and (not _fnmatch(filepath, ignores)) + and (match_extensions(filepath, extensions) or filepath in exact_matches) + ): + if not is_pytorch_extension: # for pytorch extensions, consider all files + if not is_pytorch_file(rel_filepath) and not is_caffe2_gpu_file(rel_filepath): + continue + if out_of_place_only and not is_out_of_place(rel_filepath): + continue + yield filepath + + +def preprocess_file_and_save_result( + output_directory: str, + filepath: str, + all_files: Iterable, + header_include_dirs: Iterable, + stats: Dict[str, List], + hip_clang_launch: bool, + 
is_pytorch_extension: bool, + clean_ctx: GeneratedFileCleaner, + show_progress: bool) -> None: + fin_path = os.path.abspath(os.path.join(output_directory, filepath)) + hipify_result = HipifyResult(current_state=CurrentState.INITIALIZED, hipified_path=fin_path) + HIPIFY_FINAL_RESULT[fin_path] = hipify_result + result = preprocessor(output_directory, filepath, all_files, header_include_dirs, stats, + hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) + + # Show what happened + if show_progress and "ignored" not in result.status: + print( + fin_path, "->", + result.hipified_path, result.status, flush=True) + + HIPIFY_FINAL_RESULT[fin_path] = result + + +def compute_stats(stats): + unsupported_calls = {cuda_call for (cuda_call, _filepath) in stats["unsupported_calls"]} + + # Print the number of unsupported calls + print(f"Total number of unsupported CUDA function calls: {len(unsupported_calls):d}") + + # Print the list of unsupported calls + print(", ".join(unsupported_calls)) + + # Print the number of kernel launches + print(f"\nTotal number of replaced kernel launches: {len(stats['kernel_launches']):d}") + + +def add_dim3(kernel_string, cuda_kernel): + '''adds dim3() to the second and third arguments in the kernel launch''' + count = 0 + closure = 0 + kernel_string = kernel_string.replace("<<<", "").replace(">>>", "") + arg_locs: List[Dict[str, int]] = [{} for _ in range(2)] + arg_locs[count]['start'] = 0 + for ind, c in enumerate(kernel_string): + if count > 1: + break + if c == "(": + closure += 1 + elif c == ")": + closure -= 1 + if (c == "," or ind == len(kernel_string) - 1) and closure == 0: + arg_locs[count]['end'] = ind + (c != ",") + count += 1 + if count < 2: + arg_locs[count]['start'] = ind + 1 + + first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1] + second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']] + + first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", 
"").strip(" ") + second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ") + + first_arg_dim3 = f"dim3({first_arg_clean})" + second_arg_dim3 = f"dim3({second_arg_clean})" + + first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3) + second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3) + cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3) + return cuda_kernel + + +RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+') + + +def processKernelLaunches(string, stats): + """ Replace the CUDA style Kernel launches with the HIP style kernel launches.""" + # Concat the namespace with the kernel names. (Find cleaner way of doing this later). + string = RE_KERNEL_LAUNCH.sub(lambda inp: f"{inp.group(1)}{inp.group(2)}::", string) + + def grab_method_and_template(in_kernel): + # The positions for relevant kernel components. + pos = { + "kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]}, + "kernel_name": {"start": -1, "end": -1}, + "template": {"start": -1, "end": -1} + } + + # Count for balancing template + count = {"<>": 0} + + # Status for whether we are parsing a certain item. 
+ START = 0 + AT_TEMPLATE = 1 + AFTER_TEMPLATE = 2 + AT_KERNEL_NAME = 3 + + status = START + + # Parse the string character by character + for i in range(pos["kernel_launch"]["start"] - 1, -1, -1): + char = string[i] + + # Handle Templating Arguments + if status in (START, AT_TEMPLATE): + if char == ">": + if status == START: + status = AT_TEMPLATE + pos["template"]["end"] = i + count["<>"] += 1 + + if char == "<": + count["<>"] -= 1 + if count["<>"] == 0 and (status == AT_TEMPLATE): + pos["template"]["start"] = i + status = AFTER_TEMPLATE + + # Handle Kernel Name + if status != AT_TEMPLATE: + if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}: + if status != AT_KERNEL_NAME: + status = AT_KERNEL_NAME + pos["kernel_name"]["end"] = i + + # Case: Kernel name starts the string. + if i == 0: + pos["kernel_name"]["start"] = 0 + + # Finished + return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])] + + else: + # Potential ending point if we're already traversing a kernel's name. + if status == AT_KERNEL_NAME: + pos["kernel_name"]["start"] = i + + # Finished + return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])] + + def find_kernel_bounds(string): + """Finds the starting and ending points for all kernel launches in the string.""" + kernel_end = 0 + kernel_positions = [] + + # Continue until we cannot find any more kernels anymore. 
+ while string.find("<<<", kernel_end) != -1: + # Get kernel starting position (starting from the previous ending point) + kernel_start = string.find("<<<", kernel_end) + + # Get kernel ending position (adjust end point past the >>>) + kernel_end = string.find(">>>", kernel_start) + 3 + if kernel_end <= 0: + raise InputError("no kernel end found") + + # Add to list of traversed kernels + kernel_positions.append({"start": kernel_start, "end": kernel_end, + "group": string[kernel_start: kernel_end]}) + + return kernel_positions + + # Replace comments and string literals from the code so that find_kernel_bounds does not + # wrongly capture kernels in comments and string literals. + # This function replaces them with "x" to keep positions. + def mask_comments(string): + in_comment = '' + prev_c = '' + new_string = '' + for c in string: + if in_comment == '': + # Outside comments + if c == '/' and prev_c == '/': + in_comment = '//' + elif c == '*' and prev_c == '/': + in_comment = '/*' + elif c == '"' and prev_c != '\\' and prev_c != "'": + in_comment = '"' + elif in_comment == '//': + # In // xxx + if c == '\r' or c == '\n': + in_comment = '' + elif in_comment == '/*': + # In /* xxx */ + if c == '/' and prev_c == '*': + in_comment = '' + elif in_comment == '"': + # In "" + if c == '"' and prev_c != '\\': + in_comment = '' + prev_c = c + if in_comment == '': + new_string += c + else: + new_string += 'x' + return new_string + + # Grab positional ranges of all kernel launches + get_kernel_positions = list(find_kernel_bounds(mask_comments(string))) + output_string = string + + # Replace each CUDA kernel with a HIP kernel. 
def find_closure_group(input_string, start, group):
    """Generalization for finding a balancing closure group

    if group = ["(", ")"], then finds the first balanced parentheses.
    if group = ["{", "}"], then finds the first balanced bracket.

    Given an input string, a starting position in the input string, and the group type,
    find_closure_group returns the positions of group[0] and group[1] as a tuple.

    Example:
    >>> find_closure_group("(hi)", 0, ["(", ")"])
    (0, 3)
    """

    inside_parenthesis = False
    parens = 0
    pos = start
    p_start, p_end = -1, -1

    while pos < len(input_string):
        if input_string[pos] == group[0]:
            if inside_parenthesis is False:
                # First opener: remember where the group begins.
                inside_parenthesis = True
                parens = 1
                p_start = pos
            else:
                parens += 1
        elif input_string[pos] == group[1] and inside_parenthesis:
            parens -= 1

            if parens == 0:
                p_end = pos
                return p_start, p_end

        pos += 1
    # No balanced group found after `start`.
    return None, None


def find_bracket_group(input_string, start):
    """Finds the first balanced bracket group ("{" ... "}")."""
    # NOTE: docstring fixed — this function matches braces, not parentheses
    # (the original docstrings of these two helpers were swapped).
    return find_closure_group(input_string, start, group=["{", "}"])


def find_parentheses_group(input_string, start):
    """Finds the first balanced parentheses group ("(" ... ")")."""
    return find_closure_group(input_string, start, group=["(", ")"])


RE_ASSERT = re.compile(r"\bassert[ ]*\(")


def replace_math_functions(input_string):
    """FIXME: Temporarily replace std:: invocations of math functions
    with non-std:: versions to prevent linker errors NOTE: This
    can lead to correctness issues when running tests, since the
    correct version of the math function (exp/expf) might not get
    called.  Plan is to remove this function once HIP supports
    std:: math function calls inside device code

    """
    output_string = input_string
    for func in MATH_TRANSPILATIONS:
        output_string = output_string.replace(fr'{func}(', f'{MATH_TRANSPILATIONS[func]}(')

    return output_string


RE_SYNCTHREADS = re.compile(r":?:?\b(__syncthreads)\b(\w*\()")


def hip_header_magic(input_string):
    """If the file makes kernel builtin calls and does not include the cuda_runtime.h header,
    then automatically add an #include to match the "magic" includes provided by NVCC.
    TODO:
        Update logic to ignore cases where the cuda_runtime.h is included by another file.
    """

    # Copy the input.
    output_string = input_string

    # If a HIP runtime header is already included, nothing to do.
    headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"]
    if any(re.search(fr'#include ("{ext}"|<{ext}>)', output_string) for ext in headers):
        return output_string

    # Rough heuristic: count device-code markers in the source.
    hasDeviceLogic: int
    hasDeviceLogic = "hipLaunchKernelGGL" in output_string
    hasDeviceLogic += "__global__" in output_string
    hasDeviceLogic += "__shared__" in output_string
    hasDeviceLogic += RE_SYNCTHREADS.search(output_string) is not None

    # If device logic found, provide the necessary header.
    if hasDeviceLogic:
        output_string = '#include "hip/hip_runtime.h"\n' + input_string

    return output_string
RE_EXTERN_SHARED = re.compile(r"extern\s+([\w\(\)]+)?\s*__shared__\s+([\w:<>\s]+)\s+(\w+)\s*\[\s*\]\s*;")


def replace_extern_shared(input_string):
    """Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead.
    https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md#__shared__
    Example:
        "extern __shared__ char smemChar[];" => "HIP_DYNAMIC_SHARED( char, smemChar)"
        "extern __shared__ unsigned char smem[];" => "HIP_DYNAMIC_SHARED( unsigned char, my_smem)"
    """
    return RE_EXTERN_SHARED.sub(
        lambda inp: f"HIP_DYNAMIC_SHARED({inp.group(1) or ''} {inp.group(2)}, {inp.group(3)})",
        input_string)


def get_hip_file_path(rel_filepath, is_pytorch_extension=False):
    """
    Returns the new name of the hipified file
    """
    # Some PyTorch source files are HIPified in place; is_out_of_place tells us
    # whether this file keeps its original path.
    assert not os.path.isabs(rel_filepath)
    if not is_pytorch_extension and not is_out_of_place(rel_filepath):
        return rel_filepath

    dirpath, filename = os.path.split(rel_filepath)
    root, ext = os.path.splitext(filename)

    # Naming recipe (so the hipified file never overwrites the original):
    #   - replace "cuda"/"CUDA" (and "THC" -> "THH") in the directory path
    #   - replace "cuda"/"CUDA" (and usually "THC") in the file name
    #   - always rename '.cu' -> '.hip' (these need the HIP compiler)
    #   - non-extension build: if the directory did not change, insert a "hip"
    #     folder as the file's direct parent
    #   - extension build: if neither directory nor filename changed, append
    #     "_hip" to the file's stem

    if ext == '.cu':
        ext = '.hip'

    orig_filename = filename
    orig_dirpath = dirpath

    dirpath = dirpath.replace('cuda', 'hip')
    dirpath = dirpath.replace('CUDA', 'HIP')
    dirpath = dirpath.replace('THC', 'THH')

    root = root.replace('cuda', 'hip')
    root = root.replace('CUDA', 'HIP')
    # Special case to handle caffe2/core/THCCachingAllocator
    if dirpath != "caffe2/core":
        root = root.replace('THC', 'THH')

    if not is_pytorch_extension and dirpath == orig_dirpath:
        dirpath = os.path.join(dirpath, 'hip')

    if is_pytorch_extension and dirpath == orig_dirpath and (root + ext) == orig_filename:
        root = root + "_hip"

    return os.path.join(dirpath, root + ext)
def is_out_of_place(rel_filepath):
    """Return True when the file must be hipified to a NEW path (never in place)."""
    assert not os.path.isabs(rel_filepath)
    # These trees are hipified in place.
    in_place_prefixes = ("torch/", "third_party/nvfuser/", "tools/autograd/templates/")
    return not rel_filepath.startswith(in_place_prefixes)


# Keep this synchronized with includes/ignores in build_amd.py
def is_pytorch_file(rel_filepath):
    """Return True when the file belongs to the PyTorch (not Caffe2) mapping set."""
    assert not os.path.isabs(rel_filepath)
    if rel_filepath.startswith("aten/"):
        # ATen core is excluded from the PyTorch mapping set.
        return not rel_filepath.startswith("aten/src/ATen/core/")
    return rel_filepath.startswith(("torch/", "third_party/nvfuser/", "tools/autograd/templates/"))


def is_cusparse_file(rel_filepath):
    """PyTorch files with 'sparse' in the path use the cuSPARSE-specific mappings."""
    return is_pytorch_file(rel_filepath) and "sparse" in rel_filepath.lower()


def is_special_file(rel_filepath):
    """PyTorch 'sparse'/'linalg' files prefer API_SPECIAL mappings (hipSPARSE/hipSOLVER)."""
    if not is_pytorch_file(rel_filepath):
        return False
    lowered = rel_filepath.lower()
    if "sparse" in lowered:
        return True
    if "linalg" in lowered:
        # Don't use "special" mappings for this specific linalg cublas file.
        return "batchlinearalgebralibblas" not in lowered
    return False

def is_caffe2_gpu_file(rel_filepath):
    """Return True when the file belongs to Caffe2's GPU (non-cuDNN) sources."""
    assert not os.path.isabs(rel_filepath)
    if rel_filepath.startswith("c10/cuda"):
        return True
    filename = os.path.basename(rel_filepath)
    _, ext = os.path.splitext(filename)
    return ('gpu' in filename or ext in ['.cu', '.cuh']) and ('cudnn' not in filename)
# Cribbed from https://stackoverflow.com/questions/42742810/speed-up-millions-of-regex-replacements-in-python-3/42789508#42789508
class Trie:
    """Regex::Trie in Python. Creates a Trie out of a list of words. The trie can be exported to a Regex pattern.
    The corresponding Regex should match much faster than a simple Regex union."""

    def __init__(self):
        # Nested dicts keyed by character; '' marks a complete word.
        self.data = {}

    def add(self, word):
        """Insert one word into the trie."""
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {}) or node.setdefault(ch, {})
            # setdefault keeps an existing (possibly empty) child dict
        node[''] = 1

    def dump(self):
        """Return the raw trie dict."""
        return self.data

    def quote(self, char):
        """Escape a single character for use inside a regex."""
        return re.escape(char)

    def _pattern(self, pData):
        # Recursively serialize a trie node to a regex fragment.
        data = pData
        if "" in data and len(data.keys()) == 1:
            # Leaf-only node: nothing to emit for this branch.
            return None

        alt = []      # multi-character alternatives
        cc = []       # single characters, collapsed into a character class
        q = 0         # whether this node also terminates a word (optional suffix)
        for char in sorted(data.keys()):
            if isinstance(data[char], dict):
                try:
                    recurse = self._pattern(data[char])
                    alt.append(self.quote(char) + recurse)
                except Exception:
                    # recurse was None (leaf): the char stands alone.
                    cc.append(self.quote(char))
            else:
                q = 1
        cconly = not len(alt) > 0

        if len(cc) > 0:
            if len(cc) == 1:
                alt.append(cc[0])
            else:
                alt.append('[' + ''.join(cc) + ']')

        if len(alt) == 1:
            result = alt[0]
        else:
            result = "(?:" + "|".join(alt) + ")"

        if q:
            if cconly:
                result += "?"
            else:
                result = f"(?:{result})?"
        return result

    def pattern(self):
        """Serialize the whole trie to one regex pattern string."""
        return self._pattern(self.dump())


CAFFE2_TRIE = Trie()
CAFFE2_MAP = {}
PYTORCH_TRIE = Trie()
PYTORCH_MAP: Dict[str, object] = {}

# In PyTorch, we map cuBLAS->rocBLAS and cuSPARSE->hipSPARSE. Note the prefix, roc versus hip.
# The 'hip' APIs offer a more direct CUDA-friendly mapping, but calling rocBLAS directly has better performance.
# Unfortunately, the roc* types and hip* types differ, i.e., rocblas_float_complex versus hipComplex.
# In the case of SPARSE, we must use the hip types for complex instead of the roc types,
# but the pytorch mappings assume roc. Therefore, we create a new SPARSE mapping that has a higher priority.
# Its mappings will trigger first, and only when a miss occurs will the lower-priority pytorch mapping take place.
# When a file contains "sparse" in the filename, a mapping marked with API_SPARSE is preferred over other choices.
# Similarly, "linalg" files require rocBLAS -> hipSOLVER so they also need special handling.
PYTORCH_SPECIAL_MAP = {}

# Populate the tries and lookup maps from the master CUDA->HIP mapping tables.
for mapping in CUDA_TO_HIP_MAPPINGS:
    assert isinstance(mapping, Mapping)
    for src, value in mapping.items():
        dst = value[0]
        meta_data = value[1:]
        if constants.API_CAFFE2 not in meta_data:
            PYTORCH_TRIE.add(src)
            # If src is already mapped and dst belongs to API_SPECIAL,
            # keep the existing mapping and store dst separately.
            if constants.API_SPECIAL in meta_data and PYTORCH_MAP.get(src, ""):
                PYTORCH_SPECIAL_MAP[src] = dst
            else:
                PYTORCH_MAP[src] = dst
        if constants.API_PYTORCH not in meta_data and constants.API_SPECIAL not in meta_data:
            CAFFE2_TRIE.add(src)
            CAFFE2_MAP[src] = dst
RE_CAFFE2_PREPROCESSOR = re.compile(CAFFE2_TRIE.pattern())
RE_PYTORCH_PREPROCESSOR = re.compile(fr'(?<=\W)({PYTORCH_TRIE.pattern()})(?=\W)')

RE_QUOTE_HEADER = re.compile(r'#include "([^"]+)"')
RE_ANGLE_HEADER = re.compile(r'#include <([^>]+)>')
RE_THC_GENERIC_FILE = re.compile(r'#define THC_GENERIC_FILE "([^"]+)"')
RE_CU_SUFFIX = re.compile(r'\.cu\b')  # be careful not to pick up .cuh

"""
Returns a HipifyResult object with the following details:
    "hipified_path" : absolute path of hipified source file
    "status"        : "ok"      if hipified file was written out
                      "skipped" if an identical hipified file already existed or hipified file couldn't be written out
                      "ignored" if the source file was a hipified file itself or not meant to be hipified
    "current_state" : CurrentState.INITIALIZED if source file is first ready to be hipified
                      CurrentState.DONE if source file is done with hipification process
"""
def preprocessor(
        output_directory: str,
        filepath: str,
        all_files: Iterable,
        header_include_dirs: Iterable,
        stats: Dict[str, List],
        hip_clang_launch: bool,
        is_pytorch_extension: bool,
        clean_ctx: GeneratedFileCleaner,
        show_progress: bool) -> HipifyResult:
    """ Executes the CUDA -> HIP conversion on the specified file. """
    fin_path = os.path.abspath(os.path.join(output_directory, filepath))
    hipify_result = HIPIFY_FINAL_RESULT[fin_path]

    # Files outside the hipify set are ignored outright.
    if filepath not in all_files:
        hipify_result.hipified_path = None
        hipify_result.status = "[ignored, not to be hipified]"
        hipify_result.current_state = CurrentState.DONE
        return hipify_result

    rel_filepath = os.path.relpath(filepath, output_directory)

    with open(fin_path, encoding='utf-8') as fin:
        # A breadcrumb on the first line marks a file that is itself hipify output.
        if fin.readline() == HIPIFY_C_BREADCRUMB:
            hipify_result.hipified_path = None
            hipify_result.status = "[ignored, input is hipified output]"
            hipify_result.current_state = CurrentState.DONE
            return hipify_result
        fin.seek(0)
        output_source = fin.read()

    orig_output_source = output_source

    # get_hip_file_path needs a relative path to work correctly
    fout_path = os.path.abspath(os.path.join(output_directory, get_hip_file_path(rel_filepath, is_pytorch_extension)))
    if not os.path.exists(os.path.dirname(fout_path)):
        clean_ctx.makedirs(os.path.dirname(fout_path))

    # unsupported_calls statistics reporting is broken atm
    def pt_repl(m):
        return PYTORCH_MAP[m.group(0)]

    def pt_special_repl(m):
        # checks SPECIAL map first, and if a miss occurs, falls back to pytorch mappings
        return PYTORCH_SPECIAL_MAP.get(m.group(0), pt_repl(m))

    # Token-level CUDA->HIP substitutions, choosing the mapping set per file kind.
    if is_pytorch_extension:
        output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
    else:
        if is_special_file(rel_filepath):
            output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_special_repl, output_source)
        elif is_pytorch_file(rel_filepath):
            output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
        else:
            def c2_repl(m):
                return CAFFE2_MAP[m.group(0)]
            output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)

    # Header rewrites
    def mk_repl(templ, include_current_dir=True):
        def repl(m):
            f = m.group(1)
            dirpath, filename = os.path.split(f)
            # Known PyTorch/THC CUDA headers map directly via get_hip_file_path.
            if (
                f.startswith(("ATen/cuda",
                              "ATen/native/cuda",
                              "ATen/native/nested/cuda",
                              "ATen/native/quantized/cuda",
                              "ATen/native/sparse/cuda",
                              "ATen/native/transformers/cuda",
                              "THC/")) or
                (f.startswith("THC") and not f.startswith("THCP"))
            ):
                return templ.format(get_hip_file_path(m.group(1), is_pytorch_extension))
            # if filename is one of the files being hipified for this extension
            if (is_pytorch_extension and any(s.endswith(filename) for s in all_files)):
                header_dir = None
                header_filepath = None
                # If include_current_dir True, look first in same dir as the including source file
                if include_current_dir:
                    header_dir_to_check = os.path.dirname(fin_path)
                    header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
                    if os.path.exists(header_path_to_check):
                        header_dir = header_dir_to_check
                        header_filepath = header_path_to_check
                # If not found, look in include dirs one by one and first match wins
                if header_filepath is None:
                    for header_include_dir in header_include_dirs:
                        header_dir_to_check = os.path.join(output_directory, header_include_dir)
                        header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
                        if os.path.exists(header_path_to_check):
                            header_dir = header_dir_to_check
                            header_filepath = header_path_to_check
                # If header file not found, keep as is
                if header_filepath is None:
                    return m.group(0)
                # Hipify header file first if needed
                if header_filepath not in HIPIFY_FINAL_RESULT:
                    preprocess_file_and_save_result(output_directory,
                                                    header_filepath,
                                                    all_files, header_include_dirs, stats, hip_clang_launch,
                                                    is_pytorch_extension, clean_ctx, show_progress)
                elif header_filepath in HIPIFY_FINAL_RESULT:
                    header_result = HIPIFY_FINAL_RESULT[header_filepath]
                    if header_result.current_state == CurrentState.INITIALIZED:
                        # Header is mid-hipification (cycle); compute its output path now.
                        # get_hip_file_path needs a relative path to work correctly
                        header_rel_path = os.path.relpath(header_filepath, output_directory)
                        header_fout_path = os.path.abspath(os.path.join(
                            output_directory, get_hip_file_path(header_rel_path, is_pytorch_extension)))
                        header_result.hipified_path = header_fout_path
                        HIPIFY_FINAL_RESULT[header_filepath] = header_result
                        return templ.format(os.path.relpath(header_fout_path if header_fout_path is not None
                                                            else header_filepath, header_dir))
                hipified_header_filepath = HIPIFY_FINAL_RESULT[header_filepath].hipified_path
                return templ.format(os.path.relpath(hipified_header_filepath if hipified_header_filepath is not None
                                                    else header_filepath, header_dir))

            return m.group(0)
        return repl
    output_source = RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"', True), output_source)
    output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>', False), output_source)
    output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source)

    # CMakeLists.txt rewrites
    if filepath.endswith('CMakeLists.txt'):
        output_source = output_source.replace('CUDA', 'HIP')
        output_source = output_source.replace('THC', 'THH')
        output_source = RE_CU_SUFFIX.sub('.hip', output_source)

    # Perform Kernel Launch Replacements
    if not hip_clang_launch:
        output_source = processKernelLaunches(output_source, stats)

    # Replace std:: with non-std:: versions
    if (filepath.endswith((".cu", ".cuh"))) and "PowKernel" not in filepath:
        output_source = replace_math_functions(output_source)

    # Include header if device code is contained.
    output_source = hip_header_magic(output_source)

    # Replace the extern __shared__
    # NOTE: No longer needed after transition from hcc to hipclang.
    # output_source = replace_extern_shared(output_source)

    # Don't write out identical hipified files for extensions if dirpath has not changed
    if (
        is_pytorch_extension
        and orig_output_source == output_source
        and os.path.dirname(fin_path) == os.path.dirname(fout_path)
    ):
        hipify_result.hipified_path = fin_path
        hipify_result.status = "[skipped, no changes]"
        hipify_result.current_state = CurrentState.DONE
        return hipify_result

    # Add hipify breadcrumb for C-style files to avoid re-hipification
    if fin_path != fout_path and match_extensions(fin_path, (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".hpp")):
        output_source = HIPIFY_C_BREADCRUMB + output_source

    # Only rewrite the output file when its contents would actually change.
    do_write = True
    if os.path.exists(fout_path):
        with open(fout_path, encoding='utf-8') as fout_old:
            do_write = fout_old.read() != output_source
    if do_write:
        try:
            with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:
                fout.write(output_source)
            hipify_result.hipified_path = fout_path
            hipify_result.status = "[ok]"
            hipify_result.current_state = CurrentState.DONE
            return hipify_result
        except PermissionError as e:
            print(f"{bcolors.WARNING}Failed to save {fout_path} with \"{e.strerror}\", leaving {fin_path} unchanged.{bcolors.ENDC}",
                  file=sys.stderr)
            hipify_result.hipified_path = fin_path
            hipify_result.status = "[skipped, no permissions]"
            hipify_result.current_state = CurrentState.DONE
            return hipify_result
    else:
        hipify_result.hipified_path = fout_path
        hipify_result.status = "[skipped, already hipified]"
        hipify_result.current_state = CurrentState.DONE
        return hipify_result
def file_specific_replacement(filepath, search_string, replace_string, strict=False):
    """Replace occurrences of search_string with replace_string inside one file.

    With strict=True only whole-word matches are replaced.
    """
    with openf(filepath, "r+") as f:
        contents = f.read()
        if strict:
            contents = re.sub(fr'\b({re.escape(search_string)})\b', lambda x: replace_string, contents)
        else:
            contents = contents.replace(search_string, replace_string)
        f.seek(0)
        f.write(contents)
        f.truncate()


def file_add_header(filepath, header):
    """Prepend an #include for `header` to the file, quoting it if not already <...>."""
    with openf(filepath, "r+") as f:
        contents = f.read()
        if header[0] != "<" and header[-1] != ">":
            header = f'"{header}"'
        contents = (f'#include {header} \n') + contents
        f.seek(0)
        f.write(contents)
        f.truncate()


def fix_static_global_kernels(in_txt):
    """Static global kernels in HIP results in a compilation error."""
    return in_txt.replace(" __global__ static", "__global__")


RE_INCLUDE = re.compile(r"#include .*\n")


def extract_arguments(start, string):
    """ Return the list of arguments in the upcoming function parameter closure.
        Example:
        string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
        arguments (output):
            '[{'start': 1, 'end': 7},
            {'start': 8, 'end': 16},
            {'start': 17, 'end': 19},
            {'start': 20, 'end': 53}]'
    """

    arguments = []
    # Track parenthesis and template-bracket nesting so commas inside
    # nested calls or template lists don't split arguments.
    closures = {
        "<": 0,
        "(": 0
    }
    current_position = start
    argument_start_pos = current_position + 1

    # Search for final parenthesis
    while current_position < len(string):
        ch = string[current_position]
        if ch == "(":
            closures["("] += 1
        elif ch == ")":
            closures["("] -= 1
        elif ch == "<":
            closures["<"] += 1
        elif ch == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
            # "->" is an arrow, not a template close.
            closures["<"] -= 1

        # All brackets balanced: final argument ends here.
        if closures["("] == 0 and closures["<"] == 0:
            arguments.append({"start": argument_start_pos, "end": current_position})
            break

        # Top-level comma terminates the current argument.
        if closures["("] == 1 and closures["<"] == 0 and ch == ",":
            arguments.append({"start": argument_start_pos, "end": current_position})
            argument_start_pos = current_position + 1

        current_position += 1

    return arguments


def str2bool(v):
    """ArgumentParser doesn't support type=bool. Thus, this helper method will convert
    from possible string types to True / False."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
"""ArgumentParser doesn't support type=bool. Thus, this helper method will convert + from possible string types to True / False.""" + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def hipify( + project_directory: str, + show_detailed: bool = False, + extensions: Iterable = (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"), + header_extensions: Iterable = (".cuh", ".h", ".hpp"), + output_directory: str = "", + header_include_dirs: Iterable = (), + includes: Iterable = ('*',), + extra_files: Iterable = (), + out_of_place_only: bool = False, + ignores: Iterable = (), + show_progress: bool = True, + hip_clang_launch: bool = False, + is_pytorch_extension: bool = False, + hipify_extra_files_only: bool = False, + clean_ctx: Optional[GeneratedFileCleaner] = None +) -> HipifyFinalResult: + if project_directory == "": + project_directory = os.getcwd() + + # Verify the project directory exists. + if not os.path.exists(project_directory): + print("The project folder specified does not exist.") + sys.exit(1) + + # If no output directory, provide a default one. + if not output_directory: + project_directory.rstrip("/") + output_directory = project_directory + "_amd" + + if project_directory != output_directory: + includes = [include.replace(project_directory, output_directory) for include in includes] + ignores = [ignore.replace(project_directory, output_directory) for ignore in ignores] + + # Copy from project directory to output directory if not done already. 
+ if not os.path.exists(output_directory): + shutil.copytree(project_directory, output_directory) + + all_files = list(matched_files_iter(output_directory, includes=includes, + ignores=ignores, extensions=extensions, + out_of_place_only=out_of_place_only, + is_pytorch_extension=is_pytorch_extension)) + all_files_set = set(all_files) + for f in extra_files: + if not os.path.isabs(f): + f = os.path.join(output_directory, f) + if f not in all_files_set: + all_files.append(f) + + # List all files in header_include_paths to ensure they are hipified + from pathlib import Path + for header_include_dir in header_include_dirs: + if os.path.isabs(header_include_dir): + header_include_dir_path = Path(header_include_dir) + else: + header_include_dir_path = Path(os.path.join(output_directory, header_include_dir)) + for path in header_include_dir_path.rglob('*'): + if ( + path.is_file() + and _fnmatch(str(path), includes) + and (not _fnmatch(str(path), ignores)) + and match_extensions(path.name, header_extensions) + ): + all_files.append(str(path)) + + if clean_ctx is None: + clean_ctx = GeneratedFileCleaner(keep_intermediates=True) + + # Preprocessing statistics. + stats: Dict[str, List] = {"unsupported_calls": [], "kernel_launches": []} + + for filepath in (all_files if not hipify_extra_files_only else extra_files): + preprocess_file_and_save_result(output_directory, filepath, all_files, header_include_dirs, + stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) + + print(bcolors.OKGREEN + "Successfully preprocessed all matching files." 
+ bcolors.ENDC, file=sys.stderr) + + # Show detailed summary + if show_detailed: + compute_stats(stats) + + return HIPIFY_FINAL_RESULT diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/version.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/version.py new file mode 100644 index 0000000000000000000000000000000000000000..1f356cc57bfa00a3b251402604c54702fb414c96 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/version.py @@ -0,0 +1 @@ +__version__ = '1.0.0' diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/jit/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/jit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/jit/__init__.py @@ -0,0 +1 @@ + diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cca0fb95146039f6dcaedf23485219904b8ee570 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py @@ -0,0 +1,13 @@ +import tensorboard +from torch._vendor.packaging.version import Version + +if not hasattr(tensorboard, "__version__") or Version( + tensorboard.__version__ +) < Version("1.15"): + raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above") + +del Version +del tensorboard + +from .writer import FileWriter, SummaryWriter # noqa: F401 +from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc 
b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35b91f105c083c34f63b33eb20f58e527b6cdd0f Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5efe9fced2f72915d99810a738584fa060068f24 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9b66c3cd42cc951c7d78f2d7dc92cc8c2222c78 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b77e2815377563b85013ea418cd8c9e24cb2e6bf Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc 
b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9411d226ae274cefe125ccb49dba0c03a8df554e Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b741d70f97e0035ae36d346c524cb35af06979e Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30fbffa69fb3e94ac2d84c047c117420fa6bfd53 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ddf1ea3e5dd296756b501e3aaea3ac770058c35 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc 
b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64bebe1c6318c198a325a458993aee6d7dd2410a Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e701fdda4fc24065f8f0c0614b18a6e9c8e7b231 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..536746026052d92438e62b60a2ffcdbb61c386ab --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py @@ -0,0 +1,822 @@ +import copy +import logging +import os +import re + +from tensorboard.compat.proto.graph_pb2 import GraphDef +from tensorboard.compat.proto.node_def_pb2 import NodeDef +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto + +from caffe2.proto import caffe2_pb2 +from caffe2.python import core, workspace + +from typing import Set, Dict, Tuple, List + +log = logging.getLogger(__name__) + +def _make_unique_name(seen: Set[str], name: str, min_version: int = 0): + """ + Make the name unique by appending a unique number to the name. Used for SSA. + + Args: + seen (set): Set of names that have already been used (with respect to + some context). 
+ name (str): The name to make unique + min_version (number): Starting index. Is incremented continually until + it can make the resulting name unique relative to 'seen'. + + Returns: + x (str): A version of name that is not in seen. + """ + assert name is not None + i = min_version + x = "%s_%d" % (name, i) if i else name + while x in seen: + i += 1 + x = "%s_%d" % (name, i) + seen.add(x) + return x + + +def _rename_tensorflow_style(shapes, blob_name_tracker, ops): + """ + Convert some of the common names in Caffe2 to tensorflow. + + NOTE: The common names in both Caffe2 and Tensorflow are currently + hardcoded, if either side changes at some point, then this code should + change as well. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. The _rename_all() call modifies blob_name_tracker and ops in-place. + """ + WEIGHT = re.compile(r"(_w)$") + WEIGHT_ = re.compile(r"(_w_)") + BN = re.compile(r"(_bn)$") + BN_ = re.compile(r"(_bn_)") + BIAS = re.compile(r"(_b)$") + BIAS_ = re.compile(r"(_b_)") + SCALE = re.compile(r"(_s)$") + SCALE_ = re.compile(r"(_s_)") + SUM = re.compile(r"(_sum)$") + SUM_ = re.compile(r"(_sum_)") + BRANCH = re.compile(r"(_branch)") + + def f(name): + inter_name = WEIGHT_.sub("/weight_", WEIGHT.sub("/weight", name)) + inter_name = BN_.sub("/batchnorm_", BN.sub("/batchnorm", inter_name)) + inter_name = BIAS_.sub("/bias_", BIAS.sub("/bias", inter_name)) + inter_name = SCALE_.sub("/scale_", SCALE.sub("/scale", inter_name)) + inter_name = SUM_.sub("/sum_", SUM.sub("/sum", inter_name)) + new_name = BRANCH.sub("/branch", inter_name) + return new_name + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _convert_to_ssa(shapes, blob_name_tracker, ops): + """ + Convert an operator graph to SSA (i.e. out-of-place). + + i.e. 
blobs will be renamed so that each blob is produced only once. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. Modifies blob_name_tracker and ops in-place. + """ + ir = core.IR(ops) + seen: Set[str] = set() + versioned: Dict[Tuple[str, int], int] = {} + new_shapes = {} + new_blob_name_tracker = {} + + def ssa_name(name: str, versions: Dict[str, int]) -> int: + assert name in versions + version = versions[name] + if (name, version) in versioned: + return versioned[(name, version)] + # Always setting name2 = `{name}_{version}` would work, but we also try + # to avoid a trailing `_0`, so we have to be careful not to introduce + # name collisions, such as (foo_1, 0) = foo_1 = (foo, 1). + # Note: operator names (if any) will be handled later. + new_name = _make_unique_name(seen, name, min_version=version) + versioned[(name, version)] = new_name + # Transfer shape. + if name in shapes: + new_shapes[new_name] = shapes[name] + if blob_name_tracker and name in blob_name_tracker: + new_blob_name_tracker[new_name] = blob_name_tracker[name] + return new_name + + for (op, ssa) in zip(ops, ir.ssa): + assert op is ssa.op + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs) + op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs) + + shapes.clear() + shapes.update(new_shapes) + if blob_name_tracker: + blob_name_tracker.clear() + blob_name_tracker.update(new_blob_name_tracker) + + +def _get_blob_names(ops): + """ + Get all the operator input and output blobs and perform dedup on their names. 
+ + Args: + ops: List of Caffe2 operators to extract inputs and outputs from + + Returns: + set containing distinct inputs and outputs from 'ops' + """ + names = set() + for op in ops: + names.update(op.input) + names.update(op.output) + return {name: name for name in names} + + +def _remap_keys(old_dict, rename_fn): + """ + Rename keys of 'old_dict' according to 'rename_fn'. + + Args: + old_dict: Dictionary (i.e. containing blob_name -> blob_name + relationships.) + rename_fn: Function string -> string for renaming. + + Returns: + None. Modifies old_dict in-place. + """ + new_dict = {rename_fn(key): value for key, value in old_dict.items()} + old_dict.clear() + old_dict.update(new_dict) + + +def _rename_all(shapes, blob_name_tracker, ops, rename_fn): + """ + Rename all the names in the operators. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + rename_fn: Function string -> string that specifies how to rename + + Returns: + None. Modifies shapes, blob_name_tracker and ops in-place using the + specified 'rename_fn'. + """ + seen: Set[str] = set() + renamed: Dict[Tuple[str, int], int] = {} + + def g(name): + """Collision-free version of f.""" + if name is None: + return None + if name in renamed: + return renamed[name] + new_name = _make_unique_name(seen, rename_fn(name)) + renamed[name] = new_name + return new_name + + for op in ops: + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + op.input.extend(g(name) for name in inputs) + op.output.extend(g(name) for name in outputs) + + _remap_keys(shapes, g) + if blob_name_tracker: + _remap_keys(blob_name_tracker, g) + # Rename all operator names (if any) independently so that the + # unique-fication happens only once in _fill_missing_operator_names(). 
+ seen.clear() + renamed.clear() + for op in ops: + op.name = g(op.name) + + +def _add_gradient_scope(shapes, blob_name_tracker, ops): + """ + For all operators or blobs with name containing "_grad", add a "GRADIENTS/" scope. + + Note: breaks graph execution since the blob -> gradient mapping is + hardcoded. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. Modifies shapes, blob_name_tracker and ops in-place by renaming. + """ + + def f(name): + if "_grad" in name: + return f"GRADIENTS/{name}" + else: + return name + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _replace_colons(shapes, blob_name_tracker, ops, repl): + """ + `:i` has a special meaning in Tensorflow. This function replaces all colons with $ to avoid any possible conflicts. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + repl: String representing the text to replace ':' with. Usually this is + '$'. + + Returns: + None. Modifies blob_name_tracker in-place. + + """ + + def f(name): + return name.replace(":", repl) + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _fill_missing_operator_names(ops): + """ + Give missing operators a name. + + We expect C2 operators to be generally unnamed. This gives them a scope + (inferred from their outputs) and a name after their type. Duplicates will + be postfixed by an index. + + Args: + ops: List of Caffe2 operators to assign names to. + + Returns: + None: Modifies 'ops' in-place. + """ + seen = set() + for op in ops: + # Make sure operator names don't collide with blobs. 
+ seen.update(op.input) + seen.update(op.output) + for op in ops: + if op.name: + name = op.name + elif op.output or op.input: + name_list = [os.path.dirname(name) for name in op.output or op.input] + scope = os.path.commonprefix(name_list) + name = os.path.join(scope, op.type) + else: + name = op.type + assert name + op.name = _make_unique_name(seen, name) + + +def _tf_device(device_option): + """ + Handle the devices. + + Args: + device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf, + associated to an operator, that contains information such as + device_type (optional), cuda_gpu_id (optional), node_name (optional, + tells which node the operator should execute on). See caffe2.proto + in caffe2/proto for the full list. + + Returns: + Formatted string representing device information contained in + device_option. + """ + if not device_option.HasField("device_type"): + return "" + if ( + device_option.device_type == caffe2_pb2.CPU + or device_option.device_type == caffe2_pb2.MKLDNN + ): + return "/cpu:*" + if device_option.device_type == caffe2_pb2.CUDA: + return f"/gpu:{device_option.device_id}" + raise Exception("Unhandled device", device_option) + + +def _add_tf_shape(attr_dict, ints): + """ + Convert a list of ints to a TensorShapeProto representing the dimensions of a blob/object. + + Args: + attr_dict: Dictionary to update (usually attributes of a Node) + ints: List of integers representing dimensions of some object. + + Returns: + None. Modifies attr_dict in-place. + """ + shape_proto = TensorShapeProto() + for i in ints: + dim = TensorShapeProto.Dim() + dim.size = i + shape_proto.dim.extend([dim]) + attr_dict["_output_shapes"].list.shape.extend([shape_proto]) + + +def _set_tf_attr(attr_dict, arg): + """ + Add attributes to a node. Key is the arg.name, and values can be shape, floats, strings, ints or an empty list. + + Args: + attr_dict: Dictionary to update (usually attributes of a Node) + arg: Object with name and data fields. + + Returns: + None. 
Modifies attr_dict in-place. + """ + k = arg.name + if k == "shape" and arg.ints: + _add_tf_shape(attr_dict, arg.ints) + return + # Float + if arg.HasField("f"): + attr_dict[k].f = arg.f + return + # Integer + if arg.HasField("i"): + attr_dict[k].i = arg.i + return + # String + if arg.HasField("s"): + attr_dict[k].s = ( + arg.s if isinstance(arg.s, bytes) else str(arg.s).encode("utf-8") + ) + return + if arg.floats: + attr_dict[k].list.f.extend(arg.floats) + return + if arg.ints: + attr_dict[k].list.i.extend(arg.ints) + return + if arg.strings: + attr_dict[k].list.s.extend( + s if isinstance(s, bytes) else str(s).encode("utf-8") for s in arg.strings + ) + return + # The value is an empty list. + attr_dict[k].list.s.extend([]) + + +def _operator_to_node(shapes, op): + """ + Convert an operator to a node in a TF graph. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + op: The Caffe2 operator to convert to a TF graph node. + + Returns: + n: The TF graph node created from op. + """ + assert op.name, op + n = NodeDef() + n.name = op.name + n.input.extend(op.input) + n.op = op.type + n.device = _tf_device(op.device_option) + if shapes: + # Add shapes in order. + for output in op.output: + if output not in shapes: + break + _add_tf_shape(n.attr, shapes[output]) + for arg in op.arg: + _set_tf_attr(n.attr, arg) + return n + + +def _operator_to_node_simp(op, inter_blobs, seen): + """ + Convert the operators to nodes. + + Args: + op: Caffe2 operator to convert to node + inter_blobs: Set of intermediate blobs + seen: Names that have already been used and are not unique + + Returns: + nodes: Nodes representing 'op' and the outputs of 'op' + """ + assert op + nodes = [] + outputs = [o for o in op.output if o not in inter_blobs] + seen.update(outputs) + len_outputs = len(outputs) + if len_outputs == 1: + n = NodeDef() + n.name = outputs[0] + # Here we are sure the name is unique. 
+ n.input.extend(op.input) + n.op = op.type + n.device = _tf_device(op.device_option) + for arg in op.arg: + _set_tf_attr(n.attr, arg) + nodes.append(n) + elif len_outputs > 1: + # Create a name that is likely unique + if op.name: + name = op.name + else: + name_list = list(outputs) + scope = os.path.commonprefix(name_list) + name = os.path.join(scope, op.type) + assert name + op.name = _make_unique_name(seen, name) + device = _tf_device(op.device_option) + + # Create additional output nodes + for output in outputs: + n = NodeDef() + n.name = output + n.input.extend([op.name]) + n.op = "Blob" + n.device = device + nodes.append(n) + + # Node for the current op + n = NodeDef() + n.name = op.name + n.input.extend(op.input) + n.op = op.type + n.device = device + for arg in op.arg: + _set_tf_attr(n.attr, arg) + nodes.append(n) + + return nodes + + +def _blob_to_node(producing_ops, shapes, name): + """ + Convert a blob (operator input or output) to a node in a TF graph. + + Args: + producing_ops: Dictionary of blob name to list of + (producing_op, blob_index within producing_op.output) mapping. + shapes: Dictionary mapping blob names to their shapes/dimensions. + name: String representing the name of this blob. + + Returns: + n: The TF graph node created from this blob. + """ + assert name + n = NodeDef() + n.name = name + # Get all ops that have the blob corresponding to 'name' as one of their + # outputs. See _operators_to_graph_def. + produced_by = producing_ops.get(name, []) + if len(produced_by) > 0: + n.op = "Blob" + else: + # This blob is not produced but is instead a TF Placeholder where a + # value is passed in. 
+ n.op = "Placeholder" + n.input.extend("%s:%d" % (p_op.name, i) for p_op, i in produced_by) + if produced_by: + device = produced_by[0][0].device_option + if all(producer[0].device_option == device for producer in produced_by): + n.device = _tf_device(device) + if shapes and name in shapes: + _add_tf_shape(n.attr, shapes[name]) + return n + + +def _clear_debug_info(ops, perform_clear): + """ + Remove debug information from operators, they are copious. + + Args: + ops: List of Caffe2 operators + perform_clear: Boolean passed from _operators_to_graph_def specifying + whether to remove the debug information. This boolean is passed into + this function to reduce the complexity of _operators_to_graph_def. + + Returns: + None. Modifies the list of Caffe2 operators in-place and removes the + 'debug_info' field. + + """ + if not perform_clear: + return + + for op in ops: + if op.HasField("debug_info"): + op.ClearField("debug_info") + + +def _check_if_forward(blob): + """ + Blobs with names containing '_m' or 'grad' are part of the backward pass. + + This function references facebookresearch/Detectron/detectron/utils/net.py. + + Args: + blob: The blob to inspect + + Returns: + Boolean representing whether this blob is part of the forward pass + """ + # + return blob.find("__m") < 0 or blob.find("grad") < 0 + + +def _check_if_cpu(blob): + """ + Check if the blob's name starts with '_gpu'. + + Args: + blob: The blob to inspect + + Returns: + Boolean representing whether this blob is associated with a gpu + """ + return not blob.startswith("_gpu") + + +def _compute_in_out(ops): + """ + Find the input, intermediate and output nodes of a set of operators. 
+ + Args: + ops: List of Caffe2 operators to look through + + Returns: + input_blobs: The input nodes of the set of operators + inter_blobs: The intermediate nodes of the set of operators + output_blobs: The output nodes of the set of operators + """ + in_blobs = set() + out_blobs = set() + + for op in ops: + for input_blob in op.input: + in_blobs.add(input_blob) + for output_blob in op.output: + out_blobs.add(output_blob) + + input_blobs = list(in_blobs.difference(out_blobs)) + output_blobs = list(out_blobs.difference(in_blobs)) + inter_blobs = {b for b in output_blobs if b.startswith("_")} + output_blobs = [b for b in output_blobs if b not in inter_blobs] + + return input_blobs, inter_blobs, output_blobs + + +def _filter_ops(ops, filter_fn, perform_filter): + """ + Filter unwanted operators based on criteria in 'filter_fn'. + + Args: + ops: List of Caffe2 operators to filter + filter_fn: Criteria function for whether inputs/outputs in an operator + should be filtered. + perform_filter: Boolean passed from _operators_to_graph_def specifying + whether to filter operators + + Returns: + new_ops: Subset of ops containing a subset of their inputs and outputs. + """ + if not perform_filter: + return ops + + new_ops = [] + for op in ops: + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + new_inputs = [i for i in inputs if filter_fn(i)] + new_outputs = [o for o in outputs if filter_fn(o)] + + # Only add the op if output is not empty + if new_outputs: + op.input.extend(new_inputs) + op.output.extend(new_outputs) + new_ops.append(op) + + return new_ops + + +def _operators_to_graph_def( + shapes, + ops, + colon_replacement="$", + with_ssa=True, + with_gradient_scope=True, + blob_name_tracker=None, + show_simplified=False, + custom_rename=None, +): + """ + Convert a set of operators to a graph using the main function. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. 
+ ops: List of Caffe2 operators, representing some computation graph + ### **kwargs (model_to_graph_def, nets_to_graph_def, protos_to_graph_def) ### + colon_replacement: Symbol to replace ':' with. ':i' in TF has a special + meaning, so we need to replace it with a non-conflicting symbol. + with_ssa: Boolean + with_gradient_scope: Boolean + blob_name_tracker: Dictionary tracking names of blobs (inputs/outputs + from operators) + show_simplified: Whether to show a simplified version of the model graph + Sets all of the following values: + clear_debug_info: Boolean representing whether to silence debug + info (which can be very verbose) + show_forward_only: Boolean representing whether to only show + blobs involved in the forward pass + show_cpu_only: Boolean representing whether to only show blobs + that are not associated with a gpu + use_tensorflow_naming: Boolean representing whether to convert + some common Caffe2 naming conventions to their Tensorflow + counterparts + custom_rename: Function string -> string that defines a custom + renaming function to use. + + Returns: + current_graph: GraphDef representing the computation graph formed by the + set of operators. 
+ """ + if blob_name_tracker is not None: + blob_name_tracker.clear() + else: + blob_name_tracker = {} + + blob_name_tracker.update(_get_blob_names(ops)) + + _clear_debug_info(ops, show_simplified) # clear_debug_info + ops = _filter_ops(ops, _check_if_forward, show_simplified) # show_forward_only + ops = _filter_ops(ops, _check_if_cpu, show_simplified) # show_cpu_only + if custom_rename: + _rename_all(shapes, blob_name_tracker, ops, custom_rename) + if colon_replacement: + _replace_colons(shapes, blob_name_tracker, ops, colon_replacement) + if with_ssa: + _convert_to_ssa(shapes, blob_name_tracker, ops) + if with_gradient_scope: + _add_gradient_scope(shapes, blob_name_tracker, ops) + _fill_missing_operator_names(ops) + if show_simplified: # use_tensorflow_naming + _rename_tensorflow_style(shapes, blob_name_tracker, ops) + producing_ops: Dict[caffe2_pb2.OperatorDef, List] = {} + blobs = set() + input_blobs, inter_blobs, _ = _compute_in_out(ops) + current_graph = GraphDef() + seen = set(input_blobs) + for op in ops: + nodes_from_op = ( + _operator_to_node_simp(op, inter_blobs, seen) + if show_simplified + else [_operator_to_node(shapes, op)] + ) # .extend() expects an iterable + current_graph.node.extend(nodes_from_op) + for input_blob in op.input: + blobs.add(input_blob) + for i, output_blob in enumerate(op.output): + blobs.add(output_blob) + producing_ops.setdefault(output_blob, []).append((op, i)) + + if show_simplified: + # Show a cleaner, easier-to-interpret version of the model graph + blobs = input_blobs + + for blob in sorted(blobs): + current_graph.node.extend([_blob_to_node(producing_ops, {}, blob)]) + + return current_graph + + +def _propagate_device_option(net_def): + """ + Propagate the device options from net to operators. + + Args: + net_def: A caffe2_pb2.NetDef representing a computation graph. The graph + consists of Caffe2 operators. + + Returns: + None. Iterates through all ops contained within the net. 
For each op, + modifies the op device_option in-place to be the net device_option + if the op has no pre-existing device_option, and leaves the op as-is + if it already has a device_option. + """ + if not net_def.HasField("device_option"): + return + for op in net_def.op: + if not op.HasField("device_option"): + op.device_option.CopyFrom(net_def.device_option) + + +def _try_get_shapes(nets): + """ + Get missing shapes for all blobs contained in the nets. + + Args: + nets: List of core.Net to extract blob shape information from. + + Returns: + Dictionary containing blob name to shape/dimensions mapping. The net + is a computation graph that is composed of operators, and the + operators have input and output blobs, each with their own dims. + """ + try: + # Note: this will inspect the workspace for better or worse. + # We don't care about the types, only the shapes + shapes, _ = workspace.InferShapesAndTypes(nets) + return shapes + except Exception as e: + log.warning("Failed to compute shapes: %s", e) + return {} + + +def model_to_graph_def(model, **kwargs): + """ + Convert a Caffe2 model to a Tensorflow graph. + + This function extracts 'param_init_net' and 'net' from the model and passes it to nets_to_graph() + for further processing. + + Args: + model (cnn.CNNModelHelper, model_helper.ModelHelper): The model to + extract the nets (instances of core.Net) from. + + Returns: + Call to nets_to_graph_def() with extracted 'param_init_net', 'net' and + **kwargs. See _operators_to_graph_def for detailed **kwargs. + """ + nets = [model.param_init_net, model.net] + return nets_to_graph_def(nets, **kwargs) + + +def nets_to_graph_def(nets, shapes=None, **kwargs): + """ + Convert a set of Caffe2 nets to a Tensorflow graph. + + Args: + nets: List of core.Nets. core.Net is a wrapper around a NetDef protobuf. + The corresponding protobuf can be extracted using .Proto(). + shapes: Dictionary mapping blob names to their shapes/dimensions. 
+ + Returns: + Call to protos_to_graph_def() with the extracted NetDef protobufs and + **kwargs. See _operators_to_graph_def for detailed **kwargs. + """ + # if shapes is None: + # shapes = _try_get_shapes(nets) + # _try_get_shapes(nets) depends on workspace.InferShapesAndTypes(nets), + # which is currently broken (segfault). We omit the shapes for now. + shapes = {} + nets = [copy.deepcopy(net.Proto()) for net in nets] + shapes = copy.deepcopy(shapes) + return protos_to_graph_def(nets, shapes, **kwargs) + + +def protos_to_graph_def(net_defs, shapes=None, **kwargs): + """ + Convert a set of Caffe2 net definitions to a Tensorflow graph. + + Args: + net_defs: List of caffe2_pb2.NetDef protobufs representing computation + graphs. + shapes: Dictionary mapping blob names to their shapes/dimensions. + + Returns: + Call to _operators_to_graph_def() with the extracted operators from the + NetDefs and **kwargs. See _operators_to_graph_def for detailed + **kwargs. + """ + for net in net_defs: + _propagate_device_option(net) + shapes = copy.deepcopy(shapes or {}) + ops = [op for net_def in net_defs for op in net_def.op] + return _operators_to_graph_def(shapes, ops, **kwargs) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py new file mode 100644 index 0000000000000000000000000000000000000000..54bf80a9673034bfe8c3a380205f971559659bb6 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py @@ -0,0 +1,38 @@ +"""This module converts objects into numpy array.""" +import numpy as np +import torch + + +def make_np(x): + """ + Convert an object into numpy array. 
+ + Args: + x: An instance of torch tensor or caffe blob name + + Returns: + numpy.array: Numpy array + """ + if isinstance(x, np.ndarray): + return x + if isinstance(x, str): # Caffe2 will pass name of blob(s) to fetch + return _prepare_caffe2(x) + if np.isscalar(x): + return np.array([x]) + if isinstance(x, torch.Tensor): + return _prepare_pytorch(x) + raise NotImplementedError( + f"Got {type(x)}, but numpy array, torch tensor, or caffe2 blob name are expected." + ) + + +def _prepare_pytorch(x): + x = x.detach().cpu().numpy() + return x + + +def _prepare_caffe2(x): + from caffe2.python import workspace + + x = workspace.FetchBlob(x) + return x diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..afbe68191aa98fcbbbfe1395c7c325e4c6754f9c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py @@ -0,0 +1,85 @@ +import math +import numpy as np +from ._convert_np import make_np +from ._utils import make_grid +from tensorboard.compat import tf +from tensorboard.plugins.projector.projector_config_pb2 import EmbeddingInfo + + +_HAS_GFILE_JOIN = hasattr(tf.io.gfile, "join") + + +def _gfile_join(a, b): + # The join API is different between tensorboard's TF stub and TF: + # https://github.com/tensorflow/tensorboard/issues/6080 + # We need to try both because `tf` may point to either the stub or the real TF. 
+ if _HAS_GFILE_JOIN: + return tf.io.gfile.join(a, b) + else: + fs = tf.io.gfile.get_filesystem(a) + return fs.join(a, b) + + +def make_tsv(metadata, save_path, metadata_header=None): + if not metadata_header: + metadata = [str(x) for x in metadata] + else: + assert len(metadata_header) == len( + metadata[0] + ), "len of header must be equal to the number of columns in metadata" + metadata = ["\t".join(str(e) for e in l) for l in [metadata_header] + metadata] + + metadata_bytes = tf.compat.as_bytes("\n".join(metadata) + "\n") + with tf.io.gfile.GFile(_gfile_join(save_path, "metadata.tsv"), "wb") as f: + f.write(metadata_bytes) + + +# https://github.com/tensorflow/tensorboard/issues/44 image label will be squared +def make_sprite(label_img, save_path): + from PIL import Image + from io import BytesIO + + # this ensures the sprite image has correct dimension as described in + # https://www.tensorflow.org/get_started/embedding_viz + nrow = int(math.ceil((label_img.size(0)) ** 0.5)) + arranged_img_CHW = make_grid(make_np(label_img), ncols=nrow) + + # augment images so that #images equals nrow*nrow + arranged_augment_square_HWC = np.zeros( + (arranged_img_CHW.shape[2], arranged_img_CHW.shape[2], 3) + ) + arranged_img_HWC = arranged_img_CHW.transpose(1, 2, 0) # chw -> hwc + arranged_augment_square_HWC[: arranged_img_HWC.shape[0], :, :] = arranged_img_HWC + im = Image.fromarray(np.uint8((arranged_augment_square_HWC * 255).clip(0, 255))) + + with BytesIO() as buf: + im.save(buf, format="PNG") + im_bytes = buf.getvalue() + + with tf.io.gfile.GFile(_gfile_join(save_path, "sprite.png"), "wb") as f: + f.write(im_bytes) + + +def get_embedding_info(metadata, label_img, subdir, global_step, tag): + info = EmbeddingInfo() + info.tensor_name = f"{tag}:{str(global_step).zfill(5)}" + info.tensor_path = _gfile_join(subdir, "tensors.tsv") + if metadata is not None: + info.metadata_path = _gfile_join(subdir, "metadata.tsv") + if label_img is not None: + info.sprite.image_path = 
def parse(graph):
    """Convert an ONNX ``GraphProto`` into a TensorBoard ``GraphDef``.

    Graph inputs and outputs become "Variable" nodes carrying dtype and shape;
    each operator node carries its attributes flattened into a single
    "parameters" display string.

    Args:
        graph: an ``onnx.GraphProto``, e.g. ``onnx.load(fname).graph``.

    Returns:
        GraphDef: a graph proto renderable by TensorBoard's graph plugin.
    """
    import itertools

    nodes = []

    # Inputs and outputs are value nodes: record their dtype and static shape.
    # (Fix: the original appended these to an intermediate `nodes_proto` list
    # and printed every node name to stdout -- debug leftovers removed.)
    for value in itertools.chain(graph.input, graph.output):
        shapeproto = TensorShapeProto(
            dim=[
                TensorShapeProto.Dim(size=d.dim_value)
                for d in value.type.tensor_type.shape.dim
            ]
        )
        nodes.append(
            NodeDef(
                name=value.name.encode(encoding="utf_8"),
                op="Variable",
                input=[],
                attr={
                    "dtype": AttrValue(type=value.type.tensor_type.elem_type),
                    "shape": AttrValue(shape=shapeproto),
                },
            )
        )

    # Operator nodes: each attribute proto is rendered as "field = value" and
    # joined into one comma-separated string for display.
    for node in graph.node:
        _attr = []
        for s in node.attribute:
            _attr.append(" = ".join([str(f[1]) for f in s.ListFields()]))
        attr = ", ".join(_attr).encode(encoding="utf_8")
        nodes.append(
            NodeDef(
                name=node.output[0].encode(encoding="utf_8"),
                op=node.op_type,
                input=node.input,
                attr={"parameters": AttrValue(s=attr)},
            )
        )

    # Fix: dropped the dead "two pass token replacement" `mapping` dict that
    # was built here but never read.
    # Producer version reverse engineered from standard TensorBoard data.
    return GraphDef(node=nodes, versions=VersionDef(producer=22))
def node_proto(
    name,
    op="UnSpecified",
    input=None,
    dtype=None,
    shape: Optional[tuple] = None,
    outputsize=None,
    attributes="",
):
    """Create an object matching a NodeDef.

    Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/node_def.proto .
    """
    # Normalize `input` into a list: None -> [], bare value -> singleton list.
    if input is None:
        inputs = []
    elif isinstance(input, list):
        inputs = input
    else:
        inputs = [input]
    return NodeDef(
        name=name.encode(encoding="utf_8"),
        op=op,
        input=inputs,
        attr=attr_value_proto(dtype, outputsize, attributes),
    )
class NodeBase:
    """Plain record for one graph node: name, inputs, scope, size, kind, attrs."""

    # TODO; Specify a __slots__ for this class or potentially
    # used namedtuple instead
    def __init__(
        self,
        debugName=None,
        inputs=None,
        scope=None,
        tensor_size=None,
        op_type="UnSpecified",
        attributes="",
    ):
        self.debugName = debugName
        self.inputs = inputs
        self.tensor_size = tensor_size
        self.kind = op_type
        self.attributes = attributes
        self.scope = scope

    def __repr__(self):
        # Dump every non-dunder attribute as "name: value<type>" for debugging.
        lines = [str(type(self))]
        lines.extend(
            f"{attr}: {getattr(self, attr)}{type(getattr(self, attr))}"
            for attr in dir(self)
            if "__" not in attr
        )
        return "\n".join(lines) + "\n\n"
class NodePyOP(NodePy):
    """Wrapper around an operator node of a TorchScript graph."""

    def __init__(self, node_cpp):
        super().__init__(node_cpp, methods_OP)
        # Stringify the node's attribute dict for display.
        # Replace single quote which causes strange behavior in TensorBoard
        # TODO: See if we can remove this in the future
        raw_attrs = {
            name: _node_get(node_cpp, name) for name in node_cpp.attributeNames()
        }
        self.attributes = str(raw_attrs).replace("'", " ")
        self.kind = node_cpp.kind()
+ """ + + def __init__(self): + self.nodes_op = [] + self.nodes_io = OrderedDict() + self.unique_name_to_scoped_name = {} + self.shallowest_scope_name = "default" + self.scope_name_appeared = [] + + def append(self, x): + if isinstance(x, NodePyIO): + self.nodes_io[x.debugName] = x + if isinstance(x, NodePyOP): + self.nodes_op.append(x) + + def printall(self): + print("all nodes") + for node in self.nodes_op: + print(node) + for key in self.nodes_io: + print(self.nodes_io[key]) + + def find_common_root(self): + for fullscope in self.scope_name_appeared: + if fullscope: + self.shallowest_scope_name = fullscope.split("/")[0] + + def populate_namespace_from_OP_to_IO(self): + for node in self.nodes_op: + for node_output, outputSize in zip(node.outputs, node.outputstensor_size): + self.scope_name_appeared.append(node.scopeName) + self.nodes_io[node_output] = NodeBase( + node_output, + node.inputs, + node.scopeName, + outputSize, + op_type=node.kind, + attributes=node.attributes, + ) + + self.find_common_root() + + for node in self.nodes_op: + for input_node_id in node.inputs: + self.unique_name_to_scoped_name[input_node_id] = ( + node.scopeName + "/" + input_node_id + ) + + for key, node in self.nodes_io.items(): + if type(node) == NodeBase: + self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName + if hasattr(node, "input_or_output"): + self.unique_name_to_scoped_name[key] = ( + node.input_or_output + "/" + node.debugName + ) + + if hasattr(node, "scope") and node.scope is not None: + self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName + if node.scope == "" and self.shallowest_scope_name: + self.unique_name_to_scoped_name[node.debugName] = ( + self.shallowest_scope_name + "/" + node.debugName + ) + + # replace name + for key, node in self.nodes_io.items(): + self.nodes_io[key].inputs = [ + self.unique_name_to_scoped_name[node_input_id] + for node_input_id in node.inputs + ] + if node.debugName in self.unique_name_to_scoped_name: + 
self.nodes_io[key].debugName = self.unique_name_to_scoped_name[ + node.debugName + ] + + def to_proto(self): + """Convert graph representation of GraphPy object to TensorBoard required format.""" + # TODO: compute correct memory usage and CPU time once + # PyTorch supports it + nodes = [] + for v in self.nodes_io.values(): + nodes.append( + node_proto( + v.debugName, + input=v.inputs, + outputsize=v.tensor_size, + op=v.kind, + attributes=v.attributes, + ) + ) + return nodes + + +def parse(graph, trace, args=None, omit_useless_nodes=True): + """Parse an optimized PyTorch model graph and produces a list of nodes and node stats. + + Useful for eventual conversion to TensorBoard protobuf format. + + Args: + graph (PyTorch module): The model graph to be parsed. + trace (PyTorch JIT TracedModule): The model trace to be parsed. + args (tuple): input tensor[s] for the model. + omit_useless_nodes (boolean): Whether to remove nodes from the graph. + """ + n_inputs = len(args) + + scope = {} + nodes_py = GraphPy() + for node in graph.inputs(): + if omit_useless_nodes: + if ( + len(node.uses()) == 0 + ): # number of user of the node (= number of outputs/ fanout) + continue + + if node.type().kind() != CLASSTYPE_KIND: + nodes_py.append(NodePyIO(node, "input")) + + attr_to_scope: Dict[Any, str] = {} + for node in graph.nodes(): + if node.kind() == GETATTR_KIND: + attr_name = node.s("name") + attr_key = node.output().debugName() + parent = node.input().node() + if ( + parent.kind() == GETATTR_KIND + ): # If the parent node is not the top-level "self" node + parent_attr_name = parent.s("name") + parent_attr_key = parent.output().debugName() + parent_scope = attr_to_scope[parent_attr_key] + attr_scope = parent_scope.split("/")[-1] + attr_to_scope[attr_key] = f"{parent_scope}/{attr_scope}.{attr_name}" + else: + attr_to_scope[attr_key] = f"__module.{attr_name}" + # We don't need classtype nodes; scope will provide this information + if node.output().type().kind() != CLASSTYPE_KIND: + 
def graph(model, args, verbose=False, use_strict_trace=True):
    """
    Process a PyTorch model and produce a `GraphDef` proto that can be logged to TensorBoard.

    Args:
        model (PyTorch module): The model to be parsed.
        args (tuple): input tensor[s] for the model.
        verbose (bool): Whether to print out verbose information while
            processing.
        use_strict_trace (bool): Whether to pass keyword argument `strict` to
            `torch.jit.trace`. Pass False when you want the tracer to
            record your mutable container types (list, dict)

    Returns:
        tuple: ``(GraphDef, RunMetadata)`` for the TensorBoard graph plugin.

    Raises:
        RuntimeError: re-raised from ``torch.jit.trace`` when tracing fails.
    """
    with _set_model_to_eval(model):
        try:
            trace = torch.jit.trace(model, args, strict=use_strict_trace)
            graph = trace.graph
            torch._C._jit_pass_inline(graph)
        except RuntimeError as e:
            print(e)
            print("Error occurs, No graph saved")
            # Fix: bare `raise` preserves the original traceback; the previous
            # `raise e` re-raised from this frame and truncated it.
            raise

    if verbose:
        print(graph)
    list_of_nodes = parse(graph, trace, args)
    # We are hardcoding that this was run on CPU even though it might have actually
    # run on GPU. Note this is what is shown in TensorBoard and has no bearing
    # on actual execution.
    # TODO: See if we can extract GPU vs CPU information from the PyTorch model
    # and pass it correctly to TensorBoard.
    #
    # Definition of StepStats and DeviceStepStats can be found at
    # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/test/graph-test.ts
    # and
    # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto
    stepstats = RunMetadata(
        step_stats=StepStats(dev_stats=[DeviceStepStats(device="/device:CPU:0")])
    )
    # The producer version has been reverse engineered from standard
    # TensorBoard logged data.
    return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats
+ + +@contextlib.contextmanager +def _set_model_to_eval(model): + """Context manager to temporarily set the training mode of ``model`` to eval.""" + if not isinstance(model, torch.jit.ScriptFunction): + originally_training = model.training + model.train(False) + try: + yield + finally: + model.train(originally_training) + else: + # Do nothing for ScriptFunction + try: + yield + finally: + pass + + +def _node_get(node: torch._C.Node, key: str): + """Get attributes of a node which is polymorphic over return type.""" + sel = node.kindOf(key) + return getattr(node, sel)(key) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_utils.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f79f59749f534d47ceae2b26900569f9be9d4c79 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/_utils.py @@ -0,0 +1,125 @@ +import numpy as np + + +# Functions for converting +def figure_to_image(figures, close=True): + """Render matplotlib figure to numpy format. + + Note that this requires the ``matplotlib`` package. 
+ + Args: + figures (matplotlib.pyplot.figure or list of figures): figure or a list of figures + close (bool): Flag to automatically close the figure + + Returns: + numpy.array: image in [CHW] order + """ + import matplotlib.pyplot as plt + import matplotlib.backends.backend_agg as plt_backend_agg + + def render_to_rgb(figure): + canvas = plt_backend_agg.FigureCanvasAgg(figure) + canvas.draw() + data: np.ndarray = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8) + w, h = figure.canvas.get_width_height() + image_hwc = data.reshape([h, w, 4])[:, :, 0:3] + image_chw = np.moveaxis(image_hwc, source=2, destination=0) + if close: + plt.close(figure) + return image_chw + + if isinstance(figures, list): + images = [render_to_rgb(figure) for figure in figures] + return np.stack(images) + else: + image = render_to_rgb(figures) + return image + + +def _prepare_video(V): + """ + Convert a 5D tensor into 4D tensor. + + Convesrion is done from [batchsize, time(frame), channel(color), height, width] (5D tensor) + to [time(frame), new_width, new_height, channel] (4D tensor). + + A batch of images are spreaded to a grid, which forms a frame. + e.g. Video with batchsize 16 will have a 4x4 grid. 
+ """ + b, t, c, h, w = V.shape + + if V.dtype == np.uint8: + V = np.float32(V) / 255.0 + + def is_power2(num): + return num != 0 and ((num & (num - 1)) == 0) + + # pad to nearest power of 2, all at once + if not is_power2(V.shape[0]): + len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0]) + V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0) + + n_rows = 2 ** ((b.bit_length() - 1) // 2) + n_cols = V.shape[0] // n_rows + + V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w)) + V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3)) + V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c)) + + return V + + +def make_grid(I, ncols=8): + # I: N1HW or N3HW + assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here" + if I.shape[1] == 1: + I = np.concatenate([I, I, I], 1) + assert I.ndim == 4 and I.shape[1] == 3 + nimg = I.shape[0] + H = I.shape[2] + W = I.shape[3] + ncols = min(nimg, ncols) + nrows = int(np.ceil(float(nimg) / ncols)) + canvas = np.zeros((3, H * nrows, W * ncols), dtype=I.dtype) + i = 0 + for y in range(nrows): + for x in range(ncols): + if i >= nimg: + break + canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i] + i = i + 1 + return canvas + + # if modality == 'IMG': + # if x.dtype == np.uint8: + # x = x.astype(np.float32) / 255.0 + + +def convert_to_HWC(tensor, input_format): # tensor: numpy array + assert len(set(input_format)) == len( + input_format + ), f"You can not use the same dimension shordhand twice. input_format: {input_format}" + assert len(tensor.shape) == len( + input_format + ), f"size of input tensor and input format are different. 
\ + tensor shape: {tensor.shape}, input_format: {input_format}" + input_format = input_format.upper() + + if len(input_format) == 4: + index = [input_format.find(c) for c in "NCHW"] + tensor_NCHW = tensor.transpose(index) + tensor_CHW = make_grid(tensor_NCHW) + return tensor_CHW.transpose(1, 2, 0) + + if len(input_format) == 3: + index = [input_format.find(c) for c in "HWC"] + tensor_HWC = tensor.transpose(index) + if tensor_HWC.shape[2] == 1: + tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2) + return tensor_HWC + + if len(input_format) == 2: + index = [input_format.find(c) for c in "HW"] + tensor = tensor.transpose(index) + tensor = np.stack([tensor, tensor, tensor], 2) + return tensor diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py new file mode 100644 index 0000000000000000000000000000000000000000..8211f6e0c80241bebacd19ea5824c5b338018849 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py @@ -0,0 +1,983 @@ +import json +import logging +import os +import struct + +from typing import Any, List, Optional + +import torch +import numpy as np + +from google.protobuf import struct_pb2 + +from tensorboard.compat.proto.summary_pb2 import ( + HistogramProto, + Summary, + SummaryMetadata, +) +from tensorboard.compat.proto.tensor_pb2 import TensorProto +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto +from tensorboard.plugins.custom_scalar import layout_pb2 +from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData +from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData + +from ._convert_np import make_np +from ._utils import _prepare_video, convert_to_HWC + +__all__ = [ + "half_to_int", + "int_to_half", + "hparams", + "scalar", + "histogram_raw", + "histogram", + "make_histogram", + "image", + "image_boxes", + "draw_boxes", 
def half_to_int(f: float) -> int:
    """Cast a half-precision float value into an integer.

    Converts a half precision floating point value, such as `torch.half` or
    `torch.bfloat16`, into an integer value which can be written into the
    half_val field of a TensorProto for storage.

    To undo the effects of this conversion, use int_to_half().
    """
    # Reinterpret the IEEE-754 float32 bit pattern as a signed 32-bit int.
    return struct.unpack("i", struct.pack("f", f))[0]
_tensor_to_list), + torch.int: ("DT_INT32", "int_val", _tensor_to_list), + torch.int32: ("DT_INT32", "int_val", _tensor_to_list), + torch.qint32: ("DT_INT32", "int_val", _tensor_to_list), + torch.int64: ("DT_INT64", "int64_val", _tensor_to_list), + torch.complex32: ("DT_COMPLEX32", "scomplex_val", _tensor_to_complex_val), + torch.chalf: ("DT_COMPLEX32", "scomplex_val", _tensor_to_complex_val), + torch.complex64: ("DT_COMPLEX64", "scomplex_val", _tensor_to_complex_val), + torch.cfloat: ("DT_COMPLEX64", "scomplex_val", _tensor_to_complex_val), + torch.bool: ("DT_BOOL", "bool_val", _tensor_to_list), + torch.complex128: ("DT_COMPLEX128", "dcomplex_val", _tensor_to_complex_val), + torch.cdouble: ("DT_COMPLEX128", "dcomplex_val", _tensor_to_complex_val), + torch.uint8: ("DT_UINT8", "uint32_val", _tensor_to_list), + torch.quint8: ("DT_UINT8", "uint32_val", _tensor_to_list), + torch.quint4x2: ("DT_UINT8", "uint32_val", _tensor_to_list), +} + + +def _calc_scale_factor(tensor): + converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor + return 1 if converted.dtype == np.uint8 else 255 + + +def _draw_single_box( + image, + xmin, + ymin, + xmax, + ymax, + display_str, + color="black", + color_text="black", + thickness=2, +): + from PIL import ImageDraw, ImageFont + + font = ImageFont.load_default() + draw = ImageDraw.Draw(image) + (left, right, top, bottom) = (xmin, xmax, ymin, ymax) + draw.line( + [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], + width=thickness, + fill=color, + ) + if display_str: + text_bottom = bottom + # Reverse list and print from bottom to top. 
def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None):
    """Output three `Summary` protocol buffers needed by hparams plugin.

    `Experiment` keeps the metadata of an experiment, such as the name of the
    hyperparameters and the name of the metrics.
    `SessionStartInfo` keeps key-value pairs of the hyperparameters
    `SessionEndInfo` describes status of the experiment e.g. STATUS_SUCCESS

    Args:
        hparam_dict: A dictionary that contains names of the hyperparameters
            and their values.
        metric_dict: A dictionary that contains names of the metrics
            and their values.
        hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that
            contains names of the hyperparameters and all discrete values they can hold

    Returns:
        The `Summary` protobufs for Experiment, SessionStartInfo and
        SessionEndInfo

    Raises:
        TypeError: if either dict argument is not a dictionary or
            ``hparam_domain_discrete`` is inconsistent with ``hparam_dict``.
        ValueError: if a hyperparameter value has an unsupported type.
    """
    import torch
    from tensorboard.plugins.hparams.api_pb2 import (
        DataType,
        Experiment,
        HParamInfo,
        MetricInfo,
        MetricName,
        Status,
    )
    from tensorboard.plugins.hparams.metadata import (
        EXPERIMENT_TAG,
        PLUGIN_DATA_VERSION,
        PLUGIN_NAME,
        SESSION_END_INFO_TAG,
        SESSION_START_INFO_TAG,
    )
    from tensorboard.plugins.hparams.plugin_data_pb2 import (
        HParamsPluginData,
        SessionEndInfo,
        SessionStartInfo,
    )

    # TODO: expose other parameters in the future.
    # hp = HParamInfo(name='lr',display_name='learning rate',
    # type=DataType.DATA_TYPE_FLOAT64, domain_interval=Interval(min_value=10,
    # max_value=100))
    # mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy',
    # description='', dataset_type=DatasetType.DATASET_VALIDATION)
    # exp = Experiment(name='123', description='456', time_created_secs=100.0,
    # hparam_infos=[hp], metric_infos=[mt], user='tw')

    if not isinstance(hparam_dict, dict):
        logger.warning("parameter: hparam_dict should be a dictionary, nothing logged.")
        raise TypeError(
            "parameter: hparam_dict should be a dictionary, nothing logged."
        )
    if not isinstance(metric_dict, dict):
        logger.warning("parameter: metric_dict should be a dictionary, nothing logged.")
        raise TypeError(
            "parameter: metric_dict should be a dictionary, nothing logged."
        )

    hparam_domain_discrete = hparam_domain_discrete or {}
    if not isinstance(hparam_domain_discrete, dict):
        raise TypeError(
            "parameter: hparam_domain_discrete should be a dictionary, nothing logged."
        )
    for k, v in hparam_domain_discrete.items():
        if (
            k not in hparam_dict
            or not isinstance(v, list)
            or not all(isinstance(d, type(hparam_dict[k])) for d in v)
        ):
            raise TypeError(
                f"parameter: hparam_domain_discrete[{k}] should be a list of same type as hparam_dict[{k}]."
            )
    hps = []

    ssi = SessionStartInfo()
    for k, v in hparam_dict.items():
        if v is None:
            continue

        # BUGFIX: bool must be tested BEFORE (int, float). `bool` is a
        # subclass of `int`, so the number branch used to capture booleans,
        # making this branch unreachable and logging bools as
        # DATA_TYPE_FLOAT64 instead of DATA_TYPE_BOOL.
        if isinstance(v, bool):
            ssi.hparams[k].bool_value = v

            if k in hparam_domain_discrete:
                domain_discrete: Optional[struct_pb2.ListValue] = struct_pb2.ListValue(
                    values=[
                        struct_pb2.Value(bool_value=d)
                        for d in hparam_domain_discrete[k]
                    ]
                )
            else:
                domain_discrete = None

            hps.append(
                HParamInfo(
                    name=k,
                    type=DataType.Value("DATA_TYPE_BOOL"),
                    domain_discrete=domain_discrete,
                )
            )
            continue

        if isinstance(v, (int, float)):
            ssi.hparams[k].number_value = v

            if k in hparam_domain_discrete:
                domain_discrete = struct_pb2.ListValue(
                    values=[
                        struct_pb2.Value(number_value=d)
                        for d in hparam_domain_discrete[k]
                    ]
                )
            else:
                domain_discrete = None

            hps.append(
                HParamInfo(
                    name=k,
                    type=DataType.Value("DATA_TYPE_FLOAT64"),
                    domain_discrete=domain_discrete,
                )
            )
            continue

        if isinstance(v, str):
            ssi.hparams[k].string_value = v

            if k in hparam_domain_discrete:
                domain_discrete = struct_pb2.ListValue(
                    values=[
                        struct_pb2.Value(string_value=d)
                        for d in hparam_domain_discrete[k]
                    ]
                )
            else:
                domain_discrete = None

            hps.append(
                HParamInfo(
                    name=k,
                    type=DataType.Value("DATA_TYPE_STRING"),
                    domain_discrete=domain_discrete,
                )
            )
            continue

        if isinstance(v, torch.Tensor):
            v = make_np(v)[0]
            ssi.hparams[k].number_value = v
            hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_FLOAT64")))
            continue
        raise ValueError(
            "value should be one of int, float, str, bool, or torch.Tensor"
        )

    content = HParamsPluginData(session_start_info=ssi, version=PLUGIN_DATA_VERSION)
    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME, content=content.SerializeToString()
        )
    )
    ssi = Summary(value=[Summary.Value(tag=SESSION_START_INFO_TAG, metadata=smd)])

    mts = [MetricInfo(name=MetricName(tag=k)) for k in metric_dict.keys()]

    exp = Experiment(hparam_infos=hps, metric_infos=mts)

    content = HParamsPluginData(experiment=exp, version=PLUGIN_DATA_VERSION)
    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME, content=content.SerializeToString()
        )
    )
    exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)])

    sei = SessionEndInfo(status=Status.Value("STATUS_SUCCESS"))
    content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION)
    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME, content=content.SerializeToString()
        )
    )
    sei = Summary(value=[Summary.Value(tag=SESSION_END_INFO_TAG, metadata=smd)])

    return exp, ssi, sei
def tensor_proto(tag, tensor):
    """Output a `Summary` protocol buffer containing the full tensor.

    The generated Summary has a Tensor.proto containing the input Tensor.

    Args:
        tag: A name for the generated node. Will also serve as the series name
            in TensorBoard.
        tensor: Tensor to be converted to protobuf
    Returns:
        A tensor protobuf in a `Summary` protobuf.
    Raises:
        ValueError: If tensor is too big to be converted to protobuf, or
            tensor data type is not supported
    """
    # Protobuf messages are capped at 2GB; refuse anything at or over that.
    if tensor.numel() * tensor.itemsize >= (1 << 31):
        raise ValueError(
            "tensor is bigger than protocol buffer's hard limit of 2GB in size"
        )

    if tensor.dtype not in _TENSOR_TYPE_MAP:
        raise ValueError(f"{tag} has unsupported tensor dtype {tensor.dtype}")

    dtype_name, field_name, conversion_fn = _TENSOR_TYPE_MAP[tensor.dtype]
    shape = TensorShapeProto(
        dim=[TensorShapeProto.Dim(size=x) for x in tensor.shape]
    )
    # The value field name depends on the dtype, so build kwargs dynamically.
    proto_kwargs = {
        "dtype": dtype_name,
        "tensor_shape": shape,
        field_name: conversion_fn(tensor),
    }
    tensor_pb = TensorProto(**proto_kwargs)

    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(plugin_name="tensor")
    )
    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor_pb)])
+ + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + Args: + name: A name for the generated node. Will also serve as a series name in + TensorBoard. + min: A float or int min value + max: A float or int max value + num: Int number of values + sum: Float or int sum of all values + sum_squares: Float or int sum of squares for all values + bucket_limits: A numeric `Tensor` with upper value per bucket + bucket_counts: A numeric `Tensor` with number of values per bucket + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + hist = HistogramProto( + min=min, + max=max, + num=num, + sum=sum, + sum_squares=sum_squares, + bucket_limit=bucket_limits, + bucket=bucket_counts, + ) + return Summary(value=[Summary.Value(tag=name, histo=hist)]) + + +def histogram(name, values, bins, max_bins=None): + # pylint: disable=line-too-long + """Output a `Summary` protocol buffer with a histogram. + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + This op reports an `InvalidArgument` error if any value is not finite. + Args: + name: A name for the generated node. Will also serve as a series name in + TensorBoard. + values: A real numeric `Tensor`. Any shape. Values to use to + build the histogram. + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
+ """ + values = make_np(values) + hist = make_histogram(values.astype(float), bins, max_bins) + return Summary(value=[Summary.Value(tag=name, histo=hist)]) + + +def make_histogram(values, bins, max_bins=None): + """Convert values into a histogram proto using logic from histogram.cc.""" + if values.size == 0: + raise ValueError("The input has no element.") + values = values.reshape(-1) + counts, limits = np.histogram(values, bins=bins) + num_bins = len(counts) + if max_bins is not None and num_bins > max_bins: + subsampling = num_bins // max_bins + subsampling_remainder = num_bins % subsampling + if subsampling_remainder != 0: + counts = np.pad( + counts, + pad_width=[[0, subsampling - subsampling_remainder]], + mode="constant", + constant_values=0, + ) + counts = counts.reshape(-1, subsampling).sum(axis=-1) + new_limits = np.empty((counts.size + 1,), limits.dtype) + new_limits[:-1] = limits[:-1:subsampling] + new_limits[-1] = limits[-1] + limits = new_limits + + # Find the first and the last bin defining the support of the histogram: + + cum_counts = np.cumsum(np.greater(counts, 0)) + start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right") + start = int(start) + end = int(end) + 1 + del cum_counts + + # TensorBoard only includes the right bin limits. To still have the leftmost limit + # included, we include an empty bin left. 
+ # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the + # first nonzero-count bin: + counts = ( + counts[start - 1 : end] if start > 0 else np.concatenate([[0], counts[:end]]) + ) + limits = limits[start : end + 1] + + if counts.size == 0 or limits.size == 0: + raise ValueError("The histogram is empty, please file a bug report.") + + sum_sq = values.dot(values) + return HistogramProto( + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limit=limits.tolist(), + bucket=counts.tolist(), + ) + + +def image(tag, tensor, rescale=1, dataformats="NCHW"): + """Output a `Summary` protocol buffer with images. + + The summary has up to `max_images` summary values containing images. The + images are built from `tensor` which must be 3-D with shape `[height, width, + channels]` and where `channels` can be: + * 1: `tensor` is interpreted as Grayscale. + * 3: `tensor` is interpreted as RGB. + * 4: `tensor` is interpreted as RGBA. + The `name` in the outputted Summary.Value protobufs is generated based on the + name, with a suffix depending on the max_outputs setting: + * If `max_outputs` is 1, the summary value tag is '*name*/image'. + * If `max_outputs` is greater than 1, the summary value tags are + generated sequentially as '*name*/image/0', '*name*/image/1', etc. + Args: + tag: A name for the generated node. Will also serve as a series name in + TensorBoard. + tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width, + channels]` where `channels` is 1, 3, or 4. + 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8). + The image() function will scale the image values to [0, 255] by applying + a scale factor of either 1 (uint8) or 255 (float32). Out-of-range values + will be clipped. + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
+ """ + tensor = make_np(tensor) + tensor = convert_to_HWC(tensor, dataformats) + # Do not assume that user passes in values in [0, 255], use data type to detect + scale_factor = _calc_scale_factor(tensor) + tensor = tensor.astype(np.float32) + tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) + image = make_image(tensor, rescale=rescale) + return Summary(value=[Summary.Value(tag=tag, image=image)]) + + +def image_boxes( + tag, tensor_image, tensor_boxes, rescale=1, dataformats="CHW", labels=None +): + """Output a `Summary` protocol buffer with images.""" + tensor_image = make_np(tensor_image) + tensor_image = convert_to_HWC(tensor_image, dataformats) + tensor_boxes = make_np(tensor_boxes) + tensor_image = tensor_image.astype(np.float32) * _calc_scale_factor(tensor_image) + image = make_image( + tensor_image.clip(0, 255).astype(np.uint8), + rescale=rescale, + rois=tensor_boxes, + labels=labels, + ) + return Summary(value=[Summary.Value(tag=tag, image=image)]) + + +def draw_boxes(disp_image, boxes, labels=None): + # xyxy format + num_boxes = boxes.shape[0] + list_gt = range(num_boxes) + for i in list_gt: + disp_image = _draw_single_box( + disp_image, + boxes[i, 0], + boxes[i, 1], + boxes[i, 2], + boxes[i, 3], + display_str=None if labels is None else labels[i], + color="Red", + ) + return disp_image + + +def make_image(tensor, rescale=1, rois=None, labels=None): + """Convert a numpy representation of an image to Image protobuf.""" + from PIL import Image + + height, width, channel = tensor.shape + scaled_height = int(height * rescale) + scaled_width = int(width * rescale) + image = Image.fromarray(tensor) + if rois is not None: + image = draw_boxes(image, rois, labels=labels) + try: + ANTIALIAS = Image.Resampling.LANCZOS + except AttributeError: + ANTIALIAS = Image.ANTIALIAS + image = image.resize((scaled_width, scaled_height), ANTIALIAS) + import io + + output = io.BytesIO() + image.save(output, format="PNG") + image_string = output.getvalue() + 
output.close() + return Summary.Image( + height=height, + width=width, + colorspace=channel, + encoded_image_string=image_string, + ) + + +def video(tag, tensor, fps=4): + tensor = make_np(tensor) + tensor = _prepare_video(tensor) + # If user passes in uint8, then we don't need to rescale by 255 + scale_factor = _calc_scale_factor(tensor) + tensor = tensor.astype(np.float32) + tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) + video = make_video(tensor, fps) + return Summary(value=[Summary.Value(tag=tag, image=video)]) + + +def make_video(tensor, fps): + try: + import moviepy # noqa: F401 + except ImportError: + print("add_video needs package moviepy") + return + try: + from moviepy import editor as mpy + except ImportError: + print( + "moviepy is installed, but can't import moviepy.editor.", + "Some packages could be missing [imageio, requests]", + ) + return + import tempfile + + t, h, w, c = tensor.shape + + # encode sequence of images into gif string + clip = mpy.ImageSequenceClip(list(tensor), fps=fps) + + filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name + try: # newer version of moviepy use logger instead of progress_bar argument. + clip.write_gif(filename, verbose=False, logger=None) + except TypeError: + try: # older version of moviepy does not support progress_bar argument. 
+ clip.write_gif(filename, verbose=False, progress_bar=False) + except TypeError: + clip.write_gif(filename, verbose=False) + + with open(filename, "rb") as f: + tensor_string = f.read() + + try: + os.remove(filename) + except OSError: + logger.warning("The temporary file used by moviepy cannot be deleted.") + + return Summary.Image( + height=h, width=w, colorspace=c, encoded_image_string=tensor_string + ) + + +def audio(tag, tensor, sample_rate=44100): + array = make_np(tensor) + array = array.squeeze() + if abs(array).max() > 1: + print("warning: audio amplitude out of range, auto clipped.") + array = array.clip(-1, 1) + assert array.ndim == 1, "input tensor should be 1 dimensional." + array = (array * np.iinfo(np.int16).max).astype(" 127: # weird, value > 127 breaks protobuf + num_thresholds = 127 + data = np.stack((tp, fp, tn, fn, precision, recall)) + pr_curve_plugin_data = PrCurvePluginData( + version=0, num_thresholds=num_thresholds + ).SerializeToString() + plugin_data = SummaryMetadata.PluginData( + plugin_name="pr_curves", content=pr_curve_plugin_data + ) + smd = SummaryMetadata(plugin_data=plugin_data) + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=data.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=data.shape[0]), + TensorShapeProto.Dim(size=data.shape[1]), + ] + ), + ) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) + + +def pr_curve(tag, labels, predictions, num_thresholds=127, weights=None): + # weird, value > 127 breaks protobuf + num_thresholds = min(num_thresholds, 127) + data = compute_curve( + labels, predictions, num_thresholds=num_thresholds, weights=weights + ) + pr_curve_plugin_data = PrCurvePluginData( + version=0, num_thresholds=num_thresholds + ).SerializeToString() + plugin_data = SummaryMetadata.PluginData( + plugin_name="pr_curves", content=pr_curve_plugin_data + ) + smd = SummaryMetadata(plugin_data=plugin_data) + tensor = TensorProto( + 
dtype="DT_FLOAT", + float_val=data.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=data.shape[0]), + TensorShapeProto.Dim(size=data.shape[1]), + ] + ), + ) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) + + +# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py +def compute_curve(labels, predictions, num_thresholds=None, weights=None): + _MINIMUM_COUNT = 1e-7 + + if weights is None: + weights = 1.0 + + # Compute bins of true positives and false positives. + bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1))) + float_labels = labels.astype(np.float64) + histogram_range = (0, num_thresholds - 1) + tp_buckets, _ = np.histogram( + bucket_indices, + bins=num_thresholds, + range=histogram_range, + weights=float_labels * weights, + ) + fp_buckets, _ = np.histogram( + bucket_indices, + bins=num_thresholds, + range=histogram_range, + weights=(1.0 - float_labels) * weights, + ) + + # Obtain the reverse cumulative sum. + tp = np.cumsum(tp_buckets[::-1])[::-1] + fp = np.cumsum(fp_buckets[::-1])[::-1] + tn = fp[0] - fp + fn = tp[0] - tp + precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp) + recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn) + return np.stack((tp, fp, tn, fn, precision, recall)) + + +def _get_tensor_summary( + name, display_name, description, tensor, content_type, components, json_config +): + """Create a tensor summary with summary metadata. + + Args: + name: Uniquely identifiable name of the summary op. Could be replaced by + combination of name and type to make it unique even outside of this + summary. + display_name: Will be used as the display name in TensorBoard. + Defaults to `name`. + description: A longform readable description of the summary data. Markdown + is supported. + tensor: Tensor to display in summary. + content_type: Type of content inside the Tensor. 
+ components: Bitmask representing present parts (vertices, colors, etc.) that + belong to the summary. + json_config: A string, JSON-serialized dictionary of ThreeJS classes + configuration. + + Returns: + Tensor summary with metadata. + """ + import torch + from tensorboard.plugins.mesh import metadata + + tensor = torch.as_tensor(tensor) + + tensor_metadata = metadata.create_summary_metadata( + name, + display_name, + content_type, + components, + tensor.shape, + description, + json_config=json_config, + ) + + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=tensor.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=tensor.shape[0]), + TensorShapeProto.Dim(size=tensor.shape[1]), + TensorShapeProto.Dim(size=tensor.shape[2]), + ] + ), + ) + + tensor_summary = Summary.Value( + tag=metadata.get_instance_name(name, content_type), + tensor=tensor, + metadata=tensor_metadata, + ) + + return tensor_summary + + +def _get_json_config(config_dict): + """Parse and returns JSON string from python dictionary.""" + json_config = "{}" + if config_dict is not None: + json_config = json.dumps(config_dict, sort_keys=True) + return json_config + + +# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/mesh/summary.py +def mesh( + tag, vertices, colors, faces, config_dict, display_name=None, description=None +): + """Output a merged `Summary` protocol buffer with a mesh/point cloud. + + Args: + tag: A name for this summary operation. + vertices: Tensor of shape `[dim_1, ..., dim_n, 3]` representing the 3D + coordinates of vertices. + faces: Tensor of shape `[dim_1, ..., dim_n, 3]` containing indices of + vertices within each triangle. + colors: Tensor of shape `[dim_1, ..., dim_n, 3]` containing colors for each + vertex. + display_name: If set, will be used as the display name in TensorBoard. + Defaults to `name`. + description: A longform readable description of the summary data. Markdown + is supported. 
+ config_dict: Dictionary with ThreeJS classes names and configuration. + + Returns: + Merged summary for mesh/point cloud representation. + """ + from tensorboard.plugins.mesh import metadata + from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData + + json_config = _get_json_config(config_dict) + + summaries = [] + tensors = [ + (vertices, MeshPluginData.VERTEX), + (faces, MeshPluginData.FACE), + (colors, MeshPluginData.COLOR), + ] + tensors = [tensor for tensor in tensors if tensor[0] is not None] + components = metadata.get_components_bitmask( + [content_type for (tensor, content_type) in tensors] + ) + + for tensor, content_type in tensors: + summaries.append( + _get_tensor_summary( + tag, + display_name, + description, + tensor, + content_type, + components, + json_config, + ) + ) + + return Summary(value=summaries) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..bd8c5d188dc5083e89ad82c130111f94765f1913 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py @@ -0,0 +1,1270 @@ +"""Provide an API for writing protocol buffers to event files to be consumed by TensorBoard for visualization.""" + +import os +import time +from typing import List, Optional, Union, TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from matplotlib.figure import Figure +from tensorboard.compat import tf +from tensorboard.compat.proto import event_pb2 +from tensorboard.compat.proto.event_pb2 import Event, SessionLog +from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig +from tensorboard.summary.writer.event_file_writer import EventFileWriter + +from ._convert_np import make_np +from ._embedding import get_embedding_info, make_mat, make_sprite, make_tsv, write_pbtxt +from ._onnx_graph import load_onnx_graph 
+from ._pytorch_graph import graph +from ._utils import figure_to_image +from .summary import ( + audio, + custom_scalars, + histogram, + histogram_raw, + hparams, + image, + image_boxes, + mesh, + pr_curve, + pr_curve_raw, + scalar, + tensor_proto, + text, + video, +) + +__all__ = ["FileWriter", "SummaryWriter"] + + +class FileWriter: + """Writes protocol buffers to event files to be consumed by TensorBoard. + + The `FileWriter` class provides a mechanism to create an event file in a + given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. + """ + + def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=""): + """Create a `FileWriter` and an event file. + + On construction the writer creates a new event file in `log_dir`. + The other arguments to the constructor control the asynchronous writes to + the event file. + + Args: + log_dir: A string. Directory where event file will be written. + max_queue: Integer. Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs: Number. How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix: A string. Suffix added to all event filenames + in the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. 
+ """ + # Sometimes PosixPath is passed in and we need to coerce it to + # a string in all cases + # TODO: See if we can remove this in the future if we are + # actually the ones passing in a PosixPath + log_dir = str(log_dir) + self.event_writer = EventFileWriter( + log_dir, max_queue, flush_secs, filename_suffix + ) + + def get_logdir(self): + """Return the directory where event file will be written.""" + return self.event_writer.get_logdir() + + def add_event(self, event, step=None, walltime=None): + """Add an event to the event file. + + Args: + event: An `Event` protocol buffer. + step: Number. Optional global step value for training process + to record with the event. + walltime: float. Optional walltime to override the default (current) + walltime (from time.time()) seconds after epoch + """ + event.wall_time = time.time() if walltime is None else walltime + if step is not None: + # Make sure step is converted from numpy or other formats + # since protobuf might not convert depending on version + event.step = int(step) + self.event_writer.add_event(event) + + def add_summary(self, summary, global_step=None, walltime=None): + """Add a `Summary` protocol buffer to the event file. + + This method wraps the provided summary in an `Event` protocol buffer + and adds it to the event file. + + Args: + summary: A `Summary` protocol buffer. + global_step: Number. Optional global step value for training process + to record with the summary. + walltime: float. Optional walltime to override the default (current) + walltime (from time.time()) seconds after epoch + """ + event = event_pb2.Event(summary=summary) + self.add_event(event, global_step, walltime) + + def add_graph(self, graph_profile, walltime=None): + """Add a `Graph` and step stats protocol buffer to the event file. + + Args: + graph_profile: A `Graph` and step stats protocol buffer. + walltime: float. 
Optional walltime to override the default (current) + walltime (from time.time()) seconds after epoch + """ + graph = graph_profile[0] + stepstats = graph_profile[1] + event = event_pb2.Event(graph_def=graph.SerializeToString()) + self.add_event(event, None, walltime) + + trm = event_pb2.TaggedRunMetadata( + tag="step1", run_metadata=stepstats.SerializeToString() + ) + event = event_pb2.Event(tagged_run_metadata=trm) + self.add_event(event, None, walltime) + + def add_onnx_graph(self, graph, walltime=None): + """Add a `Graph` protocol buffer to the event file. + + Args: + graph: A `Graph` protocol buffer. + walltime: float. Optional walltime to override the default (current) + _get_file_writerfrom time.time()) + """ + event = event_pb2.Event(graph_def=graph.SerializeToString()) + self.add_event(event, None, walltime) + + def flush(self): + """Flushes the event file to disk. + + Call this method to make sure that all pending events have been written to + disk. + """ + self.event_writer.flush() + + def close(self): + """Flushes the event file to disk and close the file. + + Call this method when you do not need the summary writer anymore. + """ + self.event_writer.close() + + def reopen(self): + """Reopens the EventFileWriter. + + Can be called after `close()` to add more events in the same directory. + The events will go into a new events file. + Does nothing if the EventFileWriter was not closed. + """ + self.event_writer.reopen() + + +class SummaryWriter: + """Writes entries directly to event files in the log_dir to be consumed by TensorBoard. + + The `SummaryWriter` class provides a high-level API to create an event file + in a given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. 
+ """ + + def __init__( + self, + log_dir=None, + comment="", + purge_step=None, + max_queue=10, + flush_secs=120, + filename_suffix="", + ): + """Create a `SummaryWriter` that will write out events and summaries to the event file. + + Args: + log_dir (str): Save directory location. Default is + runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run. + Use hierarchical folder structure to compare + between runs easily. e.g. pass in 'runs/exp1', 'runs/exp2', etc. + for each new experiment to compare across them. + comment (str): Comment log_dir suffix appended to the default + ``log_dir``. If ``log_dir`` is assigned, this argument has no effect. + purge_step (int): + When logging crashes at step :math:`T+X` and restarts at step :math:`T`, + any events whose global_step larger or equal to :math:`T` will be + purged and hidden from TensorBoard. + Note that crashed and resumed experiments should have the same ``log_dir``. + max_queue (int): Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs (int): How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix (str): Suffix added to all event filenames in + the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + + # create a summary writer with automatically generated folder name. + writer = SummaryWriter() + # folder location: runs/May04_22-14-54_s-MacBook-Pro.local/ + + # create a summary writer using the specified folder name. + writer = SummaryWriter("my_experiment") + # folder location: my_experiment + + # create a summary writer with comment appended. 
+ writer = SummaryWriter(comment="LR_0.1_BATCH_16") + # folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/ + + """ + torch._C._log_api_usage_once("tensorboard.create.summarywriter") + if not log_dir: + import socket + from datetime import datetime + + current_time = datetime.now().strftime("%b%d_%H-%M-%S") + log_dir = os.path.join( + "runs", current_time + "_" + socket.gethostname() + comment + ) + self.log_dir = log_dir + self.purge_step = purge_step + self.max_queue = max_queue + self.flush_secs = flush_secs + self.filename_suffix = filename_suffix + + # Initialize the file writers, but they can be cleared out on close + # and recreated later as needed. + self.file_writer = self.all_writers = None + self._get_file_writer() + + # Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard + v = 1e-12 + buckets = [] + neg_buckets = [] + while v < 1e20: + buckets.append(v) + neg_buckets.append(-v) + v *= 1.1 + self.default_bins = neg_buckets[::-1] + [0] + buckets + + def _check_caffe2_blob(self, item): + """ + Check if the input is a string representing a Caffe2 blob name. + + Caffe2 users have the option of passing a string representing the name of a blob + in the workspace instead of passing the actual Tensor/array containing the numeric values. + Thus, we need to check if we received a string as input + instead of an actual Tensor/array, and if so, we need to fetch the Blob + from the workspace corresponding to that name. Fetching can be done with the + following: + + from caffe2.python import workspace (if not already imported) + workspace.FetchBlob(blob_name) + workspace.FetchBlobs([blob_name1, blob_name2, ...]) + """ + return isinstance(item, str) + + def _get_file_writer(self): + """Return the default FileWriter instance. 
Recreates it if closed.""" + if self.all_writers is None or self.file_writer is None: + self.file_writer = FileWriter( + self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix + ) + self.all_writers = {self.file_writer.get_logdir(): self.file_writer} + if self.purge_step is not None: + most_recent_step = self.purge_step + self.file_writer.add_event( + Event(step=most_recent_step, file_version="brain.Event:2") + ) + self.file_writer.add_event( + Event( + step=most_recent_step, + session_log=SessionLog(status=SessionLog.START), + ) + ) + self.purge_step = None + return self.file_writer + + def get_logdir(self): + """Return the directory where event files will be written.""" + return self.log_dir + + def add_hparams( + self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None, global_step=None + ): + """Add a set of hyperparameters to be compared in TensorBoard. + + Args: + hparam_dict (dict): Each key-value pair in the dictionary is the + name of the hyper parameter and it's corresponding value. + The type of the value can be one of `bool`, `string`, `float`, + `int`, or `None`. + metric_dict (dict): Each key-value pair in the dictionary is the + name of the metric and it's corresponding value. Note that the key used + here should be unique in the tensorboard record. Otherwise the value + you added by ``add_scalar`` will be displayed in hparam plugin. In most + cases, this is unwanted. + hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that + contains names of the hyperparameters and all discrete values they can hold + run_name (str): Name of the run, to be included as part of the logdir. + If unspecified, will use current timestamp. + global_step (int): Global step value to record + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + with SummaryWriter() as w: + for i in range(5): + w.add_hparams({'lr': 0.1*i, 'bsize': i}, + {'hparam/accuracy': 10*i, 'hparam/loss': 10*i}) + + Expected result: + + .. 
image:: _static/img/tensorboard/add_hparam.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_hparams") + if type(hparam_dict) is not dict or type(metric_dict) is not dict: + raise TypeError("hparam_dict and metric_dict should be dictionary.") + exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete) + + if not run_name: + run_name = str(time.time()) + logdir = os.path.join(self._get_file_writer().get_logdir(), run_name) + with SummaryWriter(log_dir=logdir) as w_hp: + w_hp.file_writer.add_summary(exp, global_step) + w_hp.file_writer.add_summary(ssi, global_step) + w_hp.file_writer.add_summary(sei, global_step) + for k, v in metric_dict.items(): + w_hp.add_scalar(k, v, global_step) + + def add_scalar( + self, + tag, + scalar_value, + global_step=None, + walltime=None, + new_style=False, + double_precision=False, + ): + """Add scalar data to summary. + + Args: + tag (str): Data identifier + scalar_value (float or string/blobname): Value to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + with seconds after epoch of event + new_style (boolean): Whether to use new style (tensor field) or old + style (simple_value field). New style could lead to faster data loading. + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + x = range(100) + for i in x: + writer.add_scalar('y=2x', i * 2, i) + writer.close() + + Expected result: + + .. 
image:: _static/img/tensorboard/add_scalar.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_scalar") + if self._check_caffe2_blob(scalar_value): + from caffe2.python import workspace + + scalar_value = workspace.FetchBlob(scalar_value) + + summary = scalar( + tag, scalar_value, new_style=new_style, double_precision=double_precision + ) + self._get_file_writer().add_summary(summary, global_step, walltime) + + def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None): + """Add many scalar data to summary. + + Args: + main_tag (str): The parent name for the tags + tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + r = 5 + for i in range(100): + writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), + 'xcosx':i*np.cos(i/r), + 'tanx': np.tan(i/r)}, i) + writer.close() + # This call adds three values to the same scalar plot with the tag + # 'run_14h' in TensorBoard's scalar section. + + Expected result: + + .. 
image:: _static/img/tensorboard/add_scalars.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_scalars") + walltime = time.time() if walltime is None else walltime + fw_logdir = self._get_file_writer().get_logdir() + for tag, scalar_value in tag_scalar_dict.items(): + fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag + assert self.all_writers is not None + if fw_tag in self.all_writers.keys(): + fw = self.all_writers[fw_tag] + else: + fw = FileWriter( + fw_tag, self.max_queue, self.flush_secs, self.filename_suffix + ) + self.all_writers[fw_tag] = fw + if self._check_caffe2_blob(scalar_value): + from caffe2.python import workspace + + scalar_value = workspace.FetchBlob(scalar_value) + fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime) + + def add_tensor( + self, + tag, + tensor, + global_step=None, + walltime=None, + ): + """Add tensor data to summary. + + Args: + tag (str): Data identifier + tensor (torch.Tensor): tensor to save + global_step (int): Global step value to record + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + x = torch.tensor([1,2,3]) + writer.add_scalar('x', x) + writer.close() + + Expected result: + Summary::tensor::float_val [1,2,3] + ::tensor::shape [3] + ::tag 'x' + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_tensor") + if self._check_caffe2_blob(tensor): + from caffe2.python import workspace + + tensor = torch.tensor(workspace.FetchBlob(tensor)) + + summary = tensor_proto(tag, tensor) + self._get_file_writer().add_summary(summary, global_step, walltime) + + def add_histogram( + self, + tag, + values, + global_step=None, + bins="tensorflow", + walltime=None, + max_bins=None, + ): + """Add histogram to summary. 
+ + Args: + tag (str): Data identifier + values (torch.Tensor, numpy.ndarray, or string/blobname): Values to build histogram + global_step (int): Global step value to record + bins (str): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find + other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + for i in range(10): + x = np.random.random(1000) + writer.add_histogram('distribution centers', x + i, i) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_histogram") + if self._check_caffe2_blob(values): + from caffe2.python import workspace + + values = workspace.FetchBlob(values) + if isinstance(bins, str) and bins == "tensorflow": + bins = self.default_bins + self._get_file_writer().add_summary( + histogram(tag, values, bins, max_bins=max_bins), global_step, walltime + ) + + def add_histogram_raw( + self, + tag, + min, + max, + num, + sum, + sum_squares, + bucket_limits, + bucket_counts, + global_step=None, + walltime=None, + ): + """Add histogram with raw data. + + Args: + tag (str): Data identifier + min (float or int): Min value + max (float or int): Max value + num (int): Number of values + sum (float or int): Sum of all values + sum_squares (float or int): Sum of squares for all values + bucket_limits (torch.Tensor, numpy.ndarray): Upper value per bucket. + The number of elements of it should be the same as `bucket_counts`. 
+ bucket_counts (torch.Tensor, numpy.ndarray): Number of values per bucket + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + dummy_data = [] + for idx, value in enumerate(range(50)): + dummy_data += [idx + 0.001] * value + + bins = list(range(50+2)) + bins = np.array(bins) + values = np.array(dummy_data).astype(float).reshape(-1) + counts, limits = np.histogram(values, bins=bins) + sum_sq = values.dot(values) + writer.add_histogram_raw( + tag='histogram_with_raw_data', + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limits=limits[1:].tolist(), + bucket_counts=counts.tolist(), + global_step=0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram_raw.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_histogram_raw") + if len(bucket_limits) != len(bucket_counts): + raise ValueError( + "len(bucket_limits) != len(bucket_counts), see the document." + ) + self._get_file_writer().add_summary( + histogram_raw( + tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts + ), + global_step, + walltime, + ) + + def add_image( + self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW" + ): + """Add image data to summary. + + Note that this requires the ``pillow`` package. 
+ + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (str): Image data format specification of the form + CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to + convert a batch of tensor into 3xHxW format or call ``add_images`` and let us do the job. + Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is also suitable as long as + corresponding ``dataformats`` argument is passed, e.g. ``CHW``, ``HWC``, ``HW``. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + img = np.zeros((3, 100, 100)) + img[0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + img_HWC = np.zeros((100, 100, 3)) + img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + writer = SummaryWriter() + writer.add_image('my_image', img, 0) + + # If you have non-default dimension setting, set the dataformats argument. + writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC') + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_image.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_image") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime + ) + + def add_images( + self, tag, img_tensor, global_step=None, walltime=None, dataformats="NCHW" + ): + """Add batched image data to summary. + + Note that this requires the ``pillow`` package. 
+ + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (str): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be + accepted. e.g. NCHW or NHWC. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + + img_batch = np.zeros((16, 3, 100, 100)) + for i in range(16): + img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i + img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i + + writer = SummaryWriter() + writer.add_images('my_image_batch', img_batch, 0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_images.png + :scale: 30 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_images") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime + ) + + def add_image_with_boxes( + self, + tag, + img_tensor, + box_tensor, + global_step=None, + walltime=None, + rescale=1, + dataformats="CHW", + labels=None, + ): + """Add image and draw bounding boxes on the image. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + box_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Box data (for detected objects) + box should be represented as [x1, y1, x2, y2]. 
+ global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + rescale (float): Optional scale override + dataformats (str): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + labels (list of string): The label to be shown for each bounding box. + Shape: + img_tensor: Default is :math:`(3, H, W)`. It can be specified with ``dataformats`` argument. + e.g. CHW or HWC + + box_tensor: (torch.Tensor, numpy.ndarray, or string/blobname): NX4, where N is the number of + boxes and each 4 elements in a row represents (xmin, ymin, xmax, ymax). + """ + torch._C._log_api_usage_once("tensorboard.logging.add_image_with_boxes") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + if self._check_caffe2_blob(box_tensor): + from caffe2.python import workspace + + box_tensor = workspace.FetchBlob(box_tensor) + if labels is not None: + if isinstance(labels, str): + labels = [labels] + if len(labels) != box_tensor.shape[0]: + labels = None + self._get_file_writer().add_summary( + image_boxes( + tag, + img_tensor, + box_tensor, + rescale=rescale, + dataformats=dataformats, + labels=labels, + ), + global_step, + walltime, + ) + + def add_figure( + self, + tag: str, + figure: Union["Figure", List["Figure"]], + global_step: Optional[int] = None, + close: bool = True, + walltime: Optional[float] = None + ) -> None: + """Render matplotlib figure into an image and add it to summary. + + Note that this requires the ``matplotlib`` package. 
+ + Args: + tag: Data identifier + figure: Figure or a list of figures + global_step: Global step value to record + close: Flag to automatically close the figure + walltime: Optional override default walltime (time.time()) + seconds after epoch of event + """ + torch._C._log_api_usage_once("tensorboard.logging.add_figure") + if isinstance(figure, list): + self.add_image( + tag, + figure_to_image(figure, close), + global_step, + walltime, + dataformats="NCHW", + ) + else: + self.add_image( + tag, + figure_to_image(figure, close), + global_step, + walltime, + dataformats="CHW", + ) + + def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None): + """Add video data to summary. + + Note that this requires the ``moviepy`` package. + + Args: + tag (str): Data identifier + vid_tensor (torch.Tensor): Video data + global_step (int): Global step value to record + fps (float or int): Frames per second + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + """ + torch._C._log_api_usage_once("tensorboard.logging.add_video") + self._get_file_writer().add_summary( + video(tag, vid_tensor, fps), global_step, walltime + ) + + def add_audio( + self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None + ): + """Add audio data to summary. + + Args: + tag (str): Data identifier + snd_tensor (torch.Tensor): Sound data + global_step (int): Global step value to record + sample_rate (int): sample rate in Hz + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1]. 
+ """ + torch._C._log_api_usage_once("tensorboard.logging.add_audio") + if self._check_caffe2_blob(snd_tensor): + from caffe2.python import workspace + + snd_tensor = workspace.FetchBlob(snd_tensor) + self._get_file_writer().add_summary( + audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime + ) + + def add_text(self, tag, text_string, global_step=None, walltime=None): + """Add text data to summary. + + Args: + tag (str): Data identifier + text_string (str): String to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Examples:: + + writer.add_text('lstm', 'This is an lstm', 0) + writer.add_text('rnn', 'This is an rnn', 10) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_text") + self._get_file_writer().add_summary( + text(tag, text_string), global_step, walltime + ) + + def add_onnx_graph(self, prototxt): + torch._C._log_api_usage_once("tensorboard.logging.add_onnx_graph") + self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt)) + + def add_graph( + self, model, input_to_model=None, verbose=False, use_strict_trace=True + ): + """Add graph data to summary. + + Args: + model (torch.nn.Module): Model to draw. + input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of + variables to be fed. + verbose (bool): Whether to print graph structure in console. + use_strict_trace (bool): Whether to pass keyword argument `strict` to + `torch.jit.trace`. 
Pass False when you want the tracer to + record your mutable container types (list, dict) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_graph") + if hasattr(model, "forward"): + # A valid PyTorch model should have a 'forward' method + self._get_file_writer().add_graph( + graph(model, input_to_model, verbose, use_strict_trace) + ) + else: + # Caffe2 models do not have the 'forward' method + from caffe2.proto import caffe2_pb2 + from caffe2.python import core + + from ._caffe2_graph import ( + model_to_graph_def, + nets_to_graph_def, + protos_to_graph_def, + ) + + if isinstance(model, list): + if isinstance(model[0], core.Net): + current_graph = nets_to_graph_def(model) + elif isinstance(model[0], caffe2_pb2.NetDef): + current_graph = protos_to_graph_def(model) + else: + # Handles cnn.CNNModelHelper, model_helper.ModelHelper + current_graph = model_to_graph_def(model) + event = event_pb2.Event(graph_def=current_graph.SerializeToString()) + self._get_file_writer().add_event(event) + + @staticmethod + def _encode(rawstr): + # I'd use urllib but, I'm unsure about the differences from python3 to python2, etc. + retval = rawstr + retval = retval.replace("%", f"%{ord('%'):02x}") + retval = retval.replace("/", f"%{ord('/'):02x}") + retval = retval.replace("\\", "%%%02x" % (ord("\\"))) + return retval + + def add_embedding( + self, + mat, + metadata=None, + label_img=None, + global_step=None, + tag="default", + metadata_header=None, + ): + """Add embedding projector data to summary. 
+ + Args: + mat (torch.Tensor or numpy.ndarray): A matrix which each row is the feature vector of the data point + metadata (list): A list of labels, each element will be convert to string + label_img (torch.Tensor): Images correspond to each data point + global_step (int): Global step value to record + tag (str): Name for the embedding + Shape: + mat: :math:`(N, D)`, where N is number of data and D is feature dimension + + label_img: :math:`(N, C, H, W)` + + Examples:: + + import keyword + import torch + meta = [] + while len(meta)<100: + meta = meta+keyword.kwlist # get some strings + meta = meta[:100] + + for i, v in enumerate(meta): + meta[i] = v+str(i) + + label_img = torch.rand(100, 3, 10, 32) + for i in range(100): + label_img[i]*=i/100.0 + + writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img) + writer.add_embedding(torch.randn(100, 5), label_img=label_img) + writer.add_embedding(torch.randn(100, 5), metadata=meta) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_embedding") + mat = make_np(mat) + if global_step is None: + global_step = 0 + # clear pbtxt? + + # Maybe we should encode the tag so slashes don't trip us up? + # I don't think this will mess us up, but better safe than sorry. + subdir = f"{str(global_step).zfill(5)}/{self._encode(tag)}" + save_path = os.path.join(self._get_file_writer().get_logdir(), subdir) + + fs = tf.io.gfile + if fs.exists(save_path): + if fs.isdir(save_path): + print( + "warning: Embedding dir exists, did you set global_step for add_embedding()?" + ) + else: + raise Exception( + f"Path: `{save_path}` exists, but is a file. Cannot proceed." 
+ ) + else: + fs.makedirs(save_path) + + if metadata is not None: + assert mat.shape[0] == len( + metadata + ), "#labels should equal with #data points" + make_tsv(metadata, save_path, metadata_header=metadata_header) + + if label_img is not None: + assert ( + mat.shape[0] == label_img.shape[0] + ), "#images should equal with #data points" + make_sprite(label_img, save_path) + + assert ( + mat.ndim == 2 + ), "mat should be 2D, where mat.size(0) is the number of data points" + make_mat(mat, save_path) + + # Filesystem doesn't necessarily have append semantics, so we store an + # internal buffer to append to and re-write whole file after each + # embedding is added + if not hasattr(self, "_projector_config"): + self._projector_config = ProjectorConfig() + embedding_info = get_embedding_info( + metadata, label_img, subdir, global_step, tag + ) + self._projector_config.embeddings.extend([embedding_info]) + + from google.protobuf import text_format + + config_pbtxt = text_format.MessageToString(self._projector_config) + write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt) + + def add_pr_curve( + self, + tag, + labels, + predictions, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None, + ): + """Add precision recall curve. + + Plotting a precision-recall curve lets you understand your model's + performance under different threshold settings. With this function, + you provide the ground truth labeling (T/F) and prediction confidence + (usually the output of your model) for each target. The TensorBoard UI + will let you choose the threshold interactively. + + Args: + tag (str): Data identifier + labels (torch.Tensor, numpy.ndarray, or string/blobname): + Ground truth data. Binary label for each element. + predictions (torch.Tensor, numpy.ndarray, or string/blobname): + The probability that an element be classified as true. 
+ Value should be in [0, 1] + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + labels = np.random.randint(2, size=100) # binary label + predictions = np.random.rand(100) + writer = SummaryWriter() + writer.add_pr_curve('pr_curve', labels, predictions, 0) + writer.close() + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve") + labels, predictions = make_np(labels), make_np(predictions) + self._get_file_writer().add_summary( + pr_curve(tag, labels, predictions, num_thresholds, weights), + global_step, + walltime, + ) + + def add_pr_curve_raw( + self, + tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None, + ): + """Add precision recall curve with raw data. + + Args: + tag (str): Data identifier + true_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): true positive counts + false_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): false positive counts + true_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): true negative counts + false_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): false negative counts + precision (torch.Tensor, numpy.ndarray, or string/blobname): precision + recall (torch.Tensor, numpy.ndarray, or string/blobname): recall + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. 
+ walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md + """ + torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve_raw") + self._get_file_writer().add_summary( + pr_curve_raw( + tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + num_thresholds, + weights, + ), + global_step, + walltime, + ) + + def add_custom_scalars_multilinechart( + self, tags, category="default", title="untitled" + ): + """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*. + + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330']) + """ + torch._C._log_api_usage_once( + "tensorboard.logging.add_custom_scalars_multilinechart" + ) + layout = {category: {title: ["Multiline", tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars_marginchart( + self, tags, category="default", title="untitled" + ): + """Shorthand for creating marginchart. + + Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*, + which should have exactly 3 elements. + + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006']) + """ + torch._C._log_api_usage_once( + "tensorboard.logging.add_custom_scalars_marginchart" + ) + assert len(tags) == 3 + layout = {category: {title: ["Margin", tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars(self, layout): + """Create special chart by collecting charts tags in 'scalars'. + + NOTE: This function can only be called once for each SummaryWriter() object. 
+ + Because it only provides metadata to tensorboard, the function can be called before or after the training loop. + + Args: + layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary + {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type + (one of **Multiline** or **Margin**) and the second element should be a list containing the tags + you have used in add_scalar function, which will be collected into the new chart. + + Examples:: + + layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, + 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], + 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} + + writer.add_custom_scalars(layout) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars") + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_mesh( + self, + tag, + vertices, + colors=None, + faces=None, + config_dict=None, + global_step=None, + walltime=None, + ): + """Add meshes or 3D point clouds to TensorBoard. + + The visualization is based on Three.js, + so it allows users to interact with the rendered object. Besides the basic definitions + such as vertices, faces, users can further provide camera parameter, lighting condition, etc. + Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for + advanced usage. + + Args: + tag (str): Data identifier + vertices (torch.Tensor): List of the 3D coordinates of vertices. + colors (torch.Tensor): Colors for each vertex + faces (torch.Tensor): Indices of vertices within each triangle. (Optional) + config_dict: Dictionary with ThreeJS classes names and configuration. + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Shape: + vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels) + + colors: :math:`(B, N, 3)`. 
The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + + faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + vertices_tensor = torch.as_tensor([ + [1, 1, 1], + [-1, -1, 1], + [1, -1, -1], + [-1, 1, -1], + ], dtype=torch.float).unsqueeze(0) + colors_tensor = torch.as_tensor([ + [255, 0, 0], + [0, 255, 0], + [0, 0, 255], + [255, 0, 255], + ], dtype=torch.int).unsqueeze(0) + faces_tensor = torch.as_tensor([ + [0, 2, 3], + [0, 3, 1], + [0, 1, 2], + [1, 3, 2], + ], dtype=torch.int).unsqueeze(0) + + writer = SummaryWriter() + writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor) + + writer.close() + """ + torch._C._log_api_usage_once("tensorboard.logging.add_mesh") + self._get_file_writer().add_summary( + mesh(tag, vertices, colors, faces, config_dict), global_step, walltime + ) + + def flush(self): + """Flushes the event file to disk. + + Call this method to make sure that all pending events have been written to + disk. 
+ """ + if self.all_writers is None: + return + for writer in self.all_writers.values(): + writer.flush() + + def close(self): + if self.all_writers is None: + return # ignore double close + for writer in self.all_writers.values(): + writer.flush() + writer.close() + self.file_writer = self.all_writers = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() diff --git a/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/viz/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/torch/utils/viz/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/fully_sharded_layers.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/fully_sharded_layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31ab361e0377a758f1619d194fb19319c877c05f Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/fully_sharded_layers.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/layers.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87402b022e2ab1b23e4b4483f59b7e57d81567fe Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/layers.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/lora.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/lora.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba2f675f9ef7b871c53d1427fe1e2e8b74e52fdf Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/lora.cpython-310.pyc differ diff 
--git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/models.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d4fc39f32446f5e7d184f4f5a9fb0071401ea8 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/models.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/punica.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/punica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..576f5ccf2718043e0ff0f7ae9de3909f79943dae Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/punica.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/request.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/request.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f9e3fb6c08be1ecbd41efec340103a82175fcdd Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/request.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/worker_manager.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/worker_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd5a8c32f218bb20638832745bcc3828fbfb0d79 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/__pycache__/worker_manager.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2a958826cdee600d6a3738bcc87c123fb87a4557 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/bgmv_expand.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/bgmv_expand.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b2a3c40da4e7a5404257259d72ce1d0747658f7 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/bgmv_expand.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/bgmv_shrink.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/bgmv_shrink.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acaf5463a32c1e24ef04299090b4d9497c26b861 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/bgmv_shrink.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/sgmv_expand.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/sgmv_expand.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fbcca3aa11f2bacfe3452ea4d9a53c50e741800 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/sgmv_expand.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/sgmv_shrink.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/sgmv_shrink.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def1ca22795dfec94fb095ca504521c78e249a46 Binary files /dev/null and 
b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/sgmv_shrink.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ebbe5ddd19b2fb69a8d96d22138edd8019ab44a Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/vllm/lora/ops/__pycache__/utils.cpython-310.pyc differ