ZTWHHH commited on
Commit
1db5ffe
·
verified ·
1 Parent(s): 62d11b0

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 +3 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py +0 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc +0 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc +0 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc +0 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc +0 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc +0 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py +106 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py +106 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py +82 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py +93 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py +81 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py +0 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc +0 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc +0 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc +0 -0
  21. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc +0 -0
  22. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc +0 -0
  23. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc +0 -0
  24. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py +40 -0
  25. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py +355 -0
  26. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py +320 -0
  27. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py +187 -0
  28. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/fuzzer.py +457 -0
  29. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timeit_template.cpp +43 -0
  30. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py +537 -0
  31. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py +906 -0
  32. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py +0 -0
  33. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc +0 -0
  34. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__init__.py +1 -0
  35. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc +0 -0
  36. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc +0 -0
  37. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc +0 -0
  38. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc +0 -0
  39. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/constants.py +62 -0
  40. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py +0 -0
  41. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py +1129 -0
  42. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/version.py +1 -0
  43. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/jit/__init__.py +1 -0
  44. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py +13 -0
  45. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc +0 -0
  46. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc +0 -0
  47. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc +0 -0
  48. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc +0 -0
  49. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc +0 -0
  50. evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -937,3 +937,4 @@ infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x
937
  infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
938
  infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
939
  infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_spropack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
937
  infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
938
  infer_4_47_1/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
939
  infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_spropack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
940
+ deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filter=lfs diff=lfs merge=lfs -text
deepseekvl2/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca5c6f81d584906b4d32b984ad8704dd65bf75bdab4334ed22ce7eef7501a95a
3
+ size 279161544
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (497 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py ADDED
File without changes
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc ADDED
Binary file (2.88 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc ADDED
Binary file (2.68 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc ADDED
Binary file (2.43 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc ADDED
Binary file (2.93 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc ADDED
Binary file (2.53 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+
15
+ class BinaryOpFuzzer(Fuzzer):
16
+ def __init__(self, seed, dtype=torch.float32, cuda=False):
17
+ super().__init__(
18
+ parameters=[
19
+ # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
20
+ FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
21
+
22
+ # Shapes for `x` and `y`.
23
+ # It is important to test all shapes, however
24
+ # powers of two are especially important and therefore
25
+ # warrant special attention. This is done by generating
26
+ # both a value drawn from all integers between the min and
27
+ # max allowed values, and another from only the powers of two
28
+ # (both distributions are loguniform) and then randomly
29
+ # selecting between the two.
30
+ # Moreover, `y` will occasionally have singleton
31
+ # dimensions in order to test broadcasting.
32
+ [
33
+ FuzzedParameter(
34
+ name=f"k_any_{i}",
35
+ minval=_MIN_DIM_SIZE,
36
+ maxval=_MAX_DIM_SIZE,
37
+ distribution="loguniform",
38
+ ) for i in range(3)
39
+ ],
40
+ [
41
+ FuzzedParameter(
42
+ name=f"k_pow2_{i}",
43
+ distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
44
+ ) for i in range(3)
45
+ ],
46
+ [
47
+ FuzzedParameter(
48
+ name=f"k{i}",
49
+ distribution={
50
+ ParameterAlias(f"k_any_{i}"): 0.8,
51
+ ParameterAlias(f"k_pow2_{i}"): 0.2,
52
+ },
53
+ strict=True,
54
+ ) for i in range(3)
55
+ ],
56
+
57
+ [
58
+ FuzzedParameter(
59
+ name=f"y_k{i}",
60
+ distribution={
61
+ ParameterAlias(f"k{i}"): 0.8,
62
+ 1: 0.2,
63
+ },
64
+ strict=True,
65
+ ) for i in range(3)
66
+ ],
67
+
68
+ # Steps for `x` and `y`. (Benchmarks strided memory access.)
69
+ [
70
+ FuzzedParameter(
71
+ name=f"{name}_step_{i}",
72
+ distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
73
+ )
74
+ for i in range(3)
75
+ for name in ("x", "y")
76
+ ],
77
+
78
+ # Repeatable entropy for downstream applications.
79
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
80
+ ],
81
+ tensors=[
82
+ FuzzedTensor(
83
+ name="x",
84
+ size=("k0", "k1", "k2"),
85
+ steps=("x_step_0", "x_step_1", "x_step_2"),
86
+ probability_contiguous=0.75,
87
+ min_elements=4 * 1024,
88
+ max_elements=32 * 1024 ** 2,
89
+ max_allocation_bytes=2 * 1024**3, # 2 GB
90
+ dim_parameter="dim",
91
+ dtype=dtype,
92
+ cuda=cuda,
93
+ ),
94
+ FuzzedTensor(
95
+ name="y",
96
+ size=("y_k0", "y_k1", "y_k2"),
97
+ steps=("x_step_0", "x_step_1", "x_step_2"),
98
+ probability_contiguous=0.75,
99
+ max_allocation_bytes=2 * 1024**3, # 2 GB
100
+ dim_parameter="dim",
101
+ dtype=dtype,
102
+ cuda=cuda,
103
+ ),
104
+ ],
105
+ seed=seed,
106
+ )
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+
15
+ class BinaryOpSparseFuzzer(Fuzzer):
16
+ def __init__(self, seed, dtype=torch.float32, cuda=False):
17
+ super().__init__(
18
+ parameters=[
19
+ # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
20
+ FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
21
+ FuzzedParameter(
22
+ name="sparse_dim",
23
+ distribution={1: 0.4, 2: 0.4, 3: 0.2},
24
+ strict=True
25
+ ),
26
+ # Shapes for `x` and `y`.
27
+ # It is important to test all shapes, however
28
+ # powers of two are especially important and therefore
29
+ # warrant special attention. This is done by generating
30
+ # both a value drawn from all integers between the min and
31
+ # max allowed values, and another from only the powers of two
32
+ # (both distributions are loguniform) and then randomly
33
+ # selecting between the two.
34
+ # Moreover, `y` will occasionally have singleton
35
+ # dimensions in order to test broadcasting.
36
+ [
37
+ FuzzedParameter(
38
+ name=f"k_any_{i}",
39
+ minval=_MIN_DIM_SIZE,
40
+ maxval=_MAX_DIM_SIZE,
41
+ distribution="loguniform",
42
+ ) for i in range(3)
43
+ ],
44
+ [
45
+ FuzzedParameter(
46
+ name=f"k_pow2_{i}",
47
+ distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
48
+ ) for i in range(3)
49
+ ],
50
+ [
51
+ FuzzedParameter(
52
+ name=f"k{i}",
53
+ distribution={
54
+ ParameterAlias(f"k_any_{i}"): 0.8,
55
+ ParameterAlias(f"k_pow2_{i}"): 0.2,
56
+ },
57
+ strict=True,
58
+ ) for i in range(3)
59
+ ],
60
+ [
61
+ FuzzedParameter(
62
+ name=f"y_k{i}",
63
+ distribution={
64
+ ParameterAlias(f"k{i}"): 1.0},
65
+ strict=True,
66
+ ) for i in range(3)
67
+ ],
68
+ FuzzedParameter(
69
+ name="density",
70
+ distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},
71
+ ),
72
+ FuzzedParameter(
73
+ name="coalesced",
74
+ distribution={True: 0.5, False: 0.5},
75
+ ),
76
+ # Repeatable entropy for downstream applications.
77
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
78
+ ],
79
+ tensors=[
80
+ FuzzedSparseTensor(
81
+ name="x",
82
+ size=("k0", "k1", "k2"),
83
+ dim_parameter="dim_parameter",
84
+ sparse_dim="sparse_dim",
85
+ density="density",
86
+ coalesced="coalesced",
87
+ min_elements=4 * 1024,
88
+ max_elements=32 * 1024 ** 2,
89
+ dtype=dtype,
90
+ cuda=cuda,
91
+ ),
92
+ FuzzedSparseTensor(
93
+ name="y",
94
+ size=("y_k0", "y_k1", "y_k2"),
95
+ dim_parameter="dim_parameter",
96
+ sparse_dim="sparse_dim",
97
+ density="density",
98
+ coalesced="coalesced",
99
+ min_elements=4 * 1024,
100
+ max_elements=32 * 1024 ** 2,
101
+ dtype=dtype,
102
+ cuda=cuda,
103
+ ),
104
+ ],
105
+ seed=seed,
106
+ )
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+ import torch
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+ class UnaryOpSparseFuzzer(Fuzzer):
15
+ def __init__(self, seed, dtype=torch.float32, cuda=False):
16
+ super().__init__(
17
+ parameters=[
18
+ # Sparse dim parameter of x. (e.g. 1D, 2D, or 3D.)
19
+ FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
20
+ FuzzedParameter(
21
+ name="sparse_dim",
22
+ distribution={1: 0.4, 2: 0.4, 3: 0.2},
23
+ strict=True
24
+ ),
25
+ # Shapes for `x`.
26
+ # It is important to test all shapes, however
27
+ # powers of two are especially important and therefore
28
+ # warrant special attention. This is done by generating
29
+ # both a value drawn from all integers between the min and
30
+ # max allowed values, and another from only the powers of two
31
+ # (both distributions are loguniform) and then randomly
32
+ # selecting between the two.
33
+ [
34
+ FuzzedParameter(
35
+ name=f"k_any_{i}",
36
+ minval=_MIN_DIM_SIZE,
37
+ maxval=_MAX_DIM_SIZE,
38
+ distribution="loguniform",
39
+ ) for i in range(3)
40
+ ],
41
+ [
42
+ FuzzedParameter(
43
+ name=f"k_pow2_{i}",
44
+ distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
45
+ ) for i in range(3)
46
+ ],
47
+ [
48
+ FuzzedParameter(
49
+ name=f"k{i}",
50
+ distribution={
51
+ ParameterAlias(f"k_any_{i}"): 0.8,
52
+ ParameterAlias(f"k_pow2_{i}"): 0.2,
53
+ },
54
+ strict=True,
55
+ ) for i in range(3)
56
+ ],
57
+ FuzzedParameter(
58
+ name="density",
59
+ distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},
60
+ ),
61
+ FuzzedParameter(
62
+ name="coalesced",
63
+ distribution={True: 0.5, False: 0.5},
64
+ ),
65
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
66
+ ],
67
+ tensors=[
68
+ FuzzedSparseTensor(
69
+ name="x",
70
+ size=("k0", "k1", "k2"),
71
+ dim_parameter="dim_parameter",
72
+ sparse_dim="sparse_dim",
73
+ min_elements=4 * 1024,
74
+ max_elements=32 * 1024 ** 2,
75
+ density="density",
76
+ coalesced="coalesced",
77
+ dtype=dtype,
78
+ cuda=cuda,
79
+ ),
80
+ ],
81
+ seed=seed,
82
+ )
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+ from torch.utils import benchmark
5
+ from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias
6
+
7
+
8
+ __all__ = ['SpectralOpFuzzer']
9
+
10
+ MIN_DIM_SIZE = 16
11
+ MAX_DIM_SIZE = 16 * 1024
12
+
13
+ def power_range(upper_bound, base):
14
+ return (base ** i for i in range(int(math.log(upper_bound, base)) + 1))
15
+
16
+ # List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE
17
+ # These numbers factorize into multiples of prime factors 2, 3, and 5 only
18
+ # and are usually the fastest in FFT implementations.
19
+ REGULAR_SIZES = []
20
+ for i in power_range(MAX_DIM_SIZE, 2):
21
+ for j in power_range(MAX_DIM_SIZE // i, 3):
22
+ ij = i * j
23
+ for k in power_range(MAX_DIM_SIZE // ij, 5):
24
+ ijk = ij * k
25
+ if ijk > MIN_DIM_SIZE:
26
+ REGULAR_SIZES.append(ijk)
27
+ REGULAR_SIZES.sort()
28
+
29
+ class SpectralOpFuzzer(benchmark.Fuzzer):
30
+ def __init__(self, *, seed: int, dtype=torch.float64,
31
+ cuda: bool = False, probability_regular: float = 1.0):
32
+ super().__init__(
33
+ parameters=[
34
+ # Dimensionality of x. (e.g. 1D, 2D, or 3D.)
35
+ FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
36
+
37
+ # Shapes for `x`.
38
+ # It is important to test all shapes, however
39
+ # regular sizes are especially important to the FFT and therefore
40
+ # warrant special attention. This is done by generating
41
+ # both a value drawn from all integers between the min and
42
+ # max allowed values, and another from only the regular numbers
43
+ # (both distributions are loguniform) and then randomly
44
+ # selecting between the two.
45
+ [
46
+ FuzzedParameter(
47
+ name=f"k_any_{i}",
48
+ minval=MIN_DIM_SIZE,
49
+ maxval=MAX_DIM_SIZE,
50
+ distribution="loguniform",
51
+ ) for i in range(3)
52
+ ],
53
+ [
54
+ FuzzedParameter(
55
+ name=f"k_regular_{i}",
56
+ distribution={size: 1. / len(REGULAR_SIZES) for size in REGULAR_SIZES}
57
+ ) for i in range(3)
58
+ ],
59
+ [
60
+ FuzzedParameter(
61
+ name=f"k{i}",
62
+ distribution={
63
+ ParameterAlias(f"k_regular_{i}"): probability_regular,
64
+ ParameterAlias(f"k_any_{i}"): 1 - probability_regular,
65
+ },
66
+ strict=True,
67
+ ) for i in range(3)
68
+ ],
69
+
70
+ # Steps for `x`. (Benchmarks strided memory access.)
71
+ [
72
+ FuzzedParameter(
73
+ name=f"step_{i}",
74
+ distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
75
+ ) for i in range(3)
76
+ ],
77
+ ],
78
+ tensors=[
79
+ FuzzedTensor(
80
+ name="x",
81
+ size=("k0", "k1", "k2"),
82
+ steps=("step_0", "step_1", "step_2"),
83
+ probability_contiguous=0.75,
84
+ min_elements=4 * 1024,
85
+ max_elements=32 * 1024 ** 2,
86
+ max_allocation_bytes=2 * 1024**3, # 2 GB
87
+ dim_parameter="ndim",
88
+ dtype=dtype,
89
+ cuda=cuda,
90
+ ),
91
+ ],
92
+ seed=seed,
93
+ )
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+
15
+ class UnaryOpFuzzer(Fuzzer):
16
+ def __init__(self, seed, dtype=torch.float32, cuda=False):
17
+ super().__init__(
18
+ parameters=[
19
+ # Dimensionality of x. (e.g. 1D, 2D, or 3D.)
20
+ FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
21
+
22
+ # Shapes for `x`.
23
+ # It is important to test all shapes, however
24
+ # powers of two are especially important and therefore
25
+ # warrant special attention. This is done by generating
26
+ # both a value drawn from all integers between the min and
27
+ # max allowed values, and another from only the powers of two
28
+ # (both distributions are loguniform) and then randomly
29
+ # selecting between the two.
30
+ [
31
+ FuzzedParameter(
32
+ name=f"k_any_{i}",
33
+ minval=_MIN_DIM_SIZE,
34
+ maxval=_MAX_DIM_SIZE,
35
+ distribution="loguniform",
36
+ ) for i in range(3)
37
+ ],
38
+ [
39
+ FuzzedParameter(
40
+ name=f"k_pow2_{i}",
41
+ distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
42
+ ) for i in range(3)
43
+ ],
44
+ [
45
+ FuzzedParameter(
46
+ name=f"k{i}",
47
+ distribution={
48
+ ParameterAlias(f"k_any_{i}"): 0.8,
49
+ ParameterAlias(f"k_pow2_{i}"): 0.2,
50
+ },
51
+ strict=True,
52
+ ) for i in range(3)
53
+ ],
54
+
55
+ # Steps for `x`. (Benchmarks strided memory access.)
56
+ [
57
+ FuzzedParameter(
58
+ name=f"x_step_{i}",
59
+ distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
60
+ ) for i in range(3)
61
+ ],
62
+
63
+ # Repeatable entropy for downstream applications.
64
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
65
+ ],
66
+ tensors=[
67
+ FuzzedTensor(
68
+ name="x",
69
+ size=("k0", "k1", "k2"),
70
+ steps=("x_step_0", "x_step_1", "x_step_2"),
71
+ probability_contiguous=0.75,
72
+ min_elements=4 * 1024,
73
+ max_elements=32 * 1024 ** 2,
74
+ max_allocation_bytes=2 * 1024**3, # 2 GB
75
+ dim_parameter="dim",
76
+ dtype=dtype,
77
+ cuda=cuda,
78
+ ),
79
+ ],
80
+ seed=seed,
81
+ )
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py ADDED
File without changes
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc ADDED
Binary file (2.06 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc ADDED
Binary file (5.22 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc ADDED
Binary file (3.7 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc ADDED
Binary file (5.05 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc ADDED
Binary file (18.5 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Callable, Dict, Protocol, runtime_checkable
2
+
3
+
4
+ class TimerClass(Protocol):
5
+ """This is the portion of the `timeit.Timer` API used by benchmark utils."""
6
+ def __init__(
7
+ self,
8
+ stmt: str,
9
+ setup: str,
10
+ timer: Callable[[], float],
11
+ globals: Dict[str, Any],
12
+ **kwargs: Any,
13
+ ) -> None:
14
+ ...
15
+
16
+ def timeit(self, number: int) -> float:
17
+ ...
18
+
19
+
20
+ @runtime_checkable
21
+ class TimeitModuleType(Protocol):
22
+ """Modules generated from `timeit_template.cpp`."""
23
+ def timeit(self, number: int) -> float:
24
+ ...
25
+
26
+
27
+ class CallgrindModuleType(Protocol):
28
+ """Replicates the valgrind endpoints in `torch._C`.
29
+
30
+ These bindings are used to collect Callgrind profiles on earlier versions
31
+ of PyTorch and will eventually be removed.
32
+ """
33
+ __file__: str
34
+ __name__: str
35
+
36
+ def _valgrind_supported_platform(self) -> bool:
37
+ ...
38
+
39
+ def _valgrind_toggle(self) -> None:
40
+ ...
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base shared classes and utilities."""
2
+
3
+ import collections
4
+ import contextlib
5
+ import dataclasses
6
+ import os
7
+ import shutil
8
+ import tempfile
9
+ import textwrap
10
+ import time
11
+ from typing import cast, Any, DefaultDict, Dict, Iterable, Iterator, List, Optional, Tuple
12
+ import uuid
13
+
14
+ import torch
15
+
16
+
17
+ __all__ = ["TaskSpec", "Measurement", "select_unit", "unit_to_english", "trim_sigfig", "ordered_unique", "set_torch_threads"]
18
+
19
+
20
+ _MAX_SIGNIFICANT_FIGURES = 4
21
+ _MIN_CONFIDENCE_INTERVAL = 25e-9 # 25 ns
22
+
23
+ # Measurement will include a warning if the distribution is suspect. All
24
+ # runs are expected to have some variation; these parameters set the
25
+ # thresholds.
26
+ _IQR_WARN_THRESHOLD = 0.1
27
+ _IQR_GROSS_WARN_THRESHOLD = 0.25
28
+
29
+
30
+ @dataclasses.dataclass(init=True, repr=False, eq=True, frozen=True)
31
+ class TaskSpec:
32
+ """Container for information used to define a Timer. (except globals)"""
33
+ stmt: str
34
+ setup: str
35
+ global_setup: str = ""
36
+ label: Optional[str] = None
37
+ sub_label: Optional[str] = None
38
+ description: Optional[str] = None
39
+ env: Optional[str] = None
40
+ num_threads: int = 1
41
+
42
+ @property
43
+ def title(self) -> str:
44
+ """Best effort attempt at a string label for the measurement."""
45
+ if self.label is not None:
46
+ return self.label + (f": {self.sub_label}" if self.sub_label else "")
47
+ elif "\n" not in self.stmt:
48
+ return self.stmt + (f": {self.sub_label}" if self.sub_label else "")
49
+ return (
50
+ f"stmt:{f' ({self.sub_label})' if self.sub_label else ''}\n"
51
+ f"{textwrap.indent(self.stmt, ' ')}"
52
+ )
53
+
54
+ def setup_str(self) -> str:
55
+ return (
56
+ "" if (self.setup == "pass" or not self.setup)
57
+ else f"setup:\n{textwrap.indent(self.setup, ' ')}" if "\n" in self.setup
58
+ else f"setup: {self.setup}"
59
+ )
60
+
61
+ def summarize(self) -> str:
62
+ """Build TaskSpec portion of repr string for other containers."""
63
+ sections = [
64
+ self.title,
65
+ self.description or "",
66
+ self.setup_str(),
67
+ ]
68
+ return "\n".join([f"{i}\n" if "\n" in i else i for i in sections if i])
69
+
70
+ _TASKSPEC_FIELDS = tuple(i.name for i in dataclasses.fields(TaskSpec))
71
+
72
+
73
+ @dataclasses.dataclass(init=True, repr=False)
74
+ class Measurement:
75
+ """The result of a Timer measurement.
76
+
77
+ This class stores one or more measurements of a given statement. It is
78
+ serializable and provides several convenience methods
79
+ (including a detailed __repr__) for downstream consumers.
80
+ """
81
+ number_per_run: int
82
+ raw_times: List[float]
83
+ task_spec: TaskSpec
84
+ metadata: Optional[Dict[Any, Any]] = None # Reserved for user payloads.
85
+
86
+ def __post_init__(self) -> None:
87
+ self._sorted_times: Tuple[float, ...] = ()
88
+ self._warnings: Tuple[str, ...] = ()
89
+ self._median: float = -1.0
90
+ self._mean: float = -1.0
91
+ self._p25: float = -1.0
92
+ self._p75: float = -1.0
93
+
94
+ def __getattr__(self, name: str) -> Any:
95
+ # Forward TaskSpec fields for convenience.
96
+ if name in _TASKSPEC_FIELDS:
97
+ return getattr(self.task_spec, name)
98
+ return super().__getattribute__(name)
99
+
100
+ # =========================================================================
101
+ # == Convenience methods for statistics ===================================
102
+ # =========================================================================
103
+ #
104
+ # These methods use raw time divided by number_per_run; this is an
105
+ # extrapolation and hides the fact that different number_per_run will
106
+ # result in different amortization of overheads, however if Timer has
107
+ # selected an appropriate number_per_run then this is a non-issue, and
108
+ # forcing users to handle that division would result in a poor experience.
109
+ @property
110
+ def times(self) -> List[float]:
111
+ return [t / self.number_per_run for t in self.raw_times]
112
+
113
+ @property
114
+ def median(self) -> float:
115
+ self._lazy_init()
116
+ return self._median
117
+
118
+ @property
119
+ def mean(self) -> float:
120
+ self._lazy_init()
121
+ return self._mean
122
+
123
+ @property
124
+ def iqr(self) -> float:
125
+ self._lazy_init()
126
+ return self._p75 - self._p25
127
+
128
    @property
    def significant_figures(self) -> int:
        """Approximate significant figure estimate.

        This property is intended to give a convenient way to estimate the
        precision of a measurement. It only uses the interquartile region to
        estimate statistics to try to mitigate skew from the tails, and
        uses a static z value of 1.645 since it is not expected to be used
        for small values of `n`, so z can approximate `t`.

        The significant figure estimation used in conjunction with the
        `trim_sigfig` method to provide a more human interpretable data
        summary. __repr__ does not use this method; it simply displays raw
        values. Significant figure estimation is intended for `Compare`.
        """
        self._lazy_init()
        n_total = len(self._sorted_times)
        # Restrict to the interquartile region [n/4, ceil(3n/4)) to mitigate
        # skew from the tails.
        lower_bound = int(n_total // 4)
        upper_bound = int(torch.tensor(3 * n_total / 4).ceil())
        interquartile_points: Tuple[float, ...] = self._sorted_times[lower_bound:upper_bound]
        std = torch.tensor(interquartile_points).std(unbiased=False).item()
        sqrt_n = torch.tensor(len(interquartile_points)).sqrt().item()

        # Rough estimates. These are by no means statistically rigorous.
        confidence_interval = max(1.645 * std / sqrt_n, _MIN_CONFIDENCE_INTERVAL)
        relative_ci = torch.tensor(self._median / confidence_interval).log10().item()
        num_significant_figures = int(torch.tensor(relative_ci).floor())
        # Clamp to [1, _MAX_SIGNIFICANT_FIGURES] for display purposes.
        return min(max(num_significant_figures, 1), _MAX_SIGNIFICANT_FIGURES)
156
+
157
    @property
    def has_warnings(self) -> bool:
        """True if variability warnings were generated for this measurement."""
        self._lazy_init()
        return bool(self._warnings)
161
+
162
    def _lazy_init(self) -> None:
        """Compute and cache summary statistics on first access.

        `_sorted_times` doubles as the "already initialized" flag, so the
        stats (and any IQR warnings) are computed at most once.
        """
        if self.raw_times and not self._sorted_times:
            self._sorted_times = tuple(sorted(self.times))
            _sorted_times = torch.tensor(self._sorted_times, dtype=torch.float64)
            self._median = _sorted_times.quantile(.5).item()
            self._mean = _sorted_times.mean().item()
            self._p25 = _sorted_times.quantile(.25).item()
            self._p75 = _sorted_times.quantile(.75).item()

            def add_warning(msg: str) -> None:
                # Warnings embed the relative IQR so users can judge severity.
                rel_iqr = self.iqr / self.median * 100
                self._warnings += (
                    f" WARNING: Interquartile range is {rel_iqr:.1f}% "
                    f"of the median measurement.\n {msg}",
                )

            # Thresholds escalate: gross threshold first, then the milder one.
            if not self.meets_confidence(_IQR_GROSS_WARN_THRESHOLD):
                add_warning("This suggests significant environmental influence.")
            elif not self.meets_confidence(_IQR_WARN_THRESHOLD):
                add_warning("This could indicate system fluctuation.")
182
+
183
+
184
    def meets_confidence(self, threshold: float = _IQR_WARN_THRESHOLD) -> bool:
        """True if the relative spread (IQR / median) is below `threshold`."""
        return self.iqr / self.median < threshold
186
+
187
    @property
    def title(self) -> str:
        """Title string, delegated to the underlying TaskSpec."""
        return self.task_spec.title
190
+
191
+ @property
192
+ def env(self) -> str:
193
+ return (
194
+ "Unspecified env" if self.taskspec.env is None
195
+ else cast(str, self.taskspec.env)
196
+ )
197
+
198
    @property
    def as_row_name(self) -> str:
        """Label used for this measurement's row in `Compare` output.

        Falls back from sub_label to stmt to a placeholder.
        """
        return self.sub_label or self.stmt or "[Unknown]"
201
+
202
    def __repr__(self) -> str:
        """
        Example repr:
            <utils.common.Measurement object at 0x7f395b6ac110>
            Broadcasting add (4x8)
            Median: 5.73 us
            IQR: 2.25 us (4.01 to 6.26)
            372 measurements, 100 runs per measurement, 1 thread
            WARNING: Interquartile range is 39.4% of the median measurement.
            This suggests significant environmental influence.
        """
        self._lazy_init()
        # Lines containing `skip_line` are filtered out at the end; this lets
        # the template below conditionally drop the IQR line.
        skip_line, newline = "MEASUREMENT_REPR_SKIP_LINE", "\n"
        n = len(self._sorted_times)
        time_unit, time_scale = select_unit(self._median)
        # IQR is not meaningful with fewer than four samples.
        iqr_filter = '' if n >= 4 else skip_line

        repr_str = f"""
{super().__repr__()}
{self.task_spec.summarize()}
{'Median: ' if n > 1 else ''}{self._median / time_scale:.2f} {time_unit}
{iqr_filter}IQR: {self.iqr / time_scale:.2f} {time_unit} ({self._p25 / time_scale:.2f} to {self._p75 / time_scale:.2f})
{n} measurement{'s' if n > 1 else ''}, {self.number_per_run} runs {'per measurement,' if n > 1 else ','} {self.num_threads} thread{'s' if self.num_threads > 1 else ''}
{newline.join(self._warnings)}""".strip()  # noqa: B950

        # Drop any template line that was marked for omission above.
        return "\n".join(l for l in repr_str.splitlines(keepends=False) if skip_line not in l)
228
+
229
+ @staticmethod
230
+ def merge(measurements: Iterable["Measurement"]) -> List["Measurement"]:
231
+ """Convenience method for merging replicates.
232
+
233
+ Merge will extrapolate times to `number_per_run=1` and will not
234
+ transfer any metadata. (Since it might differ between replicates)
235
+ """
236
+ grouped_measurements: DefaultDict[TaskSpec, List[Measurement]] = collections.defaultdict(list)
237
+ for m in measurements:
238
+ grouped_measurements[m.task_spec].append(m)
239
+
240
+ def merge_group(task_spec: TaskSpec, group: List["Measurement"]) -> "Measurement":
241
+ times: List[float] = []
242
+ for m in group:
243
+ # Different measurements could have different `number_per_run`,
244
+ # so we call `.times` which normalizes the results.
245
+ times.extend(m.times)
246
+
247
+ return Measurement(
248
+ number_per_run=1,
249
+ raw_times=times,
250
+ task_spec=task_spec,
251
+ metadata=None,
252
+ )
253
+
254
+ return [merge_group(t, g) for t, g in grouped_measurements.items()]
255
+
256
+
257
def select_unit(t: float) -> Tuple[str, float]:
    """Determine how to scale times for O(1) magnitude.

    This utility is used to format numbers for human consumption.
    """
    # Bucket by power of 1000: -3 -> ns, -2 -> us, -1 -> ms, anything else
    # (including all non-negative buckets) -> seconds.
    magnitude = int(torch.tensor(t).log10().item() // 3)
    unit_table = {-3: ("ns", 1e-9), -2: ("us", 1e-6), -1: ("ms", 1e-3)}
    time_unit, time_scale = unit_table.get(magnitude, ("s", 1))
    return time_unit, time_scale
265
+
266
+
267
def unit_to_english(u: str) -> str:
    """Expand a time-unit abbreviation ("ns"/"us"/"ms"/"s") to its full name."""
    abbreviation_to_name = {
        "ns": "nanosecond",
        "us": "microsecond",
        "ms": "millisecond",
        "s": "second",
    }
    return abbreviation_to_name[u]
274
+
275
+
276
def trim_sigfig(x: float, n: int) -> float:
    """Trim `x` to `n` significant figures. (e.g. 3.14159, 2 -> 3.10000)"""
    assert n == int(n)
    # One more than the base-10 exponent of the leading digit of |x|.
    magnitude = int(torch.tensor(x).abs().log10().ceil().item())
    # Quantum such that exactly `n` significant digits survive rounding.
    scale = 10 ** (magnitude - n)
    quantized = torch.tensor(x / scale).round()
    return float(quantized * scale)
282
+
283
+
284
def ordered_unique(elements: Iterable[Any]) -> List[Any]:
    """Deduplicate `elements`, keeping the first occurrence of each value.

    `dict.fromkeys` preserves insertion order (guaranteed since Python 3.7),
    so the previous `OrderedDict` wrapped around a dict comprehension was
    redundant; this is the standard idiom for order-preserving dedup.
    """
    return list(dict.fromkeys(elements))
286
+
287
+
288
@contextlib.contextmanager
def set_torch_threads(n: int) -> Iterator[None]:
    """Temporarily set the intra-op thread count, restoring it on exit."""
    original_num_threads = torch.get_num_threads()
    try:
        torch.set_num_threads(n)
        yield
    finally:
        # Restore even if the body (or set_num_threads itself) raised.
        torch.set_num_threads(original_num_threads)
296
+
297
+
298
def _make_temp_dir(prefix: Optional[str] = None, gc_dev_shm: bool = False) -> str:
    """Create a temporary directory. The caller is responsible for cleanup.

    This function is conceptually similar to `tempfile.mkdtemp`, but with
    the key additional feature that it will use shared memory if the
    `BENCHMARK_USE_DEV_SHM` environment variable is set. This is an
    implementation detail, but an important one for cases where many Callgrind
    measurements are collected at once. (Such as when collecting
    microbenchmarks.)

    This is an internal utility, and is exported solely so that microbenchmarks
    can reuse the util.

    Args:
        prefix: Optional leading component of the directory name; defaults
            to `tempfile.gettempprefix()`.
        gc_dev_shm: If True (and shared memory is in use), reap directories
            whose owner process no longer exists.
    """
    use_dev_shm: bool = (os.getenv("BENCHMARK_USE_DEV_SHM") or "").lower() in ("1", "true")
    if use_dev_shm:
        root = "/dev/shm/pytorch_benchmark_utils"
        assert os.name == "posix", f"tmpfs (/dev/shm) is POSIX only, current platform is {os.name}"
        assert os.path.exists("/dev/shm"), "This system does not appear to support tmpfs (/dev/shm)."
        os.makedirs(root, exist_ok=True)

        # Because we're working in shared memory, it is more important than
        # usual to clean up ALL intermediate files. However we don't want every
        # worker to walk over all outstanding directories, so instead we only
        # check when we are sure that it won't lead to contention.
        if gc_dev_shm:
            for i in os.listdir(root):
                owner_file = os.path.join(root, i, "owner.pid")
                if not os.path.exists(owner_file):
                    continue

                with open(owner_file) as f:
                    owner_pid = int(f.read())

                if owner_pid == os.getpid():
                    continue

                try:
                    # Signal 0 checks for process existence without actually
                    # delivering a signal.
                    # https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python
                    os.kill(owner_pid, 0)

                except OSError:
                    print(f"Detected that {os.path.join(root, i)} was orphaned in shared memory. Cleaning up.")
                    shutil.rmtree(os.path.join(root, i))

    else:
        root = tempfile.gettempdir()

    # We include the time so names sort by creation time, and add a UUID
    # to ensure we don't collide.
    name = f"{prefix or tempfile.gettempprefix()}__{int(time.time())}__{uuid.uuid4()}"
    path = os.path.join(root, name)
    os.makedirs(path, exist_ok=False)

    if use_dev_shm:
        # Record ownership so a later `gc_dev_shm` pass can distinguish live
        # directories from orphans.
        with open(os.path.join(path, "owner.pid"), "w") as f:
            f.write(str(os.getpid()))

    return path
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Display class to aggregate and print the results of many measurements."""
2
+ import collections
3
+ import enum
4
+ import itertools as it
5
+ from typing import DefaultDict, List, Optional, Tuple
6
+
7
+ from torch.utils.benchmark.utils import common
8
+ from torch import tensor as _tensor
9
+
10
+ __all__ = ["Colorize", "Compare"]
11
+
12
+ BEST = "\033[92m"
13
+ GOOD = "\033[34m"
14
+ BAD = "\033[2m\033[91m"
15
+ VERY_BAD = "\033[31m"
16
+ BOLD = "\033[1m"
17
+ TERMINATE = "\033[0m"
18
+
19
+
20
class Colorize(enum.Enum):
    """Color-coding strategy for rendered comparison tables.

    ROWWISE colors each cell relative to the best value in its row,
    COLUMNWISE relative to the best value in its column (per thread group),
    and NONE disables coloring entirely.
    """
    NONE = "none"
    COLUMNWISE = "columnwise"
    ROWWISE = "rowwise"
24
+
25
+
26
+ # Classes to separate internal bookkeeping from what is rendered.
27
class _Column:
    """Internal bookkeeping for one table column.

    Derives a shared numeric format (field width / decimal digits) from all
    measurements in the column so rendered values line up.
    """
    def __init__(
        self,
        grouped_results: List[Tuple[Optional[common.Measurement], ...]],
        time_scale: float,
        time_unit: str,
        trim_significant_figures: bool,
        highlight_warnings: bool,
    ):
        self._grouped_results = grouped_results
        self._flat_results = list(it.chain(*grouped_results))
        self._time_scale = time_scale
        self._time_unit = time_unit
        self._trim_significant_figures = trim_significant_figures
        # Only highlight if requested AND at least one result has warnings.
        self._highlight_warnings = (
            highlight_warnings
            and any(r.has_warnings for r in self._flat_results if r)
        )
        # Digits before the decimal point for each (scaled) median; None for
        # missing cells.
        leading_digits = [
            int(_tensor(r.median / self._time_scale).log10().ceil()) if r else None
            for r in self._flat_results
        ]
        unit_digits = max(d for d in leading_digits if d is not None)
        # Fewest decimal digits that still respect the least-precise entry's
        # estimated significant figures (or a fixed 1 when not trimming).
        decimal_digits = min(
            max(m.significant_figures - digits, 0)
            for digits, m in zip(leading_digits, self._flat_results)
            if (m is not None) and (digits is not None)
        ) if self._trim_significant_figures else 1
        length = unit_digits + decimal_digits + (1 if decimal_digits else 0)
        # Second field reserves room for the " (! XX%)" warning suffix.
        self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}"

    def get_results_for(self, group):
        # Results (including None placeholders) for one row group.
        return self._grouped_results[group]

    def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]):
        """Render `value` with the column template; width-matched blanks for None."""
        if value is None:
            return " " * len(self.num_to_str(1, estimated_sigfigs, None))

        if self._trim_significant_figures:
            value = common.trim_sigfig(value, estimated_sigfigs)

        return self._template.format(
            value,
            f" (! {spread * 100:.0f}%)" if self._highlight_warnings and spread is not None else "")
71
+
72
+
73
def optional_min(seq):
    """Return min(seq), or None if `seq` is empty.

    Uses `min`'s `default` argument instead of materializing the iterable
    into a list (also removes the ambiguous single-letter name `l`).
    """
    return min(seq, default=None)
76
+
77
+
78
class _Row:
    """Internal bookkeeping for one table row, including ANSI coloring."""
    def __init__(self, results, row_group, render_env, env_str_len,
                 row_name_str_len, time_scale, colorize, num_threads=None):
        super().__init__()
        self._results = results
        self._row_group = row_group
        self._render_env = render_env
        self._env_str_len = env_str_len
        self._row_name_str_len = row_name_str_len
        self._time_scale = time_scale
        self._colorize = colorize
        self._columns: Tuple[_Column, ...] = ()
        # Set only on the first row of a thread group; drives row_separator.
        self._num_threads = num_threads

    def register_columns(self, columns: Tuple[_Column, ...]):
        # Columns are constructed after rows; Table wires them in afterwards.
        self._columns = columns

    def as_column_strings(self):
        """Format this row's cells (unpadded, uncolored)."""
        concrete_results = [r for r in self._results if r is not None]
        env = f"({concrete_results[0].env})" if self._render_env else ""
        env = env.ljust(self._env_str_len + 4)
        output = [" " + env + concrete_results[0].as_row_name]
        for m, col in zip(self._results, self._columns or ()):
            if m is None:
                # Missing cell: width-matched blank from the column template.
                output.append(col.num_to_str(None, 1, None))
            else:
                output.append(col.num_to_str(
                    m.median / self._time_scale,
                    m.significant_figures,
                    m.iqr / m.median if m.has_warnings else None
                ))
        return output

    @staticmethod
    def color_segment(segment, value, best_value):
        """Wrap `segment` in ANSI codes based on how `value` compares to best."""
        # Within 1% (or 100ns absolute) of the best: bold green.
        if value <= best_value * 1.01 or value <= best_value + 100e-9:
            return BEST + BOLD + segment + TERMINATE * 2
        if value <= best_value * 1.1:
            return GOOD + BOLD + segment + TERMINATE * 2
        if value >= best_value * 5:
            return VERY_BAD + BOLD + segment + TERMINATE * 2
        if value >= best_value * 2:
            return BAD + segment + TERMINATE * 2

        return segment

    def row_separator(self, overall_width):
        # "N threads: ----" rule, only on the first row of a thread group.
        return (
            [f"{self._num_threads} threads: ".ljust(overall_width, "-")]
            if self._num_threads is not None else []
        )

    def finalize_column_strings(self, column_strings, col_widths):
        """Pad cells to their column widths and apply the coloring policy."""
        best_values = [-1 for _ in column_strings]
        if self._colorize == Colorize.ROWWISE:
            row_min = min(r.median for r in self._results if r is not None)
            best_values = [row_min for _ in column_strings]
        elif self._colorize == Colorize.COLUMNWISE:
            best_values = [
                optional_min(r.median for r in column.get_results_for(self._row_group) if r is not None)
                for column in (self._columns or ())
            ]

        row_contents = [column_strings[0].ljust(col_widths[0])]
        for col_str, width, result, best_value in zip(column_strings[1:], col_widths[1:], self._results, best_values):
            col_str = col_str.center(width)
            if self._colorize != Colorize.NONE and result is not None and best_value is not None:
                col_str = self.color_segment(col_str, result.median, best_value)
            row_contents.append(col_str)
        return row_contents
148
+
149
+
150
class Table:
    """Renders a group of Measurements (sharing one label) as a text table.

    Rows are keyed by (num_threads, env, row name); columns by description.
    """
    def __init__(
        self,
        results: List[common.Measurement],
        colorize: Colorize,
        trim_significant_figures: bool,
        highlight_warnings: bool
    ):
        # All results in one table must share a label; grouping by label is
        # the caller's (Compare's) responsibility.
        assert len({r.label for r in results}) == 1

        self.results = results
        self._colorize = colorize
        self._trim_significant_figures = trim_significant_figures
        self._highlight_warnings = highlight_warnings
        self.label = results[0].label
        # Scale the entire table by the fastest entry.
        self.time_unit, self.time_scale = common.select_unit(
            min(r.median for r in results)
        )

        self.row_keys = common.ordered_unique([self.row_fn(i) for i in results])
        self.row_keys.sort(key=lambda args: args[:2])  # preserve stmt order
        self.column_keys = common.ordered_unique([self.col_fn(i) for i in results])
        self.rows, self.columns = self.populate_rows_and_columns()

    @staticmethod
    def row_fn(m: common.Measurement) -> Tuple[int, Optional[str], str]:
        # Row identity: (thread count, environment, row name).
        return m.num_threads, m.env, m.as_row_name

    @staticmethod
    def col_fn(m: common.Measurement) -> Optional[str]:
        # Column identity: the measurement's description.
        return m.description

    def populate_rows_and_columns(self) -> Tuple[Tuple[_Row, ...], Tuple[_Column, ...]]:
        """Build _Row/_Column objects and cross-register them."""
        rows: List[_Row] = []
        columns: List[_Column] = []
        # Dense (row x column) grid; cells without a measurement stay None.
        ordered_results: List[List[Optional[common.Measurement]]] = [
            [None for _ in self.column_keys]
            for _ in self.row_keys
        ]
        row_position = {key: i for i, key in enumerate(self.row_keys)}
        col_position = {key: i for i, key in enumerate(self.column_keys)}
        for r in self.results:
            i = row_position[self.row_fn(r)]
            j = col_position[self.col_fn(r)]
            ordered_results[i][j] = r

        # Only render env strings when more than one env is present.
        unique_envs = {r.env for r in self.results}
        render_env = len(unique_envs) > 1
        env_str_len = max(len(i) for i in unique_envs) if render_env else 0

        row_name_str_len = max(len(r.as_row_name) for r in self.results)

        # Group rows by thread count; each group gets a separator line and is
        # the unit of COLUMNWISE colorization.
        prior_num_threads = -1
        prior_env = ""
        row_group = -1
        rows_by_group: List[List[List[Optional[common.Measurement]]]] = []
        for (num_threads, env, _), row in zip(self.row_keys, ordered_results):
            thread_transition = (num_threads != prior_num_threads)
            if thread_transition:
                prior_num_threads = num_threads
                prior_env = ""
                row_group += 1
                rows_by_group.append([])
            rows.append(
                _Row(
                    results=row,
                    row_group=row_group,
                    # Only show env when it changes within a thread group.
                    render_env=(render_env and env != prior_env),
                    env_str_len=env_str_len,
                    row_name_str_len=row_name_str_len,
                    time_scale=self.time_scale,
                    colorize=self._colorize,
                    num_threads=num_threads if thread_transition else None,
                )
            )
            rows_by_group[-1].append(row)
            prior_env = env

        for i in range(len(self.column_keys)):
            grouped_results = [tuple(row[i] for row in g) for g in rows_by_group]
            column = _Column(
                grouped_results=grouped_results,
                time_scale=self.time_scale,
                time_unit=self.time_unit,
                trim_significant_figures=self._trim_significant_figures,
                highlight_warnings=self._highlight_warnings,)
            columns.append(column)

        rows_tuple, columns_tuple = tuple(rows), tuple(columns)
        # Rows need column handles for width/coloring during finalization.
        for ri in rows_tuple:
            ri.register_columns(columns_tuple)
        return rows_tuple, columns_tuple

    def render(self) -> str:
        """Lay out the header, separators, and rows into the final string."""
        string_rows = [[""] + self.column_keys]
        for r in self.rows:
            string_rows.append(r.as_column_strings())
        num_cols = max(len(i) for i in string_rows)
        # Right-pad ragged rows so the transpose below is rectangular.
        for sr in string_rows:
            sr.extend(["" for _ in range(num_cols - len(sr))])

        col_widths = [max(len(j) for j in i) for i in zip(*string_rows)]
        finalized_columns = [" | ".join(i.center(w) for i, w in zip(string_rows[0], col_widths))]
        overall_width = len(finalized_columns[0])
        for string_row, row in zip(string_rows[1:], self.rows):
            finalized_columns.extend(row.row_separator(overall_width))
            finalized_columns.append(" | ".join(row.finalize_column_strings(string_row, col_widths)))

        newline = "\n"
        has_warnings = self._highlight_warnings and any(ri.has_warnings for ri in self.results)
        # [1:] drops the leading newline of the template.
        return f"""
[{(' ' + (self.label or '') + ' ').center(overall_width - 2, '-')}]
{newline.join(finalized_columns)}

Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}).
{'(! XX%) Measurement has high variance, where XX is the IQR / median * 100.' + newline if has_warnings else ""}"""[1:]
266
+
267
+
268
class Compare:
    """Collects Measurements and renders them as formatted comparison tables.

    Results are merged (replicates pooled), grouped by label, and each group
    is rendered as one table. Display options are toggled via the
    `trim_significant_figures`, `colorize`, and `highlight_warnings` methods.
    """
    def __init__(self, results: List[common.Measurement]):
        self._results: List[common.Measurement] = []
        self.extend_results(results)
        self._trim_significant_figures = False
        self._colorize = Colorize.NONE
        self._highlight_warnings = False

    def __str__(self):
        return "\n".join(self._render())

    def extend_results(self, results):
        """Append `results` after validating every element is a Measurement."""
        for candidate in results:
            if not isinstance(candidate, common.Measurement):
                raise ValueError(
                    f"Expected an instance of `Measurement`, got {type(candidate)} instead."
                )
        self._results.extend(results)

    def trim_significant_figures(self):
        """Round displayed values to their estimated precision."""
        self._trim_significant_figures = True

    def colorize(self, rowwise=False):
        """Enable ANSI coloring, comparing within rows or within columns."""
        self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE

    def highlight_warnings(self):
        """Annotate high-variance measurements in the rendered output."""
        self._highlight_warnings = True

    def print(self):
        """Render all tables to stdout."""
        print(str(self))

    def _render(self):
        # Pool replicates first, then emit one table per unique label.
        merged = common.Measurement.merge(self._results)
        return [
            self._layout(group)
            for group in self._group_by_label(merged).values()
        ]

    def _group_by_label(self, results: List[common.Measurement]):
        by_label: DefaultDict[str, List[common.Measurement]] = collections.defaultdict(list)
        for result in results:
            by_label[result.label].append(result)
        return by_label

    def _layout(self, results: List[common.Measurement]):
        table = Table(
            results,
            self._colorize,
            self._trim_significant_figures,
            self._highlight_warnings,
        )
        return table.render()
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ __all__ = ["bench_all", "benchmark_compile"]
4
+
5
+ import torch._dynamo
6
+ from torch._dynamo.testing import CompileCounterWithBackend
7
+ from torch.utils.benchmark import Timer
8
+
9
+ from typing import Optional, List, Callable, Union, Any, cast
10
+
11
+ _warned_tensor_cores = False
12
+ _default_float_32_precision = torch.get_float32_matmul_precision()
13
+
14
+ try:
15
+ from tabulate import tabulate
16
+ HAS_TABULATE = True
17
+ except ImportError:
18
+ HAS_TABULATE = False
19
+ print("tabulate is not installed, please pip install tabulate to use this utility")
20
+
21
+ if HAS_TABULATE:
22
    def _enable_tensor_cores():
        """Allow TF32 matmuls on capable GPUs, printing a one-time notice."""
        global _warned_tensor_cores

        if torch.cuda.is_available():
            # Only flip precision when TF32 is currently disabled and the GPU
            # reports compute capability 8.0 (Ampere) or newer.
            if torch.backends.cuda.matmul.allow_tf32 is False and torch.cuda.get_device_capability() >= (8, 0):
                torch.set_float32_matmul_precision("high")
                if not _warned_tensor_cores:
                    print("Your GPU supports tensor cores")
                    print("we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`")
                    _warned_tensor_cores = True
32
+
33
    def _disable_tensor_cores():
        """Restore the float32 matmul precision captured at import time."""
        torch.set_float32_matmul_precision(_default_float_32_precision)
35
+
36
    def bench_loop(
        model: Union[torch.nn.Module, Callable],
        sample_input: Union[torch.Tensor, Any],
        num_iters: int = 5,
        optimizer: Optional[torch.optim.Optimizer] = None,
        loss_fn: Optional[Callable] = None,
    ):
        """Time `num_iters` iterations of `model`, returning ms/iter (2 d.p.).

        Runs a full training step (forward, loss, backward, optimizer step)
        only when BOTH `optimizer` and `loss_fn` are provided; otherwise a
        forward pass only.
        """
        # Define the statement and setup for the benchmark
        if optimizer and loss_fn:
            # Training mode
            stmt = """
output = model(sample_input)
loss = loss_fn(output) if loss_fn else output.sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
"""
        else:
            # Inference mode
            stmt = "model(sample_input)"

        # Create the Timer object
        timer = Timer(
            stmt=stmt,
            globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn},
        )

        result = timer.timeit(number=num_iters)

        # Get the average time per iteration in milliseconds
        avg_time = result.mean * 1000
        return round(avg_time, 2)
69
+
70
    def benchmark_compile(
        model: Union[torch.nn.Module, Callable],
        sample_input: Union[torch.Tensor, Any],
        num_iters: int = 5,
        backend: Optional[str] = None,
        mode: Optional[str] = "default",
        optimizer: Optional[torch.optim.Optimizer] = None,
        loss_fn : Union[torch.nn.Module, Callable, None] = None,
    ):
        """
        Use this utility to benchmark torch.compile

        Returns a `(compilation_time_ms, running_time_ms)` pair. When
        `backend` is None the model runs eagerly and compilation time is
        None; on any compilation/benchmark failure `(None, None)` is
        returned (the error is printed, not raised).
        """
        if backend:
            try:
                torch._dynamo.reset()
                # Counter backend lets us verify exactly one compilation ran.
                compile_counter_with_backend = CompileCounterWithBackend(backend)
                opt_model = torch.compile(model, backend=compile_counter_with_backend, mode=mode)

                # Compilation only happens after the first inference
                compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn)

                running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)

                if compile_counter_with_backend.frame_count == 0:
                    raise RuntimeError("No compilation occurred during benchmarking.")

                if compile_counter_with_backend.frame_count > 1:
                    raise RuntimeError("Recompilation occurred during benchmarking.")

            except Exception as e:
                # Best-effort utility: report the failure and carry on so
                # other backends can still be benchmarked.
                print(e)
                print(f"Failed to compile {backend} with mode {mode}")
                return None, None
        else:
            opt_model = model
            compilation_time = None
            running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)

        compilation_time = round(compilation_time, 2) if compilation_time else None
        running_time = round(running_time, 2) if running_time else None

        return compilation_time, running_time
113
+
114
+
115
+ def bench_all(
116
+ model : Union[torch.nn.Module, Callable],
117
+ sample_input: Union[torch.Tensor, Any],
118
+ num_iters : int = 5,
119
+ optimizer: Optional[torch.optim.Optimizer] = None,
120
+ loss_fn : Union[torch.nn.Module, Callable, None] = None,
121
+ ):
122
+ """
123
+ This is a simple utility that can be used to benchmark torch.compile
124
+ In particular it ensures that your GPU is setup to use tensor cores if it supports its
125
+ It also tries out all the main backends and prints a table of results so you can easily compare them all
126
+ Many of the backendds have their own optional dependencies so please pip install them seperately
127
+
128
+ You will get one table for inference and another for training
129
+ If you'd like to leverage this utility for training make sure to pass in a torch.optim.Optimizer
130
+
131
+ The important warnings are
132
+ Your GPU supports tensor cores
133
+ we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`
134
+
135
+ If a compilation fails for any reason including the dependency not being included
136
+ then we will print Failed to compile {backend} with mode {mode}
137
+ """
138
+ field_names = ["Train/Inference", "Backend", "Mode", "Compilation Time", "Average Running Time"]
139
+ table = []
140
+
141
+
142
+ eager_time = None
143
+ torch._dynamo.reset()
144
+ _, eager_time = benchmark_compile(model, sample_input, num_iters, None, None, optimizer)
145
+ table.append(
146
+ [("Training" if optimizer else "Inference"), "Eager", "-", "-", f"{eager_time} ms"]
147
+ )
148
+
149
+ for backend in torch._dynamo.list_backends():
150
+
151
+ if backend == "inductor":
152
+ mode_options = cast(List[Optional[str]], list(torch._inductor.list_mode_options().keys())) + [None]
153
+ for mode in mode_options:
154
+ if mode == "default":
155
+ continue
156
+ torch._dynamo.reset()
157
+ try:
158
+ if torch.cuda.is_available():
159
+ _enable_tensor_cores()
160
+ compilation_time, running_time = benchmark_compile(
161
+ model, sample_input, num_iters, backend, mode, optimizer, loss_fn)
162
+ finally:
163
+ if torch.cuda.is_available():
164
+ _disable_tensor_cores()
165
+ table.append([
166
+ ("Training" if optimizer else "Inference"),
167
+ backend if backend else "-",
168
+ mode if mode is not None else "-",
169
+ f"{compilation_time} ms " if compilation_time else "-",
170
+ f"{running_time} ms " if running_time else "-",
171
+ ])
172
+
173
+ else:
174
+ torch._dynamo.reset()
175
+ compilation_time, running_time = benchmark_compile(
176
+ model, sample_input, num_iters, backend, None, optimizer, loss_fn)
177
+
178
+ if running_time is not None:
179
+ table.append([
180
+ ("Training" if optimizer else "Inference"),
181
+ backend, "-",
182
+ f"{compilation_time} ms " or "-",
183
+ f"{running_time} ms ",
184
+ ])
185
+
186
+
187
+ return tabulate(table, headers=field_names, tablefmt="github")
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/fuzzer.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import itertools as it
3
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+
8
+
9
+ __all__ = [
10
+ "Fuzzer",
11
+ "FuzzedParameter", "ParameterAlias",
12
+ "FuzzedTensor",
13
+ ]
14
+
15
+
16
+ _DISTRIBUTIONS = (
17
+ "loguniform",
18
+ "uniform",
19
+ )
20
+
21
+
22
class FuzzedParameter:
    """Specification for a parameter to be generated during fuzzing."""
    def __init__(
        self,
        name: str,
        minval: Optional[Union[int, float]] = None,
        maxval: Optional[Union[int, float]] = None,
        distribution: Optional[Union[str, Dict[Any, float]]] = None,
        strict: bool = False,
    ):
        """
        Args:
            name:
                A string name with which to identify the parameter.
                FuzzedTensors can reference this string in their
                specifications.
            minval:
                The lower bound for the generated value. See the description
                of `distribution` for type behavior.
            maxval:
                The upper bound for the generated value. Type behavior is
                identical to `minval`.
            distribution:
                Specifies the distribution from which this parameter should
                be drawn. There are three possibilities:
                    - "loguniform"
                        Samples between `minval` and `maxval` (inclusive) such
                        that the probabilities are uniform in log space. As a
                        concrete example, if minval=1 and maxval=100, a sample
                        is as likely to fall in [1, 10) as it is [10, 100].
                    - "uniform"
                        Samples are chosen with uniform probability between
                        `minval` and `maxval` (inclusive). If either `minval`
                        or `maxval` is a float then the distribution is the
                        continuous uniform distribution; otherwise samples
                        are constrained to the integers.
                    - dict:
                        If a dict is passed, the keys are taken to be choices
                        for the variables and the values are interpreted as
                        probabilities. (And must sum to one.)
                If a dict is passed, `minval` and `maxval` must not be set.
                Otherwise, they must be set.
            strict:
                If a parameter is strict, it will not be included in the
                iterative resampling process which Fuzzer uses to find a
                valid parameter configuration. This allows an author to
                prevent skew from resampling for a given parameter (for
                instance, a low size limit could inadvertently bias towards
                Tensors with fewer dimensions) at the cost of more iterations
                when generating parameters.
        """
        self._name = name
        self._minval = minval
        self._maxval = maxval
        # Validates and normalizes; raises via assert on bad specs.
        self._distribution = self._check_distribution(distribution)
        self.strict = strict

    @property
    def name(self):
        return self._name

    def sample(self, state):
        """Draw one value using `state` (a numpy RandomState-like object)."""
        if self._distribution == "loguniform":
            return self._loguniform(state)

        if self._distribution == "uniform":
            return self._uniform(state)

        if isinstance(self._distribution, dict):
            return self._custom_distribution(state)

    def _check_distribution(self, distribution):
        if not isinstance(distribution, dict):
            assert distribution in _DISTRIBUTIONS
        else:
            assert not any(i < 0 for i in distribution.values()), "Probabilities cannot be negative"
            assert abs(sum(distribution.values()) - 1) <= 1e-5, "Distribution is not normalized"
            # Bounds are meaningless for an explicit choice distribution.
            assert self._minval is None
            assert self._maxval is None

        return distribution

    def _loguniform(self, state):
        # Sample uniformly in log2 space, then round down to an int.
        output = int(2 ** state.uniform(
            low=np.log2(self._minval) if self._minval is not None else None,
            high=np.log2(self._maxval) if self._maxval is not None else None,
        ))
        # Clamp: rounding / float error can land just outside the bounds.
        if self._minval is not None and output < self._minval:
            return self._minval
        if self._maxval is not None and output > self._maxval:
            return self._maxval
        return output

    def _uniform(self, state):
        # Integer bounds imply an integer-valued (inclusive) uniform draw.
        if isinstance(self._minval, int) and isinstance(self._maxval, int):
            return int(state.randint(low=self._minval, high=self._maxval + 1))
        return state.uniform(low=self._minval, high=self._maxval)

    def _custom_distribution(self, state):
        # If we directly pass the keys to `choice`, numpy will convert
        # them to numpy dtypes.
        index = state.choice(
            np.arange(len(self._distribution)),
            p=tuple(self._distribution.values()))
        return list(self._distribution.keys())[index]
127
+
128
+
129
class ParameterAlias:
    """Indicates that a parameter should alias the value of another parameter.

    When used in conjunction with a custom distribution, this allows fuzzed
    tensors to represent a broader range of behaviors. For example, the
    following sometimes produces Tensors which broadcast:

    Fuzzer(
        parameters=[
            FuzzedParameter("x_len", 4, 1024, distribution="uniform"),

            # `y` will either be size one, or match the size of `x`.
            # (Keys of a custom distribution are the choices and the values
            # are their probabilities.)
            FuzzedParameter("y_len", distribution={
                1: 0.5,
                ParameterAlias("x_len"): 0.5,
            }),
        ],
        tensors=[
            FuzzedTensor("x", size=("x_len",)),
            FuzzedTensor("y", size=("y_len",)),
        ],
    )

    Chains of alias' are allowed, but may not contain cycles.

    Attributes:
        alias_to: Name of the parameter whose sampled value should be reused.
    """
    def __init__(self, alias_to):
        self.alias_to = alias_to

    def __repr__(self):
        return f"ParameterAlias[alias_to: {self.alias_to}]"
159
+
160
+
161
def dtype_size(dtype):
    """Return the per-element size in bytes reported for `dtype`."""
    if dtype == torch.bool:
        # bool has no finfo/iinfo; it is stored as one byte per element.
        return 1
    # NOTE(review): for complex dtypes torch.finfo reports the component
    # width (e.g. 32 bits for complex64), so this returns the component
    # size — matches the original behavior; confirm if the full 8/16-byte
    # element size was intended.
    if dtype.is_floating_point or dtype.is_complex:
        info = torch.finfo(dtype)
    else:
        info = torch.iinfo(dtype)
    return int(info.bits / 8)
167
+
168
+
169
def prod(values, base=1):
    """np.prod can overflow, so for sizes the product should be done in Python.

    Even though np.prod type promotes to int64, it can still overflow in which
    case the negative value will pass the size check and OOM when attempting to
    actually allocate the Tensor. Unbounded Python ints avoid that entirely.
    """
    result = base
    for value in values:
        result = int(result) * int(value)
    return result
177
+
178
+
179
class FuzzedTensor:
    def __init__(
        self,
        name: str,
        size: Tuple[Union[str, int], ...],
        steps: Optional[Tuple[Union[str, int], ...]] = None,
        probability_contiguous: float = 0.5,
        min_elements: Optional[int] = None,
        max_elements: Optional[int] = None,
        max_allocation_bytes: Optional[int] = None,
        dim_parameter: Optional[str] = None,
        roll_parameter: Optional[str] = None,
        dtype=torch.float32,
        cuda=False,
        tensor_constructor: Optional[Callable] = None
    ):
        """
        Args:
            name:
                A string identifier for the generated Tensor.
            size:
                A tuple of integers or strings specifying the size of the generated
                Tensor. String values will replaced with a concrete int during the
                generation process, while ints are simply passed as literals.
            steps:
                An optional tuple with the same length as `size`. This indicates
                that a larger Tensor should be allocated, and then sliced to
                produce the generated Tensor. For instance, if size is (4, 8)
                and steps is (1, 4), then a tensor `t` of size (4, 32) will be
                created and then `t[:, ::4]` will be used. (Allowing one to test
                Tensors with strided memory.)
            probability_contiguous:
                A number between zero and one representing the chance that the
                generated Tensor has a contiguous memory layout. This is achieved by
                randomly permuting the shape of a Tensor, calling `.contiguous()`,
                and then permuting back. This is applied before `steps`, which can
                also cause a Tensor to be non-contiguous.
            min_elements:
                The minimum number of parameters that this Tensor must have for a
                set of parameters to be valid. (Otherwise they are resampled.)
            max_elements:
                Like `min_elements`, but setting an upper bound.
            max_allocation_bytes:
                Like `max_elements`, but for the size of Tensor that must be
                allocated prior to slicing for `steps` (if applicable). For
                example, a FloatTensor with size (1024, 1024) and steps (4, 4)
                would have 1M elements, but would require a 64 MB allocation.
            dim_parameter:
                The length of `size` and `steps` will be truncated to this value.
                This allows Tensors of varying dimensions to be generated by the
                Fuzzer.
            roll_parameter:
                Accepted for interface compatibility but not used by this class.
                (TODO(review): confirm whether rolling support was intended.)
            dtype:
                The PyTorch dtype of the generated Tensor.
            cuda:
                Whether to place the Tensor on a GPU.
            tensor_constructor:
                Callable which will be used instead of the default Tensor
                construction method. This allows the author to enforce properties
                of the Tensor (e.g. it can only have certain values). The dtype and
                concrete shape of the Tensor to be created will be passed, and
                concrete values of all parameters will be passed as kwargs. Note
                that transformations to the result (permuting, slicing) will be
                performed by the Fuzzer; the tensor_constructor is only responsible
                for creating an appropriately sized Tensor.
        """
        self._name = name
        self._size = size
        self._steps = steps
        self._probability_contiguous = probability_contiguous
        self._min_elements = min_elements
        self._max_elements = max_elements
        self._max_allocation_bytes = max_allocation_bytes
        self._dim_parameter = dim_parameter
        self._dtype = dtype
        self._cuda = cuda
        self._tensor_constructor = tensor_constructor

    @property
    def name(self):
        """Identifier under which the generated Tensor is keyed."""
        return self._name

    @staticmethod
    def default_tensor_constructor(size, dtype, **kwargs):
        # Floating/complex Tensors get uniform [0, 1) values; integral ones
        # get small positive ints so downstream ops (e.g. division) are safe.
        if dtype.is_floating_point or dtype.is_complex:
            return torch.rand(size=size, dtype=dtype, device="cpu")
        else:
            return torch.randint(1, 127, size=size, dtype=dtype, device="cpu")

    def _make_tensor(self, params, state):
        """Materialize one Tensor (and a property dict) for `params`."""
        size, steps, allocation_size = self._get_size_and_steps(params)
        constructor = (
            self._tensor_constructor or
            self.default_tensor_constructor
        )

        raw_tensor = constructor(size=allocation_size, dtype=self._dtype, **params)
        if self._cuda:
            raw_tensor = raw_tensor.cuda()

        # Randomly permute the Tensor and call `.contiguous()` to force re-ordering
        # of the memory, and then permute it back to the original shape.
        dim = len(size)
        order = np.arange(dim)
        if state.rand() > self._probability_contiguous:
            # Loop until a non-identity permutation is drawn (only possible
            # for dim > 1).
            while dim > 1 and np.all(order == np.arange(dim)):
                order = state.permutation(raw_tensor.dim())

            raw_tensor = raw_tensor.permute(tuple(order)).contiguous()
            raw_tensor = raw_tensor.permute(tuple(np.argsort(order)))

        # Fix: index with a *tuple* of slices. Indexing with a plain list of
        # slices is deprecated advanced-indexing syntax and warns (or changes
        # meaning) on newer numpy/torch.
        slices = tuple(slice(0, size_i * step_i, step_i) for size_i, step_i in zip(size, steps))
        tensor = raw_tensor[slices]

        properties = {
            "numel": int(tensor.numel()),
            "order": order,
            "steps": steps,
            "is_contiguous": tensor.is_contiguous(),
            "dtype": str(self._dtype),
        }

        return tensor, properties

    def _get_size_and_steps(self, params):
        """Resolve symbolic sizes/steps to ints; return (size, steps, alloc)."""
        dim = (
            params[self._dim_parameter]
            if self._dim_parameter is not None
            else len(self._size)
        )

        def resolve(values, dim):
            """Resolve values into concrete integers."""
            # String entries are looked up in `params`; ints pass through.
            values = tuple(params.get(i, i) for i in values)
            if len(values) > dim:
                values = values[:dim]
            if len(values) < dim:
                # Pad missing trailing dims/steps with 1 (no-op).
                values = values + tuple(1 for _ in range(dim - len(values)))
            return values

        size = resolve(self._size, dim)
        steps = resolve(self._steps or (), dim)
        allocation_size = tuple(size_i * step_i for size_i, step_i in zip(size, steps))
        return size, steps, allocation_size

    def satisfies_constraints(self, params):
        """True if `params` respects the element-count and allocation limits."""
        size, _, allocation_size = self._get_size_and_steps(params)
        # Product is computed in Python to avoid integer overflow.
        num_elements = prod(size)
        assert num_elements >= 0

        allocation_bytes = prod(allocation_size, base=dtype_size(self._dtype))

        def nullable_greater(left, right):
            # A missing (None) bound never rejects.
            if left is None or right is None:
                return False
            return left > right

        return not any((
            nullable_greater(num_elements, self._max_elements),
            nullable_greater(self._min_elements, num_elements),
            nullable_greater(allocation_bytes, self._max_allocation_bytes),
        ))
341
+
342
+
343
class Fuzzer:
    # Drives generation: repeatedly samples parameter values, rejects sets
    # which violate constraints, and materializes the requested Tensors.
    def __init__(
        self,
        parameters: List[Union[FuzzedParameter, List[FuzzedParameter]]],
        tensors: List[Union[FuzzedTensor, List[FuzzedTensor]]],
        constraints: Optional[List[Callable]] = None,
        seed: Optional[int] = None
    ):
        """
        Args:
            parameters:
                List of FuzzedParameters which provide specifications
                for generated parameters. Iterable elements will be
                unpacked, though arbitrary nested structures will not.
            tensors:
                List of FuzzedTensors which define the Tensors which
                will be created each step based on the parameters for
                that step. Iterable elements will be unpacked, though
                arbitrary nested structures will not.
            constraints:
                List of callables. They will be called with params
                as kwargs, and if any of them return False the current
                set of parameters will be rejected.
            seed:
                Seed for the RandomState used by the Fuzzer. This will
                also be used to set the PyTorch random seed so that random
                ops will create reproducible Tensors.
        """
        if seed is None:
            seed = np.random.RandomState().randint(0, 2 ** 32 - 1, dtype=np.int64)
        self._seed = seed
        self._parameters = Fuzzer._unpack(parameters, FuzzedParameter)
        self._tensors = Fuzzer._unpack(tensors, FuzzedTensor)
        self._constraints = constraints or ()

        # Parameter and Tensor names share one namespace in the generated
        # `params` dict, so collisions would be ambiguous.
        p_names = {p.name for p in self._parameters}
        t_names = {t.name for t in self._tensors}
        name_overlap = p_names.intersection(t_names)
        if name_overlap:
            raise ValueError(f"Duplicate names in parameters and tensors: {name_overlap}")

        # Bookkeeping for `rejection_rate`.
        self._rejections = 0
        self._total_generated = 0

    @staticmethod
    def _unpack(values, cls):
        # Flatten exactly one level: bare `cls` instances are kept as-is,
        # iterables of them are spliced in.
        return tuple(it.chain(
            *[[i] if isinstance(i, cls) else i for i in values]
        ))

    def take(self, n):
        """Yield `n` (tensors, tensor_properties, params) triples."""
        state = np.random.RandomState(self._seed)
        # Seed PyTorch from the same stream so Tensor contents are
        # reproducible alongside the sampled parameters.
        torch.manual_seed(state.randint(low=0, high=2 ** 63, dtype=np.int64))
        for _ in range(n):
            params = self._generate(state)
            tensors = {}
            tensor_properties = {}
            for t in self._tensors:
                tensor, properties = t._make_tensor(params, state)
                tensors[t.name] = tensor
                tensor_properties[t.name] = properties
            yield tensors, tensor_properties, params

    @property
    def rejection_rate(self):
        # Fraction of candidate parameter sets discarded by constraints.
        if not self._total_generated:
            return 0.
        return self._rejections / self._total_generated

    def _generate(self, state):
        # Strict parameters are sampled once and then pinned, so that
        # resampling (to satisfy constraints) cannot skew their marginals.
        strict_params: Dict[str, Union[float, int, ParameterAlias]] = {}
        for _ in range(1000):
            candidate_params: Dict[str, Union[float, int, ParameterAlias]] = {}
            for p in self._parameters:
                if p.strict:
                    if p.name in strict_params:
                        candidate_params[p.name] = strict_params[p.name]
                    else:
                        candidate_params[p.name] = p.sample(state)
                        strict_params[p.name] = candidate_params[p.name]
                else:
                    candidate_params[p.name] = p.sample(state)

            candidate_params = self._resolve_aliases(candidate_params)

            self._total_generated += 1
            if not all(f(candidate_params) for f in self._constraints):
                self._rejections += 1
                continue

            if not all(t.satisfies_constraints(candidate_params) for t in self._tensors):
                self._rejections += 1
                continue

            return candidate_params
        raise ValueError("Failed to generate a set of valid parameters.")

    @staticmethod
    def _resolve_aliases(params):
        # Substitute ParameterAlias values with their targets, iterating so
        # chains of aliases resolve. If a full pass makes no progress the
        # aliases must form a cycle.
        params = dict(params)
        alias_count = sum(isinstance(v, ParameterAlias) for v in params.values())

        keys = list(params.keys())
        while alias_count:
            for k in keys:
                v = params[k]
                if isinstance(v, ParameterAlias):
                    params[k] = params[v.alias_to]
            alias_count_new = sum(isinstance(v, ParameterAlias) for v in params.values())
            if alias_count == alias_count_new:
                raise ValueError(f"ParameterAlias cycle detected\n{params}")

            alias_count = alias_count_new

        return params
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timeit_template.cpp ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* C++ template for Timer.timeit

This template will be consumed by `cpp_jit.py`, and will replace:
    `GLOBAL_SETUP_TEMPLATE_LOCATION`,
    `SETUP_TEMPLATE_LOCATION`
and
    `STMT_TEMPLATE_LOCATION`
sections with user provided statements.

NOTE: the `*_TEMPLATE_LOCATION` comment lines below are functional
substitution markers — `cpp_jit.py` replaces them textually. Do not
rename, move, or remove them.
*/
#include <chrono>

#include <c10/util/irange.h>
#include <torch/csrc/utils/pybind.h>
#include <pybind11/pybind11.h>
#include <torch/extension.h>

// Global setup. (e.g. #includes)
// GLOBAL_SETUP_TEMPLATE_LOCATION

double timeit(int n) {
  // The GIL is not needed while running pure C++ user statements.
  pybind11::gil_scoped_release no_gil;

  // Setup
  // SETUP_TEMPLATE_LOCATION

  {
    // Warmup
    // STMT_TEMPLATE_LOCATION
  }

  // Main loop
  auto start_time = std::chrono::high_resolution_clock::now();
  for (const auto loop_idx : c10::irange(n)) {
    (void)loop_idx;
    // STMT_TEMPLATE_LOCATION
  }
  auto end_time = std::chrono::high_resolution_clock::now();
  return std::chrono::duration<double>(end_time - start_time).count();
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("timeit", &timeit);
}
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py ADDED
@@ -0,0 +1,537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Timer class based on the timeit.Timer class, but torch aware."""
2
+ import enum
3
+ import timeit
4
+ import textwrap
5
+ from typing import overload, Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union
6
+
7
+ import torch
8
+ from torch.utils.benchmark.utils import common, cpp_jit
9
+ from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType
10
+ from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface
11
+
12
+
13
+ __all__ = ["Timer", "timer", "Language"]
14
+
15
+
16
if torch.backends.cuda.is_built() and torch.cuda.is_available():
    def timer() -> float:
        """Return the current time, synchronizing CUDA first.

        CUDA kernels launch asynchronously, so the clock must only be read
        after outstanding GPU work has completed.
        """
        torch.cuda.synchronize()
        return timeit.default_timer()
elif torch._C._get_privateuse1_backend_name() != "privateuseone":
    # A custom ("privateuse1") device backend has been registered under a
    # real name; look up its module so it can be synchronized before timing.
    # ("cpu" needs no synchronization, hence the None handler.)
    privateuse1_device_handler = getattr(torch, torch._C._get_privateuse1_backend_name(), None) \
        if torch._C._get_privateuse1_backend_name() != "cpu" else None

    def timer() -> float:
        """Return the current time, synchronizing the custom backend first."""
        if privateuse1_device_handler:
            privateuse1_device_handler.synchronize()
        return timeit.default_timer()
else:
    # Pure-CPU build: no device synchronization required.
    timer = timeit.default_timer
30
+
31
+
32
class Language(enum.Enum):
    """Source language of the snippet being timed by `Timer`."""

    PYTHON = 0
    CPP = 1
35
+
36
+
37
class CPPTimer:
    # Minimal `timeit.Timer`-shaped adapter which compiles and times a C++
    # snippet (via `cpp_jit`) instead of executing Python.
    def __init__(
        self,
        stmt: str,
        setup: str,
        global_setup: str,
        timer: Callable[[], float],
        globals: Dict[str, Any],
    ) -> None:
        """Validate arguments and store the (dedented) C++ source fragments.

        Raises:
            NotImplementedError: if a non-default `timer` is supplied (C++
                timing cannot synchronize CUDA).
            ValueError: if `globals` is non-empty (there is no way to inject
                Python objects into compiled C++).
        """
        if timer is not timeit.default_timer:
            raise NotImplementedError(
                "PyTorch was built with CUDA and a GPU is present; however "
                "Timer does not yet support GPU measurements. If your "
                "code is CPU only, pass `timer=timeit.default_timer` to the "
                "Timer's constructor to indicate this. (Note that this will "
                "produce incorrect results if the GPU is in fact used, as "
                "Timer will not synchronize CUDA.)"
            )

        if globals:
            raise ValueError("C++ timing does not support globals.")

        self._stmt: str = textwrap.dedent(stmt)
        self._setup: str = textwrap.dedent(setup)
        self._global_setup: str = textwrap.dedent(global_setup)
        self._timeit_module: Optional[TimeitModuleType] = None

    def timeit(self, number: int) -> float:
        """Run the compiled statement `number` times; return elapsed seconds."""
        if self._timeit_module is None:
            # Compile lazily and cache: building the extension is expensive
            # and the same CPPTimer may be invoked many times.
            self._timeit_module = cpp_jit.compile_timeit_template(
                stmt=self._stmt,
                setup=self._setup,
                global_setup=self._global_setup,
            )

        return self._timeit_module.timeit(number)
73
+
74
+
75
class Timer:
    """Helper class for measuring execution time of PyTorch statements.

    For a full tutorial on how to use this class, see:
    https://pytorch.org/tutorials/recipes/recipes/benchmark.html

    The PyTorch Timer is based on `timeit.Timer` (and in fact uses
    `timeit.Timer` internally), but with several key differences:

    1) Runtime aware:
        Timer will perform warmups (important as some elements of PyTorch are
        lazily initialized), set threadpool size so that comparisons are
        apples-to-apples, and synchronize asynchronous CUDA functions when
        necessary.

    2) Focus on replicates:
        When measuring code, and particularly complex kernels / models,
        run-to-run variation is a significant confounding factor. It is
        expected that all measurements should include replicates to quantify
        noise and allow median computation, which is more robust than mean.
        To that effect, this class deviates from the `timeit` API by
        conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.
        (Exact algorithms are discussed in method docstrings.) The `timeit`
        method is replicated for cases where an adaptive strategy is not
        desired.

    3) Optional metadata:
        When defining a Timer, one can optionally specify `label`, `sub_label`,
        `description`, and `env`. (Defined later) These fields are included in
        the representation of result object and by the `Compare` class to group
        and display results for comparison.

    4) Instruction counts
        In addition to wall times, Timer can run a statement under Callgrind
        and report instructions executed.

    Directly analogous to `timeit.Timer` constructor arguments:

        `stmt`, `setup`, `timer`, `globals`

    PyTorch Timer specific constructor arguments:

        `label`, `sub_label`, `description`, `env`, `num_threads`

    Args:
        stmt: Code snippet to be run in a loop and timed.

        setup: Optional setup code. Used to define variables used in `stmt`

        global_setup: (C++ only)
            Code which is placed at the top level of the file for things like
            `#include` statements.

        timer:
            Callable which returns the current time. If PyTorch was built
            without CUDA or there is no GPU present, this defaults to
            `timeit.default_timer`; otherwise it will synchronize CUDA before
            measuring the time.

        globals:
            A dict which defines the global variables when `stmt` is being
            executed. This is the other method for providing variables which
            `stmt` needs.

        label:
            String which summarizes `stmt`. For instance, if `stmt` is
            "torch.nn.functional.relu(torch.add(x, 1, out=out))"
            one might set label to "ReLU(x + 1)" to improve readability.

        sub_label:
            Provide supplemental information to disambiguate measurements
            with identical stmt or label. For instance, in our example
            above sub_label might be "float" or "int", so that it is easy
            to differentiate:
            "ReLU(x + 1): (float)"

            "ReLU(x + 1): (int)"
            when printing Measurements or summarizing using `Compare`.

        description:
            String to distinguish measurements with identical label and
            sub_label. The principal use of `description` is to signal to
            `Compare` the columns of data. For instance one might set it
            based on the input size to create a table of the form: ::

                                        | n=1 | n=4 | ...
                                        ------------- ...
                ReLU(x + 1): (float)    | ... | ... | ...
                ReLU(x + 1): (int)      | ... | ... | ...

            using `Compare`. It is also included when printing a Measurement.

        env:
            This tag indicates that otherwise identical tasks were run in
            different environments, and are therefore not equivalent, for
            instance when A/B testing a change to a kernel. `Compare` will
            treat Measurements with different `env` specification as distinct
            when merging replicate runs.

        num_threads:
            The size of the PyTorch threadpool when executing `stmt`. Single
            threaded performance is important as both a key inference workload
            and a good indicator of intrinsic algorithmic efficiency, so the
            default is set to one. This is in contrast to the default PyTorch
            threadpool size which tries to utilize all cores.
    """

    _timer_cls: Type[TimerClass] = timeit.Timer

    def __init__(
        self,
        stmt: str = "pass",
        setup: str = "pass",
        global_setup: str = "",
        timer: Callable[[], float] = timer,
        globals: Optional[Dict[str, Any]] = None,
        label: Optional[str] = None,
        sub_label: Optional[str] = None,
        description: Optional[str] = None,
        env: Optional[str] = None,
        num_threads: int = 1,
        language: Union[Language, str] = Language.PYTHON,
    ):
        if not isinstance(stmt, str):
            raise ValueError("Currently only a `str` stmt is supported.")

        # We copy `globals` to prevent mutations from leaking.
        # (For instance, `eval` adds the `__builtins__` key)
        self._globals = dict(globals or {})

        timer_kwargs = {}
        if language in (Language.PYTHON, "py", "python"):
            # Include `torch` if not specified as a convenience feature.
            self._globals.setdefault("torch", torch)
            self._language: Language = Language.PYTHON
            if global_setup:
                raise ValueError(
                    f"global_setup is C++ only, got `{global_setup}`. Most "
                    "likely this code can simply be moved to `setup`."
                )

        elif language in (Language.CPP, "cpp", "c++"):
            assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped."
            self._timer_cls = CPPTimer
            # "pass" is a Python no-op; the C++ equivalent is no code at all.
            setup = ("" if setup == "pass" else setup)
            self._language = Language.CPP
            timer_kwargs["global_setup"] = global_setup

        else:
            raise ValueError(f"Invalid language `{language}`.")

        # Convenience adjustment so that multi-line code snippets defined in
        # functions do not IndentationError (Python) or look odd (C++). The
        # leading newline removal is for the initial newline that appears when
        # defining block strings. For instance:
        #   textwrap.dedent("""
        #     print("This is a stmt")
        #   """)
        # produces '\nprint("This is a stmt")\n'.
        #
        # Stripping this down to 'print("This is a stmt")' doesn't change
        # what gets executed, but it makes __repr__'s nicer.
        stmt = textwrap.dedent(stmt)
        stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip()
        setup = textwrap.dedent(setup)
        setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip()

        self._timer = self._timer_cls(
            stmt=stmt,
            setup=setup,
            timer=timer,
            globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),
            **timer_kwargs,
        )
        self._task_spec = common.TaskSpec(
            stmt=stmt,
            setup=setup,
            global_setup=global_setup,
            label=label,
            sub_label=sub_label,
            description=description,
            env=env,
            num_threads=num_threads,
        )

    def _timeit(self, number: int) -> float:
        # Even calling a timer in C++ takes ~50 ns, so no real operation should
        # take less than 1 ns. (And this prevents divide by zero errors.)
        return max(self._timer.timeit(number), 1e-9)

    def timeit(self, number: int = 1000000) -> common.Measurement:
        """Mirrors the semantics of timeit.Timer.timeit().

        Execute the main statement (`stmt`) `number` times.
        https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit
        """
        with common.set_torch_threads(self._task_spec.num_threads):
            # Warmup
            self._timeit(number=max(int(number // 100), 2))

            return common.Measurement(
                number_per_run=number,
                raw_times=[self._timeit(number=number)],
                task_spec=self._task_spec
            )

    def repeat(self, repeat: int = -1, number: int = -1) -> None:
        # Deliberately unsupported: adaptive replication replaces it.
        raise NotImplementedError("See `Timer.blocked_autorange.`")

    def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:
        # Deliberately unsupported: adaptive replication replaces it.
        raise NotImplementedError("See `Timer.blocked_autorange.`")

    def _threaded_measurement_loop(
        self,
        number: int,
        time_hook: Callable[[], float],
        stop_hook: Callable[[List[float]], bool],
        min_run_time: float,
        max_run_time: Optional[float] = None,
        callback: Optional[Callable[[int, float], NoReturn]] = None
    ) -> List[float]:
        """Repeatedly invoke `time_hook` until both `min_run_time` has elapsed
        and `stop_hook` approves (or `max_run_time` is exceeded)."""
        total_time = 0.0
        can_stop = False
        times: List[float] = []
        with common.set_torch_threads(self._task_spec.num_threads):
            while (total_time < min_run_time) or (not can_stop):
                time_spent = time_hook()
                times.append(time_spent)
                total_time += time_spent
                if callback:
                    callback(number, time_spent)
                can_stop = stop_hook(times)
                if max_run_time and total_time > max_run_time:
                    break
        return times

    def _estimate_block_size(self, min_run_time: float) -> int:
        """Pick how many `stmt` iterations per timed block (powers of ten)."""
        with common.set_torch_threads(self._task_spec.num_threads):
            # Estimate the block size needed for measurement to be negligible
            # compared to the inner loop. This also serves as a warmup.
            overhead = torch.tensor([self._timeit(0) for _ in range(5)]).median().item()
            number = 1
            while True:
                time_taken = self._timeit(number)
                relative_overhead = overhead / time_taken
                if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:
                    break
                if time_taken > min_run_time:
                    break
                # Avoid overflow in C++ pybind11 interface
                if number * 10 > 2147483647:
                    break
                number *= 10
        return number

    def blocked_autorange(
        self,
        callback: Optional[Callable[[int, float], NoReturn]] = None,
        min_run_time: float = 0.2,
    ) -> common.Measurement:
        """Measure many replicates while keeping timer overhead to a minimum.

        At a high level, blocked_autorange executes the following pseudo-code::

            `setup`

            total_time = 0
            while total_time < min_run_time
                start = timer()
                for _ in range(block_size):
                    `stmt`
                total_time += (timer() - start)

        Note the variable `block_size` in the inner loop. The choice of block
        size is important to measurement quality, and must balance two
        competing objectives:

            1) A small block size results in more replicates and generally
               better statistics.

            2) A large block size better amortizes the cost of `timer`
               invocation, and results in a less biased measurement. This is
               important because CUDA synchronization time is non-trivial
               (order single to low double digit microseconds) and would
               otherwise bias the measurement.

        blocked_autorange sets block_size by running a warmup period,
        increasing block size until timer overhead is less than 0.1% of
        the overall computation. This value is then used for the main
        measurement loop.

        Returns:
            A `Measurement` object that contains measured runtimes and
            repetition counts, and can be used to compute statistics.
            (mean, median, etc.)
        """
        number = self._estimate_block_size(min_run_time)

        def time_hook() -> float:
            return self._timeit(number)

        def stop_hook(times: List[float]) -> bool:
            # Stop as soon as the time budget is spent; no variance check.
            return True

        times = self._threaded_measurement_loop(
            number, time_hook, stop_hook,
            min_run_time=min_run_time,
            callback=callback)

        return common.Measurement(
            number_per_run=number,
            raw_times=times,
            task_spec=self._task_spec
        )

    def adaptive_autorange(
        self,
        threshold: float = 0.1,
        *,
        min_run_time: float = 0.01,
        max_run_time: float = 10.0,
        callback: Optional[Callable[[int, float], NoReturn]] = None,
    ) -> common.Measurement:
        """Similar to `blocked_autorange` but also checks for variablility in measurements
        and repeats until iqr/median is smaller than `threshold` or `max_run_time` is reached.


        At a high level, adaptive_autorange executes the following pseudo-code::

            `setup`

            times = []
            while times.sum < max_run_time
                start = timer()
                for _ in range(block_size):
                    `stmt`
                times.append(timer() - start)

                enough_data = len(times)>3 and times.sum > min_run_time
                small_iqr=times.iqr/times.mean<threshold

                if enough_data and small_iqr:
                    break

        Args:
            threshold: value of iqr/median threshold for stopping

            min_run_time: total runtime needed before checking `threshold`

            max_run_time: total runtime for all measurements regardless of `threshold`

        Returns:
            A `Measurement` object that contains measured runtimes and
            repetition counts, and can be used to compute statistics.
            (mean, median, etc.)
        """
        number = self._estimate_block_size(min_run_time=0.05)

        def time_hook() -> float:
            return self._timeit(number)

        def stop_hook(times: List[float]) -> bool:
            # Require at least a few replicates before testing convergence.
            if len(times) > 3:
                return common.Measurement(
                    number_per_run=number,
                    raw_times=times,
                    task_spec=self._task_spec
                ).meets_confidence(threshold=threshold)
            return False
        times = self._threaded_measurement_loop(
            number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)

        return common.Measurement(
            number_per_run=number,
            raw_times=times,
            task_spec=self._task_spec
        )

    @overload
    def collect_callgrind(
        self,
        number: int,
        *,
        repeats: None,
        collect_baseline: bool,
        retain_out_file: bool,
    ) -> valgrind_timer_interface.CallgrindStats:
        ...

    @overload
    def collect_callgrind(
        self,
        number: int,
        *,
        repeats: int,
        collect_baseline: bool,
        retain_out_file: bool,
    ) -> Tuple[valgrind_timer_interface.CallgrindStats, ...]:
        ...

    def collect_callgrind(
        self,
        number: int = 100,
        *,
        repeats: Optional[int] = None,
        collect_baseline: bool = True,
        retain_out_file: bool = False,
    ) -> Any:
        """Collect instruction counts using Callgrind.

        Unlike wall times, instruction counts are deterministic
        (modulo non-determinism in the program itself and small amounts of
        jitter from the Python interpreter.) This makes them ideal for detailed
        performance analysis. This method runs `stmt` in a separate process
        so that Valgrind can instrument the program. Performance is severely
        degraded due to the instrumentation, however this is ameliorated by
        the fact that a small number of iterations is generally sufficient to
        obtain good measurements.

        In order to use this method `valgrind`, `callgrind_control`, and
        `callgrind_annotate` must be installed.

        Because there is a process boundary between the caller (this process)
        and the `stmt` execution, `globals` cannot contain arbitrary in-memory
        data structures. (Unlike timing methods) Instead, globals are
        restricted to builtins, `nn.Modules`'s, and TorchScripted functions/modules
        to reduce the surprise factor from serialization and subsequent
        deserialization. The `GlobalsBridge` class provides more detail on this
        subject. Take particular care with nn.Modules: they rely on pickle and
        you may need to add an import to `setup` for them to transfer properly.

        By default, a profile for an empty statement will be collected and
        cached to indicate how many instructions are from the Python loop which
        drives `stmt`.

        Returns:
            A `CallgrindStats` object which provides instruction counts and
            some basic facilities for analyzing and manipulating results.
        """
        if not isinstance(self._task_spec.stmt, str):
            raise ValueError("`collect_callgrind` currently only supports string `stmt`")

        if repeats is not None and repeats < 1:
            raise ValueError("If specified, `repeats` must be >= 1")

        # Check that the statement is valid. It doesn't guarantee success, but it's much
        # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in
        # the parent process rather than the valgrind subprocess.
        self._timeit(1)
        is_python = (self._language == Language.PYTHON)
        assert is_python or not self._globals
        result = valgrind_timer_interface.wrapper_singleton().collect_callgrind(
            task_spec=self._task_spec,
            globals=self._globals,
            number=number,
            repeats=repeats or 1,
            collect_baseline=collect_baseline and is_python,
            is_python=is_python,
            retain_out_file=retain_out_file,
        )

        # With no explicit `repeats` callers expect a single stats object,
        # not a 1-tuple.
        return (result[0] if repeats is None else result)
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py ADDED
@@ -0,0 +1,906 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Intermediate layer between `Timer` and `valgrind`."""
2
+ import collections
3
+ import enum
4
+ import dataclasses
5
+ import itertools as it
6
+ import os
7
+ import pickle
8
+ import re
9
+ import shutil
10
+ import subprocess
11
+ import sys
12
+ import textwrap
13
+ from typing import (
14
+ cast, Any, Callable, DefaultDict, Dict, Generator, List, NamedTuple,
15
+ Optional, Tuple, Union, TYPE_CHECKING)
16
+
17
+ import torch
18
+ from torch.utils.benchmark.utils import common, cpp_jit
19
+ from torch.utils.benchmark.utils._stubs import CallgrindModuleType
20
+
21
+
22
+ __all__ = ["FunctionCount", "FunctionCounts", "CallgrindStats", "CopyIfCallgrind"]
23
+
24
+
25
# `subprocess.CompletedProcess` only supports subscripting (e.g.
# `CompletedProcess[str]`) during static analysis on the Python versions this
# file targets; at runtime the plain class is used instead.
if TYPE_CHECKING:
    CompletedProcessType = subprocess.CompletedProcess[str]
else:
    CompletedProcessType = subprocess.CompletedProcess
29
+
30
+
31
+ class FunctionCount(NamedTuple):
32
+ # TODO(#105471): Rename the count field
33
+ count: int # type: ignore[assignment]
34
+ function: str
35
+
36
+
37
+ @dataclasses.dataclass(repr=False, eq=False, frozen=True)
38
+ class FunctionCounts:
39
+ """Container for manipulating Callgrind results.
40
+
41
+ It supports:
42
+ 1) Addition and subtraction to combine or diff results.
43
+ 2) Tuple-like indexing.
44
+ 3) A `denoise` function which strips CPython calls which are known to
45
+ be non-deterministic and quite noisy.
46
+ 4) Two higher order methods (`filter` and `transform`) for custom
47
+ manipulation.
48
+ """
49
+ _data: Tuple[FunctionCount, ...]
50
+ inclusive: bool
51
+ truncate_rows: bool = True
52
+
53
+ # For normal use, torch._tensor_str.PRINT_OPTS.linewidth determines
54
+ # the print settings. This is simply to allow hermetic unit tests.
55
+ _linewidth: Optional[int] = None
56
+
57
+ def __iter__(self) -> Generator[FunctionCount, None, None]:
58
+ yield from self._data
59
+
60
+ def __len__(self) -> int:
61
+ return len(self._data)
62
+
63
+ def __getitem__(self, item: Any) -> Union[FunctionCount, "FunctionCounts"]:
64
+ data: Union[FunctionCount, Tuple[FunctionCount, ...]] = self._data[item]
65
+ return (
66
+ FunctionCounts(cast(Tuple[FunctionCount, ...], data), self.inclusive, truncate_rows=False)
67
+ if isinstance(data, tuple) else data
68
+ )
69
+
70
+ def __repr__(self) -> str:
71
+ count_len = 0
72
+ for c, _ in self:
73
+ # Account for sign in string length.
74
+ count_len = max(count_len, len(str(c)) + int(c < 0))
75
+
76
+ lines = []
77
+ linewidth = self._linewidth or torch._tensor_str.PRINT_OPTS.linewidth
78
+ fn_str_len = max(linewidth - count_len - 4, 40)
79
+ for c, fn in self:
80
+ if len(fn) > fn_str_len:
81
+ left_len = int((fn_str_len - 5) // 2)
82
+ fn = fn[:left_len] + " ... " + fn[-(fn_str_len - left_len - 5):]
83
+ lines.append(f" {c:>{count_len}} {fn}")
84
+
85
+ if self.truncate_rows and len(lines) > 18:
86
+ lines = lines[:9] + ["...".rjust(count_len + 2)] + lines[-9:]
87
+
88
+ if not self.inclusive:
89
+ lines.extend(["", f"Total: {self.sum()}"])
90
+
91
+ return "\n".join([super().__repr__()] + lines)
92
+
93
+ def __add__(
94
+ self,
95
+ other: "FunctionCounts",
96
+ ) -> "FunctionCounts":
97
+ return self._merge(other, lambda c: c)
98
+
99
+ def __sub__(
100
+ self,
101
+ other: "FunctionCounts",
102
+ ) -> "FunctionCounts":
103
+ return self._merge(other, lambda c: -c)
104
+
105
+ def __mul__(self, other: Union[int, float]) -> "FunctionCounts":
106
+ return self._from_dict({
107
+ fn: int(c * other) for c, fn in self._data
108
+ }, self.inclusive)
109
+
110
+ def transform(self, map_fn: Callable[[str], str]) -> "FunctionCounts":
111
+ """Apply `map_fn` to all of the function names.
112
+
113
+ This can be used to regularize function names (e.g. stripping irrelevant
114
+ parts of the file path), coalesce entries by mapping multiple functions
115
+ to the same name (in which case the counts are added together), etc.
116
+ """
117
+ counts: DefaultDict[str, int] = collections.defaultdict(int)
118
+ for c, fn in self._data:
119
+ counts[map_fn(fn)] += c
120
+
121
+ return self._from_dict(counts, self.inclusive)
122
+
123
+ def filter(self, filter_fn: Callable[[str], bool]) -> "FunctionCounts":
124
+ """Keep only the elements where `filter_fn` applied to function name returns True."""
125
+ return FunctionCounts(tuple(i for i in self if filter_fn(i.function)), self.inclusive)
126
+
127
+ def sum(self) -> int:
128
+ return sum(c for c, _ in self)
129
+
130
+ def denoise(self) -> "FunctionCounts":
131
+ """Remove known noisy instructions.
132
+
133
+ Several instructions in the CPython interpreter are rather noisy. These
134
+ instructions involve unicode to dictionary lookups which Python uses to
135
+ map variable names. FunctionCounts is generally a content agnostic
136
+ container, however this is sufficiently important for obtaining
137
+ reliable results to warrant an exception."""
138
+ return self.filter(lambda fn: "dictobject.c:lookdict_unicode" not in fn)
139
+
140
+ def _merge(
141
+ self,
142
+ second: "FunctionCounts",
143
+ merge_fn: Callable[[int], int]
144
+ ) -> "FunctionCounts":
145
+ assert self.inclusive == second.inclusive, "Cannot merge inclusive and exclusive counts."
146
+ counts: DefaultDict[str, int] = collections.defaultdict(int)
147
+ for c, fn in self:
148
+ counts[fn] += c
149
+
150
+ for c, fn in second:
151
+ counts[fn] += merge_fn(c)
152
+
153
+ return self._from_dict(counts, self.inclusive)
154
+
155
+ @staticmethod
156
+ def _from_dict(counts: Dict[str, int], inclusive: bool) -> "FunctionCounts":
157
+ flat_counts = (FunctionCount(c, fn) for fn, c in counts.items() if c)
158
+ return FunctionCounts(tuple(sorted(flat_counts, reverse=True)), inclusive)
159
+
160
+
161
@dataclasses.dataclass(repr=False, eq=False, frozen=True)
class CallgrindStats:
    """Top level container for Callgrind results collected by Timer.

    Manipulation is generally done using the FunctionCounts class, which is
    obtained by calling `CallgrindStats.stats(...)`. Several convenience
    methods are provided as well; the most significant is
    `CallgrindStats.as_standardized()`.
    """
    # Description of the measured workload (stmt, setup, num_threads, ...).
    task_spec: common.TaskSpec
    # How many times `stmt` was executed per collection.
    number_per_run: int
    # Whether torch was built with debug symbols (affects symbol quality).
    built_with_debug_symbols: bool
    # Counts for an empty-statement loop (interpreter overhead reference).
    baseline_inclusive_stats: FunctionCounts
    baseline_exclusive_stats: FunctionCounts
    # Counts for the user `stmt` loop.
    stmt_inclusive_stats: FunctionCounts
    stmt_exclusive_stats: FunctionCounts
    # Raw `callgrind.out` contents when retained; None after standardization.
    stmt_callgrind_out: Optional[str]

    def __repr__(self) -> str:
        newline = "\n"  # `\` cannot appear in fstring code section.
        base_stats = self.baseline_exclusive_stats
        output = f"""
{super().__repr__()}
{self.task_spec.summarize()}
  {'':>25}All{'':>10}Noisy symbols removed
    Instructions: {self.counts(denoise=False):>12}{'':>15}{self.counts(denoise=True):>12}
    Baseline:     {base_stats.sum():>12}{'':>15}{base_stats.denoise().sum():>12}
{self.number_per_run} runs per measurement, {self.task_spec.num_threads} thread{'s' if self.task_spec.num_threads > 1 else ''}
""".strip()
        if not self.built_with_debug_symbols:
            output += textwrap.dedent("""
            Warning: PyTorch was not built with debug symbols.
                     Source information may be limited. Rebuild with
                     REL_WITH_DEB_INFO=1 for more detailed results.""")
        return output

    def stats(self, inclusive: bool = False) -> FunctionCounts:
        """Returns detailed function counts.

        Conceptually, the FunctionCounts returned can be thought of as a tuple
        of (count, path_and_function_name) tuples.

        `inclusive` matches the semantics of callgrind. If True, the counts
        include instructions executed by children. `inclusive=True` is useful
        for identifying hot spots in code; `inclusive=False` is useful for
        reducing noise when diffing counts from two different runs. (See
        CallgrindStats.delta(...) for more details)
        """
        return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats

    def counts(self, *, denoise: bool = False) -> int:
        """Returns the total number of instructions executed.

        See `FunctionCounts.denoise()` for an explanation of the `denoise` arg.
        """
        stats = self.stmt_exclusive_stats
        return (stats.denoise() if denoise else stats).sum()

    # FIXME: Once 3.7 is the minimum version, type annotate `other` per PEP 563
    def delta(
        self,
        other: "CallgrindStats",
        inclusive: bool = False,
    ) -> FunctionCounts:
        """Diff two sets of counts.

        One common reason to collect instruction counts is to determine the
        effect that a particular change will have on the number of instructions
        needed to perform some unit of work. If a change increases that number, the
        next logical question is "why". This generally involves looking at what part
        of the code increased in instruction count. This function automates that
        process so that one can easily diff counts on both an inclusive and
        exclusive basis.
        """
        return self.stats(inclusive=inclusive) - other.stats(inclusive=inclusive)

    def as_standardized(self) -> "CallgrindStats":
        """Strip library names and some prefixes from function strings.

        When comparing two different sets of instruction counts, one stumbling
        block can be path prefixes. Callgrind includes the full filepath
        when reporting a function (as it should). However, this can cause
        issues when diffing profiles. If a key component such as Python
        or PyTorch was built in separate locations in the two profiles, which
        can result in something resembling::

            23234231 /tmp/first_build_dir/thing.c:foo(...)
             9823794 /tmp/first_build_dir/thing.c:bar(...)
              ...
               53453 .../aten/src/Aten/...:function_that_actually_changed(...)
              ...
             -9823794 /tmp/second_build_dir/thing.c:bar(...)
            -23234231 /tmp/second_build_dir/thing.c:foo(...)

        Stripping prefixes can ameliorate this issue by regularizing the
        strings and causing better cancellation of equivalent call sites
        when diffing.
        """
        def strip(stats: FunctionCounts) -> FunctionCounts:
            # Each transform is a (pattern, replacement) pair applied in order.
            transforms = (
                # PyTorch may have been built in different locations.
                (r"^.+build/\.\./", "build/../"),
                (r"^.+/" + re.escape("build/aten/"), "build/aten/"),

                # "Python" and "Objects" come from CPython.
                (r"^.+/" + re.escape("Python/"), "Python/"),
                (r"^.+/" + re.escape("Objects/"), "Objects/"),

                # Strip library name. e.g. `libtorch.so`
                (r"\s\[.+\]$", ""),
            )

            for before, after in transforms:
                stats = stats.transform(lambda fn: re.sub(before, after, fn))

            return stats

        return CallgrindStats(
            task_spec=self.task_spec,
            number_per_run=self.number_per_run,
            built_with_debug_symbols=self.built_with_debug_symbols,
            baseline_inclusive_stats=strip(self.baseline_inclusive_stats),
            baseline_exclusive_stats=strip(self.baseline_exclusive_stats),
            stmt_inclusive_stats=strip(self.stmt_inclusive_stats),
            stmt_exclusive_stats=strip(self.stmt_exclusive_stats),

            # `as_standardized` will change symbol names, so the contents will
            # no longer map directly to `callgrind.out`
            stmt_callgrind_out=None,
        )
291
+
292
+
293
class Serialization(enum.Enum):
    """Mechanism by which a global is shipped across the Valgrind process boundary."""
    PICKLE = 0
    TORCH = 1
    TORCH_JIT = 2


# Types which `collect_callgrind` will transfer to the subprocess, keyed by
# the serialization mechanism used to move them across the process boundary.
_GLOBALS_ALLOWED_TYPES: Dict[Serialization, Tuple[Any, ...]] = {
    Serialization.PICKLE: (str, bytes, bool, int, float, complex),
    Serialization.TORCH_JIT: (torch.jit.ScriptFunction, torch.jit.ScriptModule),
    Serialization.TORCH: (torch.nn.Module,),
}


class CopyIfCallgrind:
    """Signal that a global may be replaced with a deserialized copy.

    See `GlobalsBridge` for why this matters.
    """
    def __init__(self, value: Any, *, setup: Optional[str] = None):
        # Pick the first serialization method whose allowed types match
        # `value`; fail with an informative message otherwise.
        for method, supported_types in _GLOBALS_ALLOWED_TYPES.items():
            if any(isinstance(value, t) for t in supported_types):
                self._value: Any = value
                self._setup: Optional[str] = setup
                self._serialization: Serialization = method
                break
        else:
            # FIX: the original used `it.chain(...)` directly on the dict's
            # values, which iterates over the per-method *tuples* rather than
            # the types they contain, so the error message listed tuple reprs
            # instead of type names. `chain.from_iterable` flattens one level.
            supported_str = "\n".join([
                getattr(t, "__name__", repr(t))
                for t in it.chain.from_iterable(_GLOBALS_ALLOWED_TYPES.values())])

            raise ValueError(
                f"Unsupported type: {type(value)}\n"
                f"`collect_callgrind` restricts globals to the following types:\n"
                f"{textwrap.indent(supported_str, '  ')}"
            )

    @property
    def value(self) -> Any:
        """The wrapped object (pre-serialization)."""
        return self._value

    @property
    def setup(self) -> Optional[str]:
        """Optional setup string run in the subprocess before deserialization."""
        return self._setup

    @property
    def serialization(self) -> Serialization:
        """Which mechanism will be used to transfer `value`."""
        return self._serialization

    @staticmethod
    def unwrap_all(globals: Dict[str, Any]) -> Dict[str, Any]:
        """Return `globals` with every `CopyIfCallgrind` wrapper removed."""
        return {
            k: (v.value if isinstance(v, CopyIfCallgrind) else v)
            for k, v in globals.items()
        }
347
+
348
+
349
class GlobalsBridge:
    """Handle the transfer of (certain) globals when collecting Callgrind statistics.

    Key takeaway: Any globals passed must be wrapped in `CopyIfCallgrind` to
    work with `Timer.collect_callgrind`.

    Consider the following code snippet:
    ```
        import pickle
        import timeit

        class Counter:
            value = 0

            def __call__(self):
                self.value += 1

        counter = Counter()
        timeit.Timer("counter()", globals={"counter": counter}).timeit(10)
        print(counter.value)  # 10

        timeit.Timer(
            "counter()",
            globals={"counter": pickle.loads(pickle.dumps(counter))}
        ).timeit(20)
        print(counter.value)  # Still 10
    ```

    In the first case, `stmt` is executed using the objects in `globals`;
    however, the addition of serialization and deserialization changes the
    semantics and may meaningfully change behavior.

    This is a practical consideration when collecting Callgrind statistics.
    Unlike `exec` based execution (which `timeit` uses under the hood) which
    can share in-memory data structures with the caller, Callgrind collection
    requires an entirely new process in order to run under Valgrind. This means
    that any data structures used for statement execution will have to be
    serialized and deserialized in the subprocess.

    In order to avoid surprising semantics from (user invisible) process
    boundaries, what can be passed through `globals` is severely restricted
    for `Timer.collect_callgrind`. It is expected that most setup should be
    achievable (albeit perhaps less ergonomically) by passing a `setup`
    string.

    There are, however, exceptions. One such class is TorchScripted functions.
    Because they require a concrete file with source code it is not possible
    to define them using a `setup` string. Another group are torch.nn.Modules,
    whose construction can be complex and prohibitively cumbersome to coerce
    into a `setup` string. Finally, most builtin types are sufficiently well
    behaved and sufficiently common to warrant allowing as well. (e.g.
    `globals={"n": 1}` is very convenient.)

    Fortunately, all have well defined serialization semantics. This class
    is responsible for enabling the Valgrind subprocess to use elements in
    `globals` so long as they are an allowed type.

    Caveats:
        The user is required to acknowledge this serialization by wrapping
        elements in `globals` with `CopyIfCallgrind`.

        While ScriptFunction and ScriptModule are expected to save and load
        quite robustly, it is up to the user to ensure that an nn.Module can
        un-pickle successfully.

        `torch.Tensor` and `np.ndarray` are deliberately excluded. The
        serialization/deserialization process perturbs the representation of a
        tensor in ways that could result in incorrect measurements. For example,
        if a tensor lives in pinned CPU memory, this fact would not be preserved
        by a dump, and that will in turn change the performance of certain CUDA
        operations.
    """

    def __init__(self, globals: Dict[str, Any], data_dir: str) -> None:
        """Validate `globals` and remember where serialized values will be staged."""
        self._globals: Dict[str, CopyIfCallgrind] = {}
        self._data_dir = data_dir
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)

        # Mocking out `torch` cannot survive the process boundary: the
        # collection script imports the real torch.
        if globals.get("torch", torch) is not torch:
            raise ValueError("`collect_callgrind` does not support mocking out `torch`.")

        for name, value in globals.items():
            if name in ("torch", "__builtins__"):
                # Torch will be imported by the collection script, and
                # __builtins__ is added by Timer.
                continue

            if not isinstance(value, CopyIfCallgrind):
                raise ValueError(
                    "`collect_callgrind` requires that globals be wrapped in "
                    "`CopyIfCallgrind` so that serialization is explicit."
                )

            self._globals[name] = value

    def construct(self) -> str:
        """Serialize all staged globals to `data_dir` and return the Python
        source which, when run in the subprocess, loads them back by name."""
        load_lines = []
        for name, wrapped_value in self._globals.items():
            # Optional user-provided setup (e.g. imports needed to unpickle).
            if wrapped_value.setup is not None:
                load_lines.append(textwrap.dedent(wrapped_value.setup))

            if wrapped_value.serialization == Serialization.PICKLE:
                path = os.path.join(self._data_dir, f"{name}.pkl")
                load_lines.append(
                    f"with open({repr(path)}, 'rb') as f:\n    {name} = pickle.load(f)")
                with open(path, "wb") as f:
                    pickle.dump(wrapped_value.value, f)

            elif wrapped_value.serialization == Serialization.TORCH:
                path = os.path.join(self._data_dir, f"{name}.pt")
                load_lines.append(f"{name} = torch.load({repr(path)})")
                torch.save(wrapped_value.value, path)

            elif wrapped_value.serialization == Serialization.TORCH_JIT:
                path = os.path.join(self._data_dir, f"{name}.pt")
                load_lines.append(f"{name} = torch.jit.load({repr(path)})")
                with open(path, "wb") as f:
                    torch.jit.save(wrapped_value.value, f)

            else:
                raise NotImplementedError(
                    f"Unknown serialization method: {wrapped_value.serialization}")

        return "\n".join(load_lines)
474
+
475
+
476
class _ValgrindWrapper:
    """Orchestrates running a benchmark subprocess under Valgrind/Callgrind.

    A single instance is shared process-wide (see `wrapper_singleton`) since
    binding detection and command discovery only need to happen once.
    """

    def __init__(self) -> None:
        # Fallback bindings, JIT-compiled when `torch._C` lacks the valgrind
        # hooks. `None` means the hooks in `torch._C` are used directly.
        self._bindings_module: Optional[CallgrindModuleType] = None
        valgrind_symbols = (
            "_valgrind_supported_platform",
            "_valgrind_toggle",
            "_valgrind_toggle_and_dump_stats",
        )
        if all(hasattr(torch._C, symbol) for symbol in valgrind_symbols):
            self._supported_platform: bool = torch._C._valgrind_supported_platform()

        else:
            print("Callgrind bindings are not present in `torch._C`. JIT-ing bindings.")
            self._bindings_module = cpp_jit.get_compat_bindings()
            assert all(hasattr(self._bindings_module, symbol) for symbol in valgrind_symbols)
            self._supported_platform = self._bindings_module._valgrind_supported_platform()

        # Map of required command line tools -> whether `which` found them.
        self._commands_available: Dict[str, bool] = {}
        if self._supported_platform:
            # Only bother checking on supported platforms.
            for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"):
                self._commands_available[cmd] = not subprocess.run(
                    ["which", cmd],
                    capture_output=True,
                    check=False,
                ).returncode

        # Extract the CMake build type (e.g. "RelWithDebInfo") so we can warn
        # when debug symbols are missing.
        self._build_type: Optional[str] = None
        build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show())
        if build_search is not None:
            self._build_type = build_search.groups()[0].split(",")[0]

    def _validate(self) -> None:
        """Raise OSError if the platform or required command line tools are unusable."""
        if not self._supported_platform:
            raise OSError("Valgrind is not supported on this platform.")

        missing_cmds = [cmd for cmd, available in self._commands_available.items() if not available]
        if missing_cmds:
            raise OSError("Missing: " + ", ".join(missing_cmds))

    def collect_callgrind(
        self,
        task_spec: common.TaskSpec,
        globals: Dict[str, Any],
        *,
        number: int,
        repeats: int,
        collect_baseline: bool,
        is_python: bool,
        retain_out_file: bool,
    ) -> Tuple[CallgrindStats, ...]:
        """Collect stats, and attach a reference run which can be used to filter interpreter overhead."""
        self._validate()
        # Baselines only make sense for Python; C++ has no interpreter loop.
        assert is_python or not collect_baseline

        # `_invoke` returns `repeats` stmt profiles followed by one baseline
        # profile (empty placeholders when `collect_baseline` is False).
        *task_stats, baseline_stats = self._invoke(
            task_spec=task_spec,
            globals=globals,
            number=number,
            repeats=repeats,
            collect_baseline=collect_baseline,
            is_python=is_python,
            retain_out_file=retain_out_file,
        )
        assert len(task_stats) == repeats

        return tuple(
            CallgrindStats(
                task_spec=task_spec,
                number_per_run=number,
                built_with_debug_symbols=self._build_type == "RelWithDebInfo",
                baseline_inclusive_stats=baseline_stats[0],
                baseline_exclusive_stats=baseline_stats[1],
                stmt_inclusive_stats=stmt_inclusive_stats,
                stmt_exclusive_stats=stmt_exclusive_stats,
                stmt_callgrind_out=out_contents,
            )
            for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats
        )

    def _invoke(
        self,
        *,
        task_spec: common.TaskSpec,
        globals: Dict[str, Any],
        number: int,
        repeats: int,
        collect_baseline: bool,
        is_python: bool,
        retain_out_file: bool,
    ) -> Tuple[Tuple[FunctionCounts, FunctionCounts, Optional[str]], ...]:
        """Core invocation method for Callgrind collection.

        Valgrind operates by effectively replacing the CPU with an emulated
        version which allows it to instrument any code at the cost of severe
        performance degradation. This has the practical effect that in order
        to collect Callgrind statistics, a new process has to be created
        running under `valgrind`. The steps for this process are:

        1) Create a scratch directory.
        2) Codegen a run script. (_ValgrindWrapper._construct_script)
            Inside the run script:
                * Validate that Python and torch match the parent process
                * Validate that it is indeed running under valgrind
                * Execute `setup` and warm up `stmt`
                * Begin collecting stats
                * Run the `stmt` loop
                * Stop collecting stats
        3) Parse the run results.
        4) Cleanup the scratch directory.
        """
        working_dir = common._make_temp_dir(prefix="callgrind")
        data_dir = os.path.join(working_dir, "data")
        script_file = os.path.join(working_dir, "timer_callgrind.py")
        callgrind_out = os.path.join(working_dir, "callgrind.out")
        error_log = os.path.join(working_dir, "error.txt")
        stat_log = os.path.join(working_dir, "callgrind_stat.txt")
        stdout_stderr_log = os.path.join(working_dir, "stdout_stderr.log")

        def run(args: List[str], **kwargs: Any) -> Tuple[CompletedProcessType, str]:
            # Redirect through a file rather than PIPE to avoid deadlocks on
            # large output.
            # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
            f_stdout_stderr = open(stdout_stderr_log, "wb")
            try:
                invocation = subprocess.run(
                    args,
                    stdout=f_stdout_stderr,
                    stderr=subprocess.STDOUT,
                    **kwargs,
                )
                with open(stdout_stderr_log) as f:
                    return invocation, f.read()
            finally:
                f_stdout_stderr.close()

        try:
            if is_python:
                # JIT-ed compat bindings must be importable from the script's
                # working directory.
                if self._bindings_module is not None:
                    shutil.copy(
                        self._bindings_module.__file__,
                        os.path.join(working_dir, os.path.split(self._bindings_module.__file__)[1])
                    )

                script_file = os.path.join(working_dir, "timer_callgrind.py")
                with open(script_file, "w") as f:
                    f.write(self._construct_script(
                        task_spec,
                        globals=GlobalsBridge(globals, data_dir),
                        number=number,
                        repeats=repeats,
                        collect_baseline=collect_baseline,
                        error_log=error_log,
                        stat_log=stat_log,
                        bindings=self._bindings_module))

                run_loop_cmd = ["python", script_file]
            else:
                assert not collect_baseline
                run_loop_exec = cpp_jit.compile_callgrind_template(
                    stmt=task_spec.stmt,
                    setup=task_spec.setup,
                    global_setup=task_spec.global_setup,
                )
                run_loop_cmd = [
                    run_loop_exec,
                    "--number", str(number),
                    "--number-warmup", str(min(number, 10)),
                    "--repeats", str(repeats),
                    "--number-threads", str(task_spec.num_threads),
                ]

            # Instrumentation starts enabled but collection is toggled by the
            # bindings inside the subprocess, so only the `stmt` loops count.
            valgrind_invocation, valgrind_invocation_output = run([
                "valgrind",
                "--tool=callgrind",
                f"--callgrind-out-file={callgrind_out}",
                "--dump-line=yes",
                "--dump-instr=yes",
                "--instr-atstart=yes",
                "--collect-atstart=no",
            ] + run_loop_cmd)

            if valgrind_invocation.returncode:
                # Prefer the subprocess's own error log; fall back to captured
                # stdout/stderr.
                error_report = ""
                if os.path.exists(error_log):
                    with open(error_log) as f:
                        error_report = f.read()
                if not error_report:
                    error_report = "Unknown error.\n" + valgrind_invocation_output

                raise OSError(f"Failed to collect callgrind profile:\n{error_report}")

            def parse_output(fpath: str, inclusive: bool) -> FunctionCounts:
                """Run `callgrind_annotate` on one profile and parse its text output."""
                annotate_invocation, annotate_invocation_output = run([
                    "callgrind_annotate",
                    f"--inclusive={'yes' if inclusive else 'no'}",
                    "--threshold=100",
                    "--show-percs=no",
                    fpath
                ], check=True)

                total_pattern = re.compile(r"^([0-9,]+)\s+PROGRAM TOTALS")
                begin_pattern = re.compile(r"Ir\s+file:function")
                function_pattern = re.compile(r"^\s*([0-9,]+)\s+(.+:.+)$")

                # Simple state machine over the annotate output: find the
                # totals line, then the table header, then parse rows.
                class ScanState(enum.Enum):
                    SCANNING_FOR_TOTAL = 0
                    SCANNING_FOR_START = 1
                    PARSING = 2

                scan_state = ScanState.SCANNING_FOR_TOTAL
                fn_counts = []
                for l in annotate_invocation_output.splitlines(keepends=False):
                    if scan_state == ScanState.SCANNING_FOR_TOTAL:
                        total_match = total_pattern.match(l)
                        if total_match:
                            program_totals = int(total_match.groups()[0].replace(",", ""))
                            scan_state = ScanState.SCANNING_FOR_START

                    elif scan_state == ScanState.SCANNING_FOR_START:
                        if begin_pattern.match(l):
                            scan_state = ScanState.PARSING

                    else:
                        assert scan_state == ScanState.PARSING
                        fn_match = function_pattern.match(l)
                        if fn_match:
                            ir_str, file_function = fn_match.groups()
                            ir = int(ir_str.replace(",", ""))
                            if ir == program_totals:
                                # Callgrind includes some top level red herring symbols when
                                # a program dumps multiple profiles.
                                continue
                            fn_counts.append(FunctionCount(ir, file_function))

                        elif re.match(r"-+", l):
                            # Ignore heading separator lines.
                            continue

                        else:
                            break

                assert scan_state == ScanState.PARSING, f"Failed to parse {fpath}"
                return FunctionCounts(tuple(sorted(fn_counts, reverse=True)), inclusive=inclusive)

            def read_results(i: int) -> Tuple[FunctionCounts, FunctionCounts, Optional[str]]:
                """Parse (inclusive, exclusive, raw-contents) for dump `i`.

                Index `repeats` is the baseline dump; when no baseline was
                collected an empty placeholder is returned for it.
                """
                if i == repeats and not collect_baseline:
                    # Null baseline.
                    return (
                        FunctionCounts((), inclusive=True),
                        FunctionCounts((), inclusive=False),
                        None,
                    )

                fpath = f"{callgrind_out}.{i + 1}"  # Callgrind one-indexes files.
                callgrind_out_contents: Optional[str] = None
                if retain_out_file:
                    with open(fpath) as f:
                        callgrind_out_contents = f.read()

                return (
                    parse_output(fpath, inclusive=True),
                    parse_output(fpath, inclusive=False),
                    callgrind_out_contents
                )

            return tuple(read_results(i) for i in range(repeats + 1))
        finally:
            shutil.rmtree(working_dir)

    @staticmethod
    def _construct_script(
        task_spec: common.TaskSpec,
        globals: GlobalsBridge,
        *,
        number: int,
        repeats: int,
        collect_baseline: bool,
        error_log: str,
        stat_log: str,
        bindings: Optional[CallgrindModuleType],
    ) -> str:
        """Generate the Python source executed in the valgrind subprocess."""
        def block_stmt(stmt: str, indent: int = 0) -> str:
            """Partially unroll benchmark loop.

            The naive template looks something like:
                "for _ in range({number}): {stmt}"

            However a loop in Python is surprisingly expensive, and significantly
            increases the number of background Python instructions. So instead we
            partially unroll the loops, with a block size of 100 chosen to keep
            the instruction overhead from `range` low while also not ballooning
            the size of the generated file.
            """
            block_size = 100
            loop_count = number // block_size
            if loop_count == 1:
                # There is no point in having `for _ in range(1): ...` rather
                # than just `...`, and this lets us shave a few background
                # instructions.
                loop_count = 0
            remainder = number - block_size * loop_count
            blocked_stmt = ""

            if loop_count:
                unrolled_stmts = textwrap.indent("\n".join([stmt] * block_size), " " * 4)
                blocked_stmt += f"for _ in range({loop_count}):\n{unrolled_stmts}\n"

            if remainder:
                blocked_stmt += "\n".join([stmt] * remainder)

            return textwrap.indent(blocked_stmt, " " * indent)

        # Stats for an empty statement loop, used to subtract out interpreter
        # overhead from the real measurements.
        pass_baseline = (
            "callgrind_bindings._valgrind_toggle()\n"
            f"{block_stmt('pass')}\n"
            "callgrind_bindings._valgrind_toggle_and_dump_stats()"
        )

        return textwrap.dedent(r"""
            import gc
            import os
            import pickle
            import subprocess
            import sys
            import time

            # Mitigate https://github.com/pytorch/pytorch/issues/37377
            # which can sometimes cause the subprocess call to fail.
            import numpy as np

            import torch
            torch.set_num_threads({num_threads})

            {bindings_import}

            PID = os.getpid()

            def log_failure(msg):
                with open({error_log_repr}, "wt") as f:
                    f.write(msg)
                sys.exit(1)

            def check_result(completed_process):
                if completed_process.returncode:
                    log_failure(f"Command failed: {{' '.join(completed_process.args)}}")
                return completed_process

            # =============================================================================
            # == Check that subprocess matches parent =====================================
            # =============================================================================
            if os.path.realpath(sys.executable) != "{parent_interpreter}":
                log_failure(
                    "Interpreter mismatch:\n"
                    f"  {{os.path.realpath(sys.executable)}}\n    vs.\n  {parent_interpreter}"
                )

            if torch.__file__ != "{torch_file}":
                log_failure(
                    "PyTorch does not match expected file:\n"
                    f"  {{torch.__file__}}\n    vs.\n  {torch_file}"
                )

            # =============================================================================
            # == User specified setup =====================================================
            # =============================================================================
            # Load serialized globals
            {load_globals}

            # User setup str
            {setup}

            for _ in range({warmup_number}):
            {indented_stmt}

            # =============================================================================
            # == Callgrind management =====================================================
            # =============================================================================
            with open("{stat_log}", "wb") as stat_file:
                # If many instances of callgrind are running at once, the output of
                # `callgrind_control` may exceed 16kb which would cause `subprocess.PIPE`
                # to deadlock. So instead we use a file.
                callgrind_stat = check_result(subprocess.run(
                    ["callgrind_control", "--stat"],
                    stdout=stat_file,
                    stderr=subprocess.STDOUT,
                ))

            with open("{stat_log}", "rt") as stat_file:
                stat_lines = stat_file.read().splitlines()

            if f"PID {{PID}}: python {{__file__}}" not in stat_lines:
                log_failure("Process does not appear to be running callgrind.")

            gc.collect()
            time.sleep(0.01)

            # =============================================================================
            # == User code block ==========================================================
            # =============================================================================
            for _ in range({repeats}):
                callgrind_bindings._valgrind_toggle()
            {blocked_stmt}
                callgrind_bindings._valgrind_toggle_and_dump_stats()
                gc.collect()

            {baseline}
        """).strip().format(
            indented_stmt=textwrap.indent(task_spec.stmt, " " * 4),
            blocked_stmt=block_stmt(task_spec.stmt, indent=4),
            baseline=(pass_baseline if collect_baseline else ""),
            number=number,
            repeats=repeats,
            load_globals=globals.construct(),
            setup=task_spec.setup,
            warmup_number=min(number, 10),
            num_threads=task_spec.num_threads,
            error_log_repr=repr(error_log),
            stat_log=stat_log,
            parent_interpreter=os.path.realpath(sys.executable),
            torch_file=torch.__file__,
            bindings_import=(
                "import torch._C as callgrind_bindings" if bindings is None
                else f"import {bindings.__name__} as callgrind_bindings"),
        )
899
+
900
+
901
CALLGRIND_SINGLETON: Optional[_ValgrindWrapper] = None


def wrapper_singleton() -> _ValgrindWrapper:
    """Return the process-wide `_ValgrindWrapper`, creating it on first use."""
    global CALLGRIND_SINGLETON
    if CALLGRIND_SINGLETON is not None:
        return CALLGRIND_SINGLETON
    CALLGRIND_SINGLETON = _ValgrindWrapper()
    return CALLGRIND_SINGLETON
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py ADDED
File without changes
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .version import __version__
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (223 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.75 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc ADDED
Binary file (28.4 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc ADDED
Binary file (200 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/constants.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Constants for annotations in the mapping.

The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py.
They are based on
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h
and fall in three categories: 1) type of mapping, 2) API of mapping, 3) unsupported
mapping.
"""

# --- Type-of-mapping constants -----------------------------------------------
# BUG FIX: the original line read `CONV_VERSION = 0,` — the trailing comma made
# this a 1-tuple (0,) rather than an int, so comparisons and membership tests
# against the other integer CONV_* constants silently never matched.
CONV_VERSION = 0
CONV_INIT = 1
CONV_DEVICE = 2
CONV_MEM = 3
CONV_KERN = 4
CONV_COORD_FUNC = 5
CONV_MATH_FUNC = 6
CONV_DEVICE_FUNC = 7
CONV_SPECIAL_FUNC = 8
CONV_STREAM = 9
CONV_EVENT = 10
CONV_OCCUPANCY = 11
CONV_CONTEXT = 12
CONV_PEER = 13
CONV_MODULE = 14
CONV_CACHE = 15
CONV_EXEC = 16
CONV_ERROR = 17
CONV_DEF = 18
CONV_TEX = 19
CONV_GL = 20
CONV_GRAPHICS = 21
CONV_SURFACE = 22
CONV_JIT = 23
CONV_D3D9 = 24
CONV_D3D10 = 25
CONV_D3D11 = 26
CONV_VDPAU = 27
CONV_EGL = 28
CONV_THREAD = 29
CONV_OTHER = 30
CONV_INCLUDE = 31
CONV_INCLUDE_CUDA_MAIN_H = 32
CONV_TYPE = 33
CONV_LITERAL = 34
CONV_NUMERIC_LITERAL = 35
CONV_LAST = 36

# --- API-of-mapping constants ------------------------------------------------
API_DRIVER = 37
API_RUNTIME = 38
API_BLAS = 39
API_SPECIAL = 40
API_RAND = 41
API_LAST = 42
API_FFT = 43
API_RTC = 44
API_ROCTX = 45

# --- Unsupported / project-specific markers ----------------------------------
HIP_UNSUPPORTED = 46
# Large offsets keep the PyTorch/Caffe2/c10/rocm-smi markers disjoint from the
# hipify-clang value space above.
API_PYTORCH = 1337
API_CAFFE2 = 1338
API_C10 = 1339
API_ROCMSMI = 1340
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py ADDED
@@ -0,0 +1,1129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """ The Python Hipify script.
3
+ ##
4
+ # Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
5
+ # 2017-2018 Advanced Micro Devices, Inc. and
6
+ # Facebook Inc. All rights reserved.
7
+ #
8
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
9
+ # of this software and associated documentation files (the "Software"), to deal
10
+ # in the Software without restriction, including without limitation the rights
11
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
+ # copies of the Software, and to permit persons to whom the Software is
13
+ # furnished to do so, subject to the following conditions:
14
+ #
15
+ # The above copyright notice and this permission notice shall be included in
16
+ # all copies or substantial portions of the Software.
17
+ #
18
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24
+ # THE SOFTWARE.
25
+ """
26
+ import argparse
27
+ import fnmatch
28
+ import re
29
+ import shutil
30
+ import sys
31
+ import os
32
+
33
+ from . import constants
34
+ from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
35
+ from .cuda_to_hip_mappings import MATH_TRANSPILATIONS
36
+
37
+ from typing import Dict, List, Iterator, Optional
38
+ from collections.abc import Mapping, Iterable
39
+ from enum import Enum
40
+
41
class CurrentState(Enum):
    """Lifecycle state of one file inside the hipify pipeline."""
    INITIALIZED = 1  # queued: a HipifyResult placeholder exists, no output yet
    DONE = 2         # preprocessing finished and the result was recorded
44
+
45
class HipifyResult:
    """Mutable record describing the outcome of hipifying a single file."""

    def __init__(self, current_state, hipified_path):
        # current_state: a CurrentState value; hipified_path: destination path
        # of the translated file. `status` is filled in by the preprocessor.
        self.current_state = current_state
        self.hipified_path = hipified_path
        self.status = ""

    def __str__(self):
        # Keep the exact historical template so log scrapers keep working.
        template = "HipifyResult:: current_state: {}, hipified_path : {}, status: {}"
        return template.format(self.current_state, self.hipified_path, self.status)
54
+
55
# Maps absolute input-file path -> HipifyResult for every file handled this run.
HipifyFinalResult = Dict[str, HipifyResult]
# Marker comment written at the top of generated C/C++ files so they can be
# recognized (and skipped/regenerated) on later runs.
HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n"
# Module-level accumulator returned by hipify(); updated as files are processed.
HIPIFY_FINAL_RESULT: HipifyFinalResult = {}

# Hardcode the PyTorch template map
"""This dictionary provides the mapping from PyTorch kernel template types
to their actual types."""
PYTORCH_TEMPLATE_MAP = {"Dtype": "scalar_t", "T": "scalar_t"}

# Public API of this module.
__all__ = ['InputError', 'openf', 'bcolors', 'GeneratedFileCleaner', 'match_extensions', 'matched_files_iter',
           'preprocess_file_and_save_result', 'compute_stats', 'add_dim3', 'processKernelLaunches', 'find_closure_group',
           'find_bracket_group', 'find_parentheses_group', 'replace_math_functions', 'hip_header_magic', 'replace_extern_shared',
           'get_hip_file_path', 'is_out_of_place', 'is_pytorch_file', 'is_cusparse_file', 'is_special_file', 'is_caffe2_gpu_file',
           'is_caffe2_gpu_file', 'Trie', 'preprocessor', 'file_specific_replacement', 'file_add_header',
           'fix_static_global_kernels', 'extract_arguments', 'str2bool', 'CurrentState', 'HipifyResult', 'hipify']
70
+
71
+
72
class InputError(Exception):
    """Exception raised for errors in the input."""

    def __init__(self, message):
        super().__init__(message)
        self.message = message

    def __str__(self):
        return f"Input error: {self.message}"
81
+
82
+
83
def openf(filename, mode):
    """Open *filename* in *mode*, silently dropping undecodable bytes.

    hipify walks arbitrary source trees, so errors='ignore' keeps it from
    crashing on files that are not valid in the default text encoding.
    """
    return open(filename, mode, errors='ignore')
85
+
86
+
87
+ # Color coding for printing
88
# Color coding for printing
class bcolors:
    # ANSI escape sequences used to colorize terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'       # reset to default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
97
+
98
+
99
+ # To the programmer, the output of hipify most likely are intermediates.
100
+ # This class allows users of hipify to ask for a cleanup by running the
101
+ # hipify and compilation in a with instantiating this context manager class
102
+ # with keep_intermediates=False.
103
+ # The main usecase is the cpp_extensions, specifically the load method.
104
+ # It is a good idea to keep intermediates (in case of errors or to
105
+ # not recompile unchanged files), but in cases where you don't want to
106
+ # keep them (e.g. in the CI), this can be used to remove files.
107
class GeneratedFileCleaner:
    """Context Manager to clean up generated files"""
    def __init__(self, keep_intermediates=False):
        # When keep_intermediates is True, __exit__ is a no-op.
        self.keep_intermediates = keep_intermediates
        self.files_to_clean = set()   # absolute paths of files we created
        self.dirs_to_clean = []       # absolute paths of dirs, in creation order

    def __enter__(self):
        return self

    def open(self, fn, *args, **kwargs):
        # Track the file for cleanup only if it did not exist beforehand,
        # so pre-existing files are never deleted.
        if not os.path.exists(fn):
            self.files_to_clean.add(os.path.abspath(fn))
        return open(fn, *args, **kwargs)

    def makedirs(self, dn, exist_ok=False):
        # Recursive mkdir that records every directory this call creates.
        parent, n = os.path.split(dn)
        if not n:
            # Path ended with a separator; split again to get the real leaf.
            parent, n = os.path.split(parent)
        if parent and n and not os.path.exists(parent):
            self.makedirs(parent, exist_ok=True)
        # Mirrors os.makedirs semantics: raise if dn exists and exist_ok is False.
        if not os.path.isdir(dn) or not exist_ok:
            os.mkdir(dn)
            self.dirs_to_clean.append(os.path.abspath(dn))

    def __exit__(self, type, value, traceback):
        if not self.keep_intermediates:
            for f in self.files_to_clean:
                os.unlink(f)
            # Remove in reverse creation order so children go before parents.
            for d in self.dirs_to_clean[::-1]:
                os.rmdir(d)
138
+
139
+
140
def match_extensions(filename: str, extensions: Iterable) -> bool:
    """Return True if *filename* ends with any of the given extensions."""
    # str.endswith accepts a tuple of suffixes, checking them in one call.
    return filename.endswith(tuple(extensions))
143
+
144
+
145
def _fnmatch(filepath, patterns):
    """Return True if *filepath* matches at least one glob in *patterns*."""
    for pattern in patterns:
        if fnmatch.fnmatch(filepath, pattern):
            return True
    return False
147
+
148
+
149
def matched_files_iter(
        root_path: str,
        includes: Iterable = (),
        ignores: Iterable = (),
        extensions: Iterable = (),
        out_of_place_only: bool = False,
        is_pytorch_extension: bool = False) -> Iterator[str]:
    """Yield absolute paths under *root_path* that pass the include/ignore
    globs and extension filter, plus any path listed verbatim in *includes*."""

    exact_matches = set(includes)

    # This is a very rough heuristic; really, we want to avoid scanning
    # any file which is not checked into source control, but this script
    # needs to work even if you're in a Git or Hg checkout, so easier to
    # just block the biggest time sinks that won't matter in the
    # end.
    for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True):
        rel_dirpath = os.path.relpath(abs_dirpath, root_path)
        if rel_dirpath == '.':
            # Blah blah blah O(n) blah blah
            # NOTE: mutating `dirs` in place is how os.walk(topdown=True)
            # prunes subtrees — do not replace with a new list.
            if ".git" in dirs:
                dirs.remove(".git")
            if "build" in dirs:
                dirs.remove("build")
            if "third_party" in dirs:
                dirs.remove("third_party")
                # Re-admit the one third_party subtree hipify must process.
                dirs.append("third_party/nvfuser")
        for filename in filenames:
            filepath = os.path.join(abs_dirpath, filename)
            rel_filepath = os.path.join(rel_dirpath, filename)
            # We respect extensions, UNLESS you wrote the entire
            # filename verbatim, in which case we always accept it
            if (
                _fnmatch(filepath, includes)
                and (not _fnmatch(filepath, ignores))
                and (match_extensions(filepath, extensions) or filepath in exact_matches)
            ):
                if not is_pytorch_extension:  # for pytorch extensions, consider all files
                    if not is_pytorch_file(rel_filepath) and not is_caffe2_gpu_file(rel_filepath):
                        continue
                    if out_of_place_only and not is_out_of_place(rel_filepath):
                        continue
                yield filepath
191
+
192
+
193
def preprocess_file_and_save_result(
        output_directory: str,
        filepath: str,
        all_files: Iterable,
        header_include_dirs: Iterable,
        stats: Dict[str, List],
        hip_clang_launch: bool,
        is_pytorch_extension: bool,
        clean_ctx: GeneratedFileCleaner,
        show_progress: bool) -> None:
    """Hipify one file via `preprocessor` and record its HipifyResult in
    the module-level HIPIFY_FINAL_RESULT map (keyed by absolute input path)."""
    fin_path = os.path.abspath(os.path.join(output_directory, filepath))
    # Register a placeholder first so recursive header processing can detect
    # files that are already in flight.
    hipify_result = HipifyResult(current_state=CurrentState.INITIALIZED, hipified_path=fin_path)
    HIPIFY_FINAL_RESULT[fin_path] = hipify_result
    result = preprocessor(output_directory, filepath, all_files, header_include_dirs, stats,
                          hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress)

    # Show what happened
    if show_progress and "ignored" not in result.status:
        print(
            fin_path, "->",
            result.hipified_path, result.status, flush=True)

    HIPIFY_FINAL_RESULT[fin_path] = result
216
+
217
+
218
+ def compute_stats(stats):
219
+ unsupported_calls = {cuda_call for (cuda_call, _filepath) in stats["unsupported_calls"]}
220
+
221
+ # Print the number of unsupported calls
222
+ print(f"Total number of unsupported CUDA function calls: {len(unsupported_calls):d}")
223
+
224
+ # Print the list of unsupported calls
225
+ print(", ".join(unsupported_calls))
226
+
227
+ # Print the number of kernel launches
228
+ print(f"\nTotal number of replaced kernel launches: {len(stats['kernel_launches']):d}")
229
+
230
+
231
def add_dim3(kernel_string, cuda_kernel):
    '''adds dim3() to the second and third arguments in the kernel launch'''
    # kernel_string is the "<<<grid, block[, shmem, stream]>>>" group;
    # cuda_kernel is the full launch expression it came from.
    count = 0     # index of the launch argument currently being scanned (0 or 1)
    closure = 0   # parenthesis nesting depth, so commas inside calls are skipped
    kernel_string = kernel_string.replace("<<<", "").replace(">>>", "")
    arg_locs: List[Dict[str, int]] = [{} for _ in range(2)]
    arg_locs[count]['start'] = 0
    for ind, c in enumerate(kernel_string):
        if count > 1:
            break
        if c == "(":
            closure += 1
        elif c == ")":
            closure -= 1
        # An argument ends at a top-level comma, or at end-of-string.
        if (c == "," or ind == len(kernel_string) - 1) and closure == 0:
            # `(c != ",")` extends the end by one when terminated by EOS.
            arg_locs[count]['end'] = ind + (c != ",")
            count += 1
            if count < 2:
                arg_locs[count]['start'] = ind + 1

    # Raw spans preserve original whitespace/newlines for textual replacement.
    first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1]
    second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']]

    first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", "").strip(" ")
    second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ")

    first_arg_dim3 = f"dim3({first_arg_clean})"
    second_arg_dim3 = f"dim3({second_arg_clean})"

    # Substitute the cleaned text inside the raw span so surrounding
    # formatting (newlines, indentation) survives the rewrite.
    first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3)
    second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3)
    cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3)
    return cuda_kernel
264
+
265
+
266
# Matches a "detail::" (or "detai::") namespace split across a line
# continuation, so it can be re-joined with the kernel name.
RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+')


def processKernelLaunches(string, stats):
    """ Replace the CUDA style Kernel launches with the HIP style kernel launches."""
    # Concat the namespace with the kernel names. (Find cleaner way of doing this later).
    string = RE_KERNEL_LAUNCH.sub(lambda inp: f"{inp.group(1)}{inp.group(2)}::", string)

    def grab_method_and_template(in_kernel):
        # Scan BACKWARDS from the "<<<" to locate the kernel name and any
        # template argument list that precedes it.
        # The positions for relevant kernel components.
        pos = {
            "kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]},
            "kernel_name": {"start": -1, "end": -1},
            "template": {"start": -1, "end": -1}
        }

        # Count for balancing template
        count = {"<>": 0}

        # Status for whether we are parsing a certain item.
        START = 0
        AT_TEMPLATE = 1
        AFTER_TEMPLATE = 2
        AT_KERNEL_NAME = 3

        status = START

        # Parse the string character by character
        for i in range(pos["kernel_launch"]["start"] - 1, -1, -1):
            char = string[i]

            # Handle Templating Arguments
            if status in (START, AT_TEMPLATE):
                if char == ">":
                    if status == START:
                        status = AT_TEMPLATE
                        pos["template"]["end"] = i
                    count["<>"] += 1

                if char == "<":
                    count["<>"] -= 1
                    if count["<>"] == 0 and (status == AT_TEMPLATE):
                        pos["template"]["start"] = i
                        status = AFTER_TEMPLATE

            # Handle Kernel Name
            if status != AT_TEMPLATE:
                if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}:
                    if status != AT_KERNEL_NAME:
                        status = AT_KERNEL_NAME
                        pos["kernel_name"]["end"] = i

                    # Case: Kernel name starts the string.
                    if i == 0:
                        pos["kernel_name"]["start"] = 0

                        # Finished
                        return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]

                else:
                    # Potential ending point if we're already traversing a kernel's name.
                    if status == AT_KERNEL_NAME:
                        pos["kernel_name"]["start"] = i

                        # Finished
                        return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]

    def find_kernel_bounds(string):
        """Finds the starting and ending points for all kernel launches in the string."""
        kernel_end = 0
        kernel_positions = []

        # Continue until we cannot find any more kernels anymore.
        while string.find("<<<", kernel_end) != -1:
            # Get kernel starting position (starting from the previous ending point)
            kernel_start = string.find("<<<", kernel_end)

            # Get kernel ending position (adjust end point past the >>>)
            kernel_end = string.find(">>>", kernel_start) + 3
            if kernel_end <= 0:
                raise InputError("no kernel end found")

            # Add to list of traversed kernels
            kernel_positions.append({"start": kernel_start, "end": kernel_end,
                                     "group": string[kernel_start: kernel_end]})

        return kernel_positions

    # Replace comments and string literals from the code so that find_kernel_bounds does not
    # wrongly capture kernels in comments and string literals.
    # This function replaces them with "x" to keep positions.
    def mask_comments(string):
        in_comment = ''   # '', '//', '/*' or '"' — which construct we are inside
        prev_c = ''
        new_string = ''
        for c in string:
            if in_comment == '':
                # Outside comments
                if c == '/' and prev_c == '/':
                    in_comment = '//'
                elif c == '*' and prev_c == '/':
                    in_comment = '/*'
                elif c == '"' and prev_c != '\\' and prev_c != "'":
                    in_comment = '"'
            elif in_comment == '//':
                # In // xxx
                if c == '\r' or c == '\n':
                    in_comment = ''
            elif in_comment == '/*':
                # In /* xxx */
                if c == '/' and prev_c == '*':
                    in_comment = ''
            elif in_comment == '"':
                # In ""
                if c == '"' and prev_c != '\\':
                    in_comment = ''
            prev_c = c
            # Same length output: masked chars become 'x' so positions align.
            if in_comment == '':
                new_string += c
            else:
                new_string += 'x'
        return new_string

    # Grab positional ranges of all kernel launches
    get_kernel_positions = list(find_kernel_bounds(mask_comments(string)))
    output_string = string

    # Replace each CUDA kernel with a HIP kernel.
    for kernel in get_kernel_positions:
        # Get kernel components
        params = grab_method_and_template(kernel)

        # Find parenthesis after kernel launch
        parenthesis = string.find("(", kernel["end"])

        # Extract cuda kernel
        cuda_kernel = string[params[0]["start"]:parenthesis + 1]
        kernel_string = string[kernel['start']:kernel['end']]
        end_param_index = 0 if params[1]['end'] == -1 else 1
        kernel_name_with_template = string[params[0]['start']:params[end_param_index]['end'] + 1]
        cuda_kernel_dim3 = add_dim3(kernel_string, cuda_kernel)
        # Keep number of kernel launch params consistent (grid dims, group dims, stream, dynamic shared size)
        num_klp = len(extract_arguments(0, kernel["group"].replace("<<<", "(").replace(">>>", ")")))

        # Pad missing launch params with ", 0" so hipLaunchKernelGGL always
        # receives (kernel, grid, block, shmem, stream, args...).
        hip_kernel = "hipLaunchKernelGGL(" + cuda_kernel_dim3[0:-1].replace(
            ">>>", ", 0" * (4 - num_klp) + ">>>").replace("<<<", ", ").replace(
            ">>>", ", ").replace(kernel_name_with_template, "(" + kernel_name_with_template + ")")

        # Replace cuda kernel with hip kernel
        output_string = output_string.replace(cuda_kernel, hip_kernel)

        # Update the statistics
        stats["kernel_launches"].append(hip_kernel)

    return output_string
421
+
422
+
423
def find_closure_group(input_string, start, group):
    """Generalization for finding a balancing closure group

    if group = ["(", ")"], then finds the first balanced parentheses.
    if group = ["{", "}"], then finds the first balanced bracket.

    Given an input string, a starting position in the input string, and the group type,
    find_closure_group returns the positions of group[0] and group[1] as a tuple.

    Example:
        >>> find_closure_group("(hi)", 0, ["(", ")"])
        (0, 3)
    """
    opener, closer = group
    depth = 0
    open_pos = -1

    for idx in range(start, len(input_string)):
        ch = input_string[idx]
        if ch == opener:
            if depth == 0:
                # First opener of the group; remember where it sits.
                open_pos = idx
            depth += 1
        elif ch == closer and depth > 0:
            # Closers seen before any opener are ignored (depth stays 0).
            depth -= 1
            if depth == 0:
                return open_pos, idx

    # Never balanced within the string.
    return None, None
459
+
460
+
461
def find_bracket_group(input_string, start):
    """Finds the first balanced brace group ``{ ... }``."""
    # NOTE: docstring corrected — this searches for braces, not parentheses
    # (the original docstring was swapped with find_parentheses_group's).
    return find_closure_group(input_string, start, group=["{", "}"])
464
+
465
+
466
def find_parentheses_group(input_string, start):
    """Finds the first balanced parentheses group ``( ... )``."""
    # NOTE: docstring corrected — this searches for parentheses, not braces
    # (the original docstring was swapped with find_bracket_group's).
    return find_closure_group(input_string, start, group=["(", ")"])
469
+
470
+
471
+ RE_ASSERT = re.compile(r"\bassert[ ]*\(")
472
+
473
+
474
def replace_math_functions(input_string):
    """FIXME: Temporarily replace std:: invocations of math functions
        with non-std:: versions to prevent linker errors NOTE: This
        can lead to correctness issues when running tests, since the
        correct version of the math function (exp/expf) might not get
        called.  Plan is to remove this function once HIP supports
        std:: math function calls inside device code

    """
    output_string = input_string
    # Iterate (cuda_name, hip_name) pairs directly; the original looped over
    # keys and re-indexed the dict for every replacement.
    for func, hip_func in MATH_TRANSPILATIONS.items():
        # Append "(" so only call sites are rewritten, not substrings of
        # longer identifiers.
        output_string = output_string.replace(f'{func}(', f'{hip_func}(')

    return output_string
488
+
489
+
490
# Matches __syncthreads-style builtin calls (optionally "::"-qualified).
RE_SYNCTHREADS = re.compile(r":?:?\b(__syncthreads)\b(\w*\()")


def hip_header_magic(input_string):
    """If the file makes kernel builtin calls and does not include the cuda_runtime.h header,
    then automatically add an #include to match the "magic" includes provided by NVCC.
    TODO:
        Update logic to ignore cases where the cuda_runtime.h is included by another file.
    """
    output_string = input_string

    # Bail out early if a HIP runtime header is already included, in either
    # quote or angle-bracket form.
    headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"]
    for ext in headers:
        if re.search(fr'#include ("{ext}"|<{ext}>)', output_string):
            return output_string

    # Rough markers that the file contains device code.
    device_markers = (
        "hipLaunchKernelGGL" in output_string,
        "__global__" in output_string,
        "__shared__" in output_string,
        RE_SYNCTHREADS.search(output_string) is not None,
    )

    # If device logic found, provide the necessary header.
    if any(device_markers):
        output_string = '#include "hip/hip_runtime.h"\n' + input_string

    return output_string
520
+
521
+
522
# Matches "extern [qualifier] __shared__ <type> <name>[];" declarations.
RE_EXTERN_SHARED = re.compile(r"extern\s+([\w\(\)]+)?\s*__shared__\s+([\w:<>\s]+)\s+(\w+)\s*\[\s*\]\s*;")


def replace_extern_shared(input_string):
    """Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead.
    https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md#__shared__
    Example:
        "extern __shared__ char smemChar[];" => "HIP_DYNAMIC_SHARED( char, smemChar)"
        "extern __shared__ unsigned char smem[];" => "HIP_DYNAMIC_SHARED( unsigned char, my_smem)"
    """
    def _to_macro(match):
        # group(1) is an optional qualifier (may be None), group(2) the element
        # type, group(3) the array name.
        qualifier = match.group(1) or ''
        return f"HIP_DYNAMIC_SHARED({qualifier} {match.group(2)}, {match.group(3)})"

    return RE_EXTERN_SHARED.sub(_to_macro, input_string)
537
+
538
+
539
def get_hip_file_path(rel_filepath, is_pytorch_extension=False):
    """
    Returns the new name of the hipified file
    """
    # At the moment, some PyTorch source files are HIPified in place.  The predicate
    # is_out_of_place tells us if this is the case or not.
    assert not os.path.isabs(rel_filepath)
    if not is_pytorch_extension and not is_out_of_place(rel_filepath):
        return rel_filepath

    dirpath, filename = os.path.split(rel_filepath)
    root, ext = os.path.splitext(filename)

    # Here's the plan:
    #
    # In general, we need to disambiguate the HIPified filename so that
    # it gets a different name from the original filename, so
    # that we don't overwrite the original file
    #
    # There's a lot of different naming conventions across PyTorch
    # and Caffe2, but the general recipe is to convert occurrences
    # of cuda/gpu to hip, and add hip if there are no occurrences
    # of cuda/gpu anywhere.
    #
    # Concretely, we do the following:
    #
    # - If there is a directory component named "cuda", replace
    #   it with "hip", AND
    #
    # - If the file name contains "CUDA", replace it with "HIP", AND
    #
    # - ALWAYS replace '.cu' with '.hip', because those files
    #   contain CUDA kernels that needs to be hipified and processed with
    #   hip compiler
    #
    # - If we are not hipifying a PyTorch extension, and the parent
    #   directory name did not change as a result of the above
    #   transformations, insert "hip" in the file path
    #   as the direct parent folder of the file
    #
    # - If we are hipifying a PyTorch extension, and the parent directory
    #   name as well as the filename (incl. extension) did not change as
    #   a result of the above transformations, insert "_hip" in the filename
    #
    # This isn't set in stone; we might adjust this to support other
    # naming conventions.

    if ext == '.cu':
        ext = '.hip'

    orig_filename = filename
    orig_dirpath = dirpath

    dirpath = dirpath.replace('cuda', 'hip')
    dirpath = dirpath.replace('CUDA', 'HIP')
    dirpath = dirpath.replace('THC', 'THH')

    root = root.replace('cuda', 'hip')
    root = root.replace('CUDA', 'HIP')
    # Special case to handle caffe2/core/THCCachingAllocator
    if dirpath != "caffe2/core":
        root = root.replace('THC', 'THH')

    if not is_pytorch_extension and dirpath == orig_dirpath:
        dirpath = os.path.join(dirpath, 'hip')

    if is_pytorch_extension and dirpath == orig_dirpath and (root + ext) == orig_filename:
        root = root + "_hip"

    return os.path.join(dirpath, root + ext)
609
+
610
+
611
def is_out_of_place(rel_filepath):
    """Return True when *rel_filepath* should be hipified to a NEW file
    (out of place) rather than rewritten where it stands."""
    assert not os.path.isabs(rel_filepath)
    # These trees are hipified in place; everything else gets a new path.
    in_place_prefixes = ("torch/", "third_party/nvfuser/", "tools/autograd/templates/")
    return not rel_filepath.startswith(in_place_prefixes)
620
+
621
+
622
+ # Keep this synchronized with includes/ignores in build_amd.py
623
# Keep this synchronized with includes/ignores in build_amd.py
def is_pytorch_file(rel_filepath):
    """Return True when *rel_filepath* belongs to the PyTorch (not Caffe2)
    portion of the tree for hipify purposes."""
    assert not os.path.isabs(rel_filepath)
    if rel_filepath.startswith("aten/"):
        # ATen counts as PyTorch except for its core/ subtree.
        return not rel_filepath.startswith("aten/src/ATen/core/")
    pytorch_prefixes = ("torch/", "third_party/nvfuser/", "tools/autograd/templates/")
    return rel_filepath.startswith(pytorch_prefixes)
636
+
637
+
638
def is_cusparse_file(rel_filepath):
    """Return True for PyTorch files whose path mentions "sparse" — these get
    the cuSPARSE-specific (hipSPARSE) mappings."""
    return is_pytorch_file(rel_filepath) and "sparse" in rel_filepath.lower()
642
+
643
+
644
def is_special_file(rel_filepath):
    """Return True for PyTorch files that need the higher-priority
    API_SPECIAL mappings (sparse and most linalg sources)."""
    if not is_pytorch_file(rel_filepath):
        return False
    lowered = rel_filepath.lower()
    if "sparse" in lowered:
        return True
    if "linalg" in lowered:
        # don't use "special" mappings for this specific linalg cublas file
        return "batchlinearalgebralibblas" not in lowered
    return False
653
+
654
def is_caffe2_gpu_file(rel_filepath):
    """Return True when *rel_filepath* is a Caffe2/c10 GPU source: anything
    under c10/cuda, or a 'gpu'/.cu/.cuh file that is not cudnn-specific."""
    assert not os.path.isabs(rel_filepath)
    if rel_filepath.startswith("c10/cuda"):
        return True
    filename = os.path.basename(rel_filepath)
    _, ext = os.path.splitext(filename)
    # cudnn files are handled separately and excluded here.
    if 'cudnn' in filename:
        return False
    return 'gpu' in filename or ext in ('.cu', '.cuh')
661
+
662
+
663
+ # Cribbed from https://stackoverflow.com/questions/42742810/speed-up-millions-of-regex-replacements-in-python-3/42789508#42789508
664
# Cribbed from https://stackoverflow.com/questions/42742810/speed-up-millions-of-regex-replacements-in-python-3/42789508#42789508
class Trie:
    """Regex::Trie in Python. Creates a Trie out of a list of words. The trie can be exported to a Regex pattern.
    The corresponding Regex should match much faster than a simple Regex union."""

    def __init__(self):
        # Nested dicts keyed by character; the key "" marks end-of-word.
        self.data = {}

    def add(self, word):
        ref = self.data
        for char in word:
            # Reuse the existing child node if present, else create one.
            ref[char] = char in ref and ref[char] or {}
            ref = ref[char]
        ref[''] = 1

    def dump(self):
        return self.data

    def quote(self, char):
        return re.escape(char)

    def _pattern(self, pData):
        # Recursively turn a trie node into a regex alternation.
        data = pData
        if "" in data and len(data.keys()) == 1:
            # Leaf node (pure end-of-word): nothing to emit; the caller
            # catches this via the exception below and treats the edge
            # character as a single-char alternative.
            return None

        alt = []  # multi-char alternatives
        cc = []   # single-char alternatives, merged into one [...] class
        q = 0     # 1 when this node can also terminate a word (optional tail)
        for char in sorted(data.keys()):
            if isinstance(data[char], dict):
                try:
                    # NOTE: relies on `self.quote(char) + None` raising
                    # TypeError when the child was a leaf (returned None).
                    recurse = self._pattern(data[char])
                    alt.append(self.quote(char) + recurse)
                except Exception:
                    cc.append(self.quote(char))
            else:
                q = 1
        cconly = not len(alt) > 0

        if len(cc) > 0:
            if len(cc) == 1:
                alt.append(cc[0])
            else:
                alt.append('[' + ''.join(cc) + ']')

        if len(alt) == 1:
            result = alt[0]
        else:
            result = "(?:" + "|".join(alt) + ")"

        if q:
            # Word can end here, so the remainder is optional.
            if cconly:
                result += "?"
            else:
                result = f"(?:{result})?"
        return result

    def pattern(self):
        return self._pattern(self.dump())
723
+
724
+
725
# Tries (for fast combined regexes) and plain dicts (for replacement lookup),
# one pair for Caffe2 and one for PyTorch, built from CUDA_TO_HIP_MAPPINGS.
CAFFE2_TRIE = Trie()
CAFFE2_MAP = {}
PYTORCH_TRIE = Trie()
PYTORCH_MAP: Dict[str, object] = {}

# In PyTorch, we map cuBLAS->rocBLAS and cuSPARSE->hipSPARSE. Note the prefix, roc versus hip.
# The 'hip' APIs offer a more direct CUDA-friendly mapping, but calling rocBLAS directly has better performance.
# Unfortunately, the roc* types and hip* types differ, i.e., rocblas_float_complex versus hipComplex.
# In the case of SPARSE, we must use the hip types for complex instead of the roc types,
# but the pytorch mappings assume roc. Therefore, we create a new SPARSE mapping that has a higher priority.
# Its mappings will trigger first, and only when a miss occurs will the lower-priority pytorch mapping take place.
# When a file contains "sparse" in the filename, a mapping marked with API_SPARSE is preferred over other choices.
# Similarly, "linalg" files require rocBLAS -> hipSOLVER so they also need special handling.
PYTORCH_SPECIAL_MAP = {}

for mapping in CUDA_TO_HIP_MAPPINGS:
    assert isinstance(mapping, Mapping)
    for src, value in mapping.items():
        # Each mapping value is (replacement, *annotation_constants).
        dst = value[0]
        meta_data = value[1:]
        if constants.API_CAFFE2 not in meta_data:
            PYTORCH_TRIE.add(src)
            # if src is already in PYTORCH_MAP and dst belongs to API_SPECIAL
            # do not overwrite PYTORCH_MAP, store dst separately
            if constants.API_SPECIAL in meta_data and PYTORCH_MAP.get(src, ""):
                PYTORCH_SPECIAL_MAP[src] = dst
            else:
                PYTORCH_MAP[src] = dst
        if constants.API_PYTORCH not in meta_data and constants.API_SPECIAL not in meta_data:
            CAFFE2_TRIE.add(src)
            CAFFE2_MAP[src] = dst
RE_CAFFE2_PREPROCESSOR = re.compile(CAFFE2_TRIE.pattern())
# PyTorch matches only whole tokens (non-word char on both sides).
RE_PYTORCH_PREPROCESSOR = re.compile(fr'(?<=\W)({PYTORCH_TRIE.pattern()})(?=\W)')

RE_QUOTE_HEADER = re.compile(r'#include "([^"]+)"')
RE_ANGLE_HEADER = re.compile(r'#include <([^>]+)>')
RE_THC_GENERIC_FILE = re.compile(r'#define THC_GENERIC_FILE "([^"]+)"')
RE_CU_SUFFIX = re.compile(r'\.cu\b')  # be careful not to pick up .cuh
763
+
764
+ """
765
+ Returns a HipifyResult object with the following details:
766
+ "hipified_path" : absolute path of hipified source file
767
+ "status" : "ok" if hipified file was written out
768
+ "skipped" if an identical hipified file already existed or hipified file couldn't be written out
769
+ "ignored" if the source file was a hipified file itself or not meant to be hipified
770
+ "current_state" : CurrentState.INITIALIZED if source file is first ready to be hipified
771
+ CurrentState.DONE if source file is done with hipification process
772
+ """
773
+
774
+
775
def preprocessor(
        output_directory: str,
        filepath: str,
        all_files: Iterable,
        header_include_dirs: Iterable,
        stats: Dict[str, List],
        hip_clang_launch: bool,
        is_pytorch_extension: bool,
        clean_ctx: GeneratedFileCleaner,
        show_progress: bool) -> HipifyResult:
    """ Executes the CUDA -> HIP conversion on the specified file. """
    fin_path = os.path.abspath(os.path.join(output_directory, filepath))
    hipify_result = HIPIFY_FINAL_RESULT[fin_path]
    # Files outside the hipification set are returned untouched.
    if filepath not in all_files:
        hipify_result.hipified_path = None
        hipify_result.status = "[ignored, not to be hipified]"
        hipify_result.current_state = CurrentState.DONE
        return hipify_result

    rel_filepath = os.path.relpath(filepath, output_directory)

    with open(fin_path, encoding='utf-8') as fin:
        # A breadcrumb as the very first line marks a file this tool
        # itself produced earlier; don't hipify it again.
        if fin.readline() == HIPIFY_C_BREADCRUMB:
            hipify_result.hipified_path = None
            hipify_result.status = "[ignored, input is hipified output]"
            hipify_result.current_state = CurrentState.DONE
            return hipify_result
        fin.seek(0)
        output_source = fin.read()

    # Kept to detect a no-op conversion later.
    orig_output_source = output_source

    # get_hip_file_path needs a relative path to work correctly
    fout_path = os.path.abspath(os.path.join(output_directory, get_hip_file_path(rel_filepath, is_pytorch_extension)))
    if not os.path.exists(os.path.dirname(fout_path)):
        clean_ctx.makedirs(os.path.dirname(fout_path))

    # unsupported_calls statistics reporting is broken atm
    def pt_repl(m):
        # Replace a matched CUDA identifier with its PyTorch HIP mapping.
        return PYTORCH_MAP[m.group(0)]

    def pt_special_repl(m):
        # checks SPECIAL map first, and if a miss occurs, falls back to pytorch mappings
        return PYTORCH_SPECIAL_MAP.get(m.group(0), pt_repl(m))

    # Identifier substitution pass: which map applies depends on whether this
    # is an extension build and on the kind of file being processed.
    if is_pytorch_extension:
        output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
    else:
        if is_special_file(rel_filepath):
            output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_special_repl, output_source)
        elif is_pytorch_file(rel_filepath):
            output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
        else:
            def c2_repl(m):
                return CAFFE2_MAP[m.group(0)]
            output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)

    # Header rewrites
    def mk_repl(templ, include_current_dir=True):
        # Build a re.sub replacement callback that rewrites a captured header
        # path (m.group(1)) into `templ`, hipifying the header if needed.
        def repl(m):
            f = m.group(1)
            dirpath, filename = os.path.split(f)
            if (
                f.startswith(("ATen/cuda",
                              "ATen/native/cuda",
                              "ATen/native/nested/cuda",
                              "ATen/native/quantized/cuda",
                              "ATen/native/sparse/cuda",
                              "ATen/native/transformers/cuda",
                              "THC/")) or
                (f.startswith("THC") and not f.startswith("THCP"))
            ):
                return templ.format(get_hip_file_path(m.group(1), is_pytorch_extension))
            # if filename is one of the files being hipified for this extension
            if (is_pytorch_extension and any(s.endswith(filename) for s in all_files)):
                header_dir = None
                header_filepath = None
                # If include_current_dir True, look first in same dir as the including source file
                if include_current_dir:
                    header_dir_to_check = os.path.dirname(fin_path)
                    header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
                    if os.path.exists(header_path_to_check):
                        header_dir = header_dir_to_check
                        header_filepath = header_path_to_check
                # If not found, look in include dirs one by one and first match wins
                if header_filepath is None:
                    for header_include_dir in header_include_dirs:
                        header_dir_to_check = os.path.join(output_directory, header_include_dir)
                        header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
                        if os.path.exists(header_path_to_check):
                            header_dir = header_dir_to_check
                            header_filepath = header_path_to_check
                # If header file not found, keep as is
                if header_filepath is None:
                    return m.group(0)
                # Hipify header file first if needed
                if header_filepath not in HIPIFY_FINAL_RESULT:
                    # Recursively hipify the header (records into HIPIFY_FINAL_RESULT).
                    preprocess_file_and_save_result(output_directory,
                                                    header_filepath,
                                                    all_files, header_include_dirs, stats, hip_clang_launch,
                                                    is_pytorch_extension, clean_ctx, show_progress)
                elif header_filepath in HIPIFY_FINAL_RESULT:
                    header_result = HIPIFY_FINAL_RESULT[header_filepath]
                    if header_result.current_state == CurrentState.INITIALIZED:
                        # Header is mid-processing (cyclic include); record its
                        # eventual output path and use that.
                        # get_hip_file_path needs a relative path to work correctly
                        header_rel_path = os.path.relpath(header_filepath, output_directory)
                        header_fout_path = os.path.abspath(os.path.join(output_directory,
                                                                       get_hip_file_path(header_rel_path, is_pytorch_extension)))
                        header_result.hipified_path = header_fout_path
                        HIPIFY_FINAL_RESULT[header_filepath] = header_result
                        return templ.format(os.path.relpath(header_fout_path if header_fout_path is not None
                                                            else header_filepath, header_dir))
                hipified_header_filepath = HIPIFY_FINAL_RESULT[header_filepath].hipified_path
                return templ.format(os.path.relpath(hipified_header_filepath if hipified_header_filepath is not None
                                                    else header_filepath, header_dir))

            return m.group(0)
        return repl
    output_source = RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"', True), output_source)
    output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>', False), output_source)
    output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source)

    # CMakeLists.txt rewrites
    if filepath.endswith('CMakeLists.txt'):
        output_source = output_source.replace('CUDA', 'HIP')
        output_source = output_source.replace('THC', 'THH')
        output_source = RE_CU_SUFFIX.sub('.hip', output_source)

    # Perform Kernel Launch Replacements
    if not hip_clang_launch:
        output_source = processKernelLaunches(output_source, stats)

    # Replace std:: with non-std:: versions
    if (filepath.endswith((".cu", ".cuh"))) and "PowKernel" not in filepath:
        output_source = replace_math_functions(output_source)

    # Include header if device code is contained.
    output_source = hip_header_magic(output_source)

    # Replace the extern __shared__
    # NOTE: No longer needed after transition from hcc to hipclang.
    # output_source = replace_extern_shared(output_source)

    # Don't write out identical hipified files for extensions if dirpath has not changed
    if (
        is_pytorch_extension
        and orig_output_source == output_source
        and os.path.dirname(fin_path) == os.path.dirname(fout_path)
    ):
        hipify_result.hipified_path = fin_path
        hipify_result.status = "[skipped, no changes]"
        hipify_result.current_state = CurrentState.DONE
        return hipify_result

    # Add hipify breadcrumb for C-style files to avoid re-hipification
    if fin_path != fout_path and match_extensions(fin_path, (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".hpp")):
        output_source = HIPIFY_C_BREADCRUMB + output_source

    # Only rewrite the output file when its content would actually change.
    do_write = True
    if os.path.exists(fout_path):
        with open(fout_path, encoding='utf-8') as fout_old:
            do_write = fout_old.read() != output_source
    if do_write:
        try:
            with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:
                fout.write(output_source)
            hipify_result.hipified_path = fout_path
            hipify_result.status = "[ok]"
            hipify_result.current_state = CurrentState.DONE
            return hipify_result
        except PermissionError as e:
            # Best effort: report and fall back to the unmodified input file.
            print(f"{bcolors.WARNING}Failed to save {fout_path} with \"{e.strerror}\", leaving {fin_path} unchanged.{bcolors.ENDC}",
                  file=sys.stderr)
            hipify_result.hipified_path = fin_path
            hipify_result.status = "[skipped, no permissions]"
            hipify_result.current_state = CurrentState.DONE
            return hipify_result
    else:
        hipify_result.hipified_path = fout_path
        hipify_result.status = "[skipped, already hipified]"
        hipify_result.current_state = CurrentState.DONE
        return hipify_result
958
+
959
def file_specific_replacement(filepath, search_string, replace_string, strict=False):
    """Rewrite one file in place, replacing `search_string` with `replace_string`.

    With strict=True only whole-word occurrences are replaced; otherwise a
    plain substring replacement is performed.
    """
    with openf(filepath, "r+") as handle:
        text = handle.read()
        if strict:
            word_pattern = fr'\b({re.escape(search_string)})\b'
            text = re.sub(word_pattern, lambda _match: replace_string, text)
        else:
            text = text.replace(search_string, replace_string)
        handle.seek(0)
        handle.write(text)
        handle.truncate()
969
+
970
+
971
def file_add_header(filepath, header):
    """Prepend an #include directive for `header` to the file at `filepath`.

    A header not already wrapped in angle brackets is quoted.
    """
    with openf(filepath, "r+") as handle:
        body = handle.read()
        if header[0] != "<" and header[-1] != ">":
            header = f'"{header}"'
        handle.seek(0)
        handle.write(f'#include {header} \n' + body)
        handle.truncate()
980
+
981
+
982
def fix_static_global_kernels(in_txt):
    """Static global kernels in HIP results in a compilation error."""
    # Drop the leading space and the `static` qualifier in one substitution.
    return in_txt.replace(" __global__ static", "__global__")
986
+
987
+
988
# Matches a whole #include directive line, newline included.
RE_INCLUDE = re.compile(r"#include .*\n")
989
+
990
+
991
def extract_arguments(start, string):
    """ Return the list of arguments in the upcoming function parameter closure.
    Example:
    string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
    arguments (output):
        '[{'start': 1, 'end': 7},
        {'start': 8, 'end': 16},
        {'start': 17, 'end': 19},
        {'start': 20, 'end': 53}]'
    """
    args = []
    paren_depth = 0    # open '(' minus ')'
    angle_depth = 0    # open '<' minus matched '>'
    pos = start
    arg_begin = pos + 1

    # Scan forward until the outermost parenthesis closes.
    while pos < len(string):
        ch = string[pos]
        if ch == "(":
            paren_depth += 1
        elif ch == ")":
            paren_depth -= 1
        elif ch == "<":
            angle_depth += 1
        elif ch == ">" and string[pos - 1] != "-" and angle_depth > 0:
            # A '>' only closes a template bracket when one is open and it is
            # not part of an '->' arrow.
            angle_depth -= 1

        # All brackets balanced: record the final argument and stop.
        if paren_depth == 0 and angle_depth == 0:
            args.append({"start": arg_begin, "end": pos})
            break

        # A comma at the top parenthesis level separates arguments.
        if paren_depth == 1 and angle_depth == 0 and ch == ",":
            args.append({"start": arg_begin, "end": pos})
            arg_begin = pos + 1

        pos += 1

    return args
1035
+
1036
+
1037
def str2bool(v):
    """ArgumentParser doesn't support type=bool. Thus, this helper method will convert
    from possible string types to True / False."""
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
1046
+
1047
+
1048
def hipify(
        project_directory: str,
        show_detailed: bool = False,
        extensions: Iterable = (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"),
        header_extensions: Iterable = (".cuh", ".h", ".hpp"),
        output_directory: str = "",
        header_include_dirs: Iterable = (),
        includes: Iterable = ('*',),
        extra_files: Iterable = (),
        out_of_place_only: bool = False,
        ignores: Iterable = (),
        show_progress: bool = True,
        hip_clang_launch: bool = False,
        is_pytorch_extension: bool = False,
        hipify_extra_files_only: bool = False,
        clean_ctx: Optional[GeneratedFileCleaner] = None
) -> HipifyFinalResult:
    """Run the CUDA -> HIP conversion over every matching file in a project.

    Files under `output_directory` (a copy of `project_directory` named
    `<project>_amd` by default) are selected via `includes`/`ignores` glob
    patterns and `extensions`; `extra_files` and headers found under
    `header_include_dirs` are added to the set. Each selected file is run
    through preprocess_file_and_save_result, and the accumulated
    HIPIFY_FINAL_RESULT mapping is returned. Exits the process if
    `project_directory` does not exist.
    """
    if project_directory == "":
        project_directory = os.getcwd()

    # Verify the project directory exists.
    if not os.path.exists(project_directory):
        print("The project folder specified does not exist.")
        sys.exit(1)

    # If no output directory, provide a default one.
    if not output_directory:
        # BUGFIX: str.rstrip returns a new string (strings are immutable);
        # the original discarded the result, so a trailing "/" in
        # project_directory produced an output dir like "proj/_amd".
        project_directory = project_directory.rstrip("/")
        output_directory = project_directory + "_amd"

    # Rebase include/ignore patterns onto the output tree.
    if project_directory != output_directory:
        includes = [include.replace(project_directory, output_directory) for include in includes]
        ignores = [ignore.replace(project_directory, output_directory) for ignore in ignores]

    # Copy from project directory to output directory if not done already.
    if not os.path.exists(output_directory):
        shutil.copytree(project_directory, output_directory)

    all_files = list(matched_files_iter(output_directory, includes=includes,
                                        ignores=ignores, extensions=extensions,
                                        out_of_place_only=out_of_place_only,
                                        is_pytorch_extension=is_pytorch_extension))
    # Append extra files (resolved against the output tree) without duplicates.
    all_files_set = set(all_files)
    for f in extra_files:
        if not os.path.isabs(f):
            f = os.path.join(output_directory, f)
        if f not in all_files_set:
            all_files.append(f)

    # List all files in header_include_paths to ensure they are hipified
    from pathlib import Path
    for header_include_dir in header_include_dirs:
        if os.path.isabs(header_include_dir):
            header_include_dir_path = Path(header_include_dir)
        else:
            header_include_dir_path = Path(os.path.join(output_directory, header_include_dir))
        for path in header_include_dir_path.rglob('*'):
            if (
                path.is_file()
                and _fnmatch(str(path), includes)
                and (not _fnmatch(str(path), ignores))
                and match_extensions(path.name, header_extensions)
            ):
                all_files.append(str(path))

    if clean_ctx is None:
        clean_ctx = GeneratedFileCleaner(keep_intermediates=True)

    # Preprocessing statistics.
    stats: Dict[str, List] = {"unsupported_calls": [], "kernel_launches": []}

    for filepath in (all_files if not hipify_extra_files_only else extra_files):
        preprocess_file_and_save_result(output_directory, filepath, all_files, header_include_dirs,
                                        stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress)

    print(bcolors.OKGREEN + "Successfully preprocessed all matching files." + bcolors.ENDC, file=sys.stderr)

    # Show detailed summary
    if show_detailed:
        compute_stats(stats)

    return HIPIFY_FINAL_RESULT
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/hipify/version.py ADDED
@@ -0,0 +1 @@
 
 
1
# Version string for this package (diff header indicates torch/utils/hipify).
__version__ = '1.0.0'
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/jit/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import tensorboard
from torch._vendor.packaging.version import Version

# Fail fast at import time if tensorboard is missing a version attribute
# or is older than the minimum supported release.
if not hasattr(tensorboard, "__version__") or Version(
    tensorboard.__version__
) < Version("1.15"):
    raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above")

# Drop the names used only for the version check from the module namespace.
del Version
del tensorboard

from .writer import FileWriter, SummaryWriter  # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter  # noqa: F401
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (584 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc ADDED
Binary file (3.56 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc ADDED
Binary file (1.93 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc ADDED
Binary file (2.06 kB). View file