ZTWHHH committed on
Commit
19315e5
·
verified ·
1 Parent(s): 35633cc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc +0 -0
  3. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc +0 -0
  4. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc +0 -0
  5. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc +0 -0
  6. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc +0 -0
  7. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/cuda_properties.cpython-310.pyc +0 -0
  8. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc +0 -0
  9. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc +0 -0
  10. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc +0 -0
  11. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc +0 -0
  12. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc +0 -0
  13. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc +0 -0
  14. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc +0 -0
  15. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc +0 -0
  16. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc +0 -0
  17. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc +0 -0
  18. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc +0 -0
  19. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc +0 -0
  20. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc +0 -0
  21. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc +0 -0
  22. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc +0 -0
  23. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc +0 -0
  24. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc +0 -0
  25. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc +0 -0
  26. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc +0 -0
  27. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc +0 -0
  28. llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_foreach.cpython-310.pyc +0 -0
  30. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py +0 -0
  31. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc +0 -0
  32. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc +0 -0
  33. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc +0 -0
  34. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm.cpython-310.pyc +0 -0
  35. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc +0 -0
  36. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc +0 -0
  37. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc +0 -0
  38. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/bmm.py +128 -0
  39. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py +487 -0
  40. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py +238 -0
  41. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py +200 -0
  42. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/mm_plus_mm.py +239 -0
  43. llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/unpack_mixed_mm.py +82 -0
  44. llava_next/lib/python3.10/site-packages/torch/_lazy/__init__.py +55 -0
  45. llava_next/lib/python3.10/site-packages/torch/_lazy/closure.py +134 -0
  46. llava_next/lib/python3.10/site-packages/torch/_lazy/computation.py +26 -0
  47. llava_next/lib/python3.10/site-packages/torch/_lazy/config.py +16 -0
  48. llava_next/lib/python3.10/site-packages/torch/_lazy/device_context.py +25 -0
  49. llava_next/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py +223 -0
  50. llava_next/lib/python3.10/site-packages/torch/_lazy/metrics.py +21 -0
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.16 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc ADDED
Binary file (7.41 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc ADDED
Binary file (3.98 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc ADDED
Binary file (37.4 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc ADDED
Binary file (31.8 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc ADDED
Binary file (7.04 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/cuda_properties.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc ADDED
Binary file (59.9 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc ADDED
Binary file (3.65 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc ADDED
Binary file (709 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (612 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc ADDED
Binary file (9.88 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc ADDED
Binary file (39.5 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc ADDED
Binary file (477 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (58 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc ADDED
Binary file (29 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc ADDED
Binary file (18.6 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc ADDED
Binary file (4.14 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc ADDED
Binary file (25.6 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc ADDED
Binary file (33 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc ADDED
Binary file (8.86 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_foreach.cpython-310.pyc ADDED
Binary file (7.66 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm.cpython-310.pyc ADDED
Binary file (5.99 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc ADDED
Binary file (5.27 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc ADDED
Binary file (5.23 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/bmm.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from ..lowering import register_lowering
4
+ from ..select_algorithm import (
5
+ autotune_select_algorithm,
6
+ ExternKernelChoice,
7
+ TritonTemplate,
8
+ )
9
+ from ..utils import ceildiv as cdiv, use_aten_gemm_kernels, use_triton_template
10
+
11
+ from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options
12
+
13
+ aten = torch.ops.aten
14
+
15
+
16
+ def bmm_grid(b, m, n, meta):
17
+ return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), b, 1)
18
+
19
+
20
+ bmm_template = TritonTemplate(
21
+ name="bmm",
22
+ grid=bmm_grid,
23
+ source=r"""
24
+ {{def_kernel("A", "B")}}
25
+ M = {{size("A", -2)}}
26
+ N = {{size("B", -1)}}
27
+ K = {{size("A", -1)}}
28
+
29
+ stride_aq = {{stride("A", 0)}}
30
+ stride_am = {{stride("A", 1)}}
31
+ stride_ak = {{stride("A", 2)}}
32
+
33
+ stride_bq = {{stride("B", 0)}}
34
+ stride_bk = {{stride("B", 1)}}
35
+ stride_bn = {{stride("B", 2)}}
36
+
37
+ # based on triton.ops.matmul
38
+ pid = tl.program_id(0)
39
+ grid_m = (M + BLOCK_M - 1) // BLOCK_M
40
+ grid_n = (N + BLOCK_N - 1) // BLOCK_N
41
+
42
+ # re-order program ID for better L2 performance
43
+ width = GROUP_M * grid_n
44
+ group_id = pid // width
45
+ group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
46
+ pid_m = group_id * GROUP_M + (pid % group_size)
47
+ pid_n = (pid % width) // (group_size)
48
+
49
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
50
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
51
+ ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
52
+ rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
53
+ rk = tl.arange(0, BLOCK_K)
54
+
55
+ idx_q = tl.program_id(1) # batch dimension for BMM
56
+ A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
57
+ B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)
58
+
59
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
60
+ for k in range(K, 0, -BLOCK_K):
61
+ if EVEN_K:
62
+ a = tl.load(A)
63
+ b = tl.load(B)
64
+ else:
65
+ a = tl.load(A, mask=rk[None, :] < k, other=0.)
66
+ b = tl.load(B, mask=rk[:, None] < k, other=0.)
67
+ acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
68
+ A += BLOCK_K * stride_ak
69
+ B += BLOCK_K * stride_bk
70
+
71
+ # rematerialize rm and rn to save registers
72
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
73
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
74
+ idx_q = tl.program_id(1) # batch dimension for BMM
75
+ idx_m = rm[:, None]
76
+ idx_n = rn[None, :]
77
+ mask = (idx_m < M) & (idx_n < N)
78
+
79
+ # inductor generates a suffix
80
+ {{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
81
+ """,
82
+ )
83
+
84
+ aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
85
+ aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")
86
+
87
+
88
+ @register_lowering(aten.bmm)
89
+ def tuned_bmm(mat1, mat2, *, layout=None):
90
+ m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
91
+
92
+ # options to tune from
93
+ choices = [aten_bmm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
94
+ if use_triton_template(layout):
95
+ for config in mm_configs(m, n, k):
96
+ bmm_template.maybe_append_choice(
97
+ choices,
98
+ (mat1, mat2),
99
+ layout,
100
+ **mm_options(config, k, layout),
101
+ )
102
+
103
+ return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)
104
+
105
+
106
+ # Don't register this since it is slower than decomposing it
107
+ # @register_lowering(aten.baddbmm)
108
+ def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
109
+ m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)
110
+
111
+ # options to tune from
112
+ choices = (
113
+ [aten_baddbmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
114
+ if use_aten_gemm_kernels()
115
+ else []
116
+ )
117
+ if use_triton_template(layout):
118
+ for config in mm_configs(m, n, k):
119
+ bmm_template.maybe_append_choice(
120
+ choices,
121
+ (inp, mat1, mat2),
122
+ layout,
123
+ **mm_options(config, k, layout),
124
+ prefix_args=1,
125
+ epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
126
+ )
127
+
128
+ return autotune_select_algorithm("baddbmm", choices, [inp, mat1, mat2], layout)
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+ import logging
5
+ from typing import cast, List, Tuple, TypedDict
6
+
7
+ import torch
8
+ from .. import config, ir
9
+ from ..ir import TensorBox
10
+
11
+ from ..lowering import (
12
+ add_layout_constraint,
13
+ constrain_to_fx_strides,
14
+ lowerings as L,
15
+ register_lowering,
16
+ )
17
+ from ..select_algorithm import (
18
+ autotune_select_algorithm,
19
+ ExternKernelChoice,
20
+ TritonTemplate,
21
+ )
22
+ from ..utils import (
23
+ ceildiv,
24
+ is_ones,
25
+ is_zeros,
26
+ pad_listlike,
27
+ sympy_product,
28
+ use_triton_template,
29
+ )
30
+ from ..virtualized import V
31
+ from .mm_common import filtered_configs
32
+
33
+ log = logging.getLogger(__name__)
34
+
35
+
36
+ aten = torch.ops.aten
37
+
38
+
39
+ def conv_grid(n, c, h, w, meta):
40
+ return (
41
+ ceildiv(n * h * w, meta["BLOCK_M"]),
42
+ ceildiv(c, meta["BLOCK_N"]),
43
+ meta["GROUPS"],
44
+ )
45
+
46
+
47
+ # List of dictionaries to store the kernel configs. Configs that evaluate to true
48
+ # will be utilised on the target platform
49
+ kernel_configs = [
50
+ # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
51
+ {"config": (64, 256, 16, 2, 4), "cond": True},
52
+ {"config": (256, 64, 16, 2, 4), "cond": True},
53
+ {"config": (1024, 16, 16, 1, 8), "cond": True},
54
+ {"config": (128, 128, 32, 2, 8), "cond": True},
55
+ {"config": (64, 64, 32, 2, 4), "cond": True},
56
+ {"config": (64, 256, 32, 2, 8), "cond": True},
57
+ {"config": (256, 64, 32, 2, 8), "cond": True},
58
+ ]
59
+
60
+ # Create filtered list of configs based on conv
61
+ platform_configs = tuple(
62
+ cast(Tuple[int, int, int, int, int], config["config"])
63
+ for config in kernel_configs
64
+ if config["cond"]
65
+ )
66
+
67
+ # On ROCm convert num_stages to 1 as pipelining provides no benefit
68
+ if torch.version.hip:
69
+ platform_configs = tuple(
70
+ (config[0], config[1], config[2], 1, config[4]) for config in platform_configs
71
+ )
72
+
73
+ conv_configs = functools.partial(
74
+ filtered_configs,
75
+ configs=platform_configs,
76
+ )
77
+
78
+ LOOP_BODY = """
79
+ idx_x_h = i - PADDING_H + idx_y_h * STRIDE_H
80
+ idx_x_w = j - PADDING_W + idx_y_w * STRIDE_W
81
+ idx_x_c = tl.arange(0, BLOCK_K) + k
82
+
83
+ x_ptrs = x_base + (
84
+ (idx_x_h * stride_xh)[:, None]
85
+ + (idx_x_w * stride_xw)[:, None]
86
+ + (idx_x_c * stride_xc)[None, :]
87
+ )
88
+ mask_x = (
89
+ (idx_n < BATCH)[:, None]
90
+ & (idx_x_h >= 0)[:, None]
91
+ & (idx_x_h < IN_H)[:, None]
92
+ & (idx_x_w >= 0)[:, None]
93
+ & (idx_x_w < IN_W)[:, None]
94
+ & (idx_x_c < GROUP_IN_C)[None, :]
95
+ )
96
+ matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0)
97
+
98
+ w_ptrs = w_base + (
99
+ (idx_x_c * stride_wc_in)[:, None] + (i * stride_wh) + (j * stride_ww)
100
+ )
101
+ mask_w = (idx_x_c[:, None] < GROUP_IN_C) & (idx_y_c[None, :] < GROUP_OUT_C)
102
+ matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0)
103
+ acc += tl.dot(matrix_x, matrix_w, allow_tf32=ALLOW_TF32)
104
+ """
105
+
106
+ """
107
+ This is a relatively simple conv implementation that can likely be
108
+ improved. Many alternate conv versions can be found here:
109
+ https://github.com/pytorch/torchdynamo/pull/971
110
+ """
111
+ conv2d_template = TritonTemplate(
112
+ name="convolution",
113
+ grid=conv_grid,
114
+ source=r"""
115
+ {{def_kernel("X", "W")}}
116
+ # Tensor dimensions
117
+ BATCH = {{size("X", 0)}}
118
+ IN_C = {{size("X", 1)}}
119
+ IN_H = {{size("X", 2)}}
120
+ IN_W = {{size("X", 3)}}
121
+ OUT_C = {{size(None, 1)}}
122
+ OUT_H = {{size(None, 2)}}
123
+ OUT_W = {{size(None, 3)}}
124
+
125
+ # Strides:
126
+ stride_xn = {{stride("X", 0)}}
127
+ stride_xc = {{stride("X", 1)}}
128
+ stride_xh = {{stride("X", 2)}}
129
+ stride_xw = {{stride("X", 3)}}
130
+ stride_wc_out = {{stride("W", 0)}}
131
+ stride_wc_in = {{stride("W", 1)}}
132
+ stride_wh = {{stride("W", 2)}}
133
+ stride_ww = {{stride("W", 3)}}
134
+
135
+ nhw = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
136
+ idx_y_w = nhw % OUT_W
137
+ nh = nhw // OUT_W
138
+ idx_y_h = nh % OUT_H
139
+ idx_n = nh // OUT_H
140
+ idx_y_c = tl.program_id(1) * BLOCK_N + tl.arange(0, BLOCK_N)
141
+
142
+ {% if GROUPS == 1 %}
143
+ group = 0
144
+ GROUP_IN_C = IN_C
145
+ GROUP_OUT_C = OUT_C
146
+ {% else %}
147
+ group = tl.program_id(2)
148
+ GROUP_IN_C = IN_C // GROUPS
149
+ GROUP_OUT_C = OUT_C // GROUPS
150
+ {% endif %}
151
+
152
+ x_base = X + (group * stride_xc * GROUP_IN_C + idx_n * stride_xn)[:, None]
153
+ w_base = (
154
+ W + (group * stride_wc_out * GROUP_OUT_C + idx_y_c * stride_wc_out)[None, :]
155
+ )
156
+
157
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
158
+
159
+ {% if UNROLL %}
160
+ {% for i in range(KERNEL_H) %}
161
+ {% for j in range(KERNEL_W) %}
162
+ i = {{i}}
163
+ j = {{j}}
164
+ for k in range(0, GROUP_IN_C, BLOCK_K):
165
+ """
166
+ + LOOP_BODY
167
+ + """
168
+ {% endfor %}
169
+ {% endfor %}
170
+ {% else %}
171
+ # Could be simplified, but slightly slower:
172
+ # for i in range(KERNEL_H):
173
+ # for j in range(KERNEL_W):
174
+ # for k in range(0, GROUP_IN_C, BLOCK_K):
175
+ BLOCK_K_COUNT = (GROUP_IN_C + BLOCK_K - 1) // BLOCK_K
176
+ for ijk in range(KERNEL_H * KERNEL_W * BLOCK_K_COUNT):
177
+ k = (ijk % BLOCK_K_COUNT) * BLOCK_K
178
+ ij = ijk // BLOCK_K_COUNT
179
+ i = ij // KERNEL_W
180
+ j = ij % KERNEL_W
181
+ """
182
+ + LOOP_BODY
183
+ + """
184
+ {% endif %}
185
+
186
+ mask = (
187
+ (idx_n < BATCH)[:, None]
188
+ & (idx_y_h < OUT_H)[:, None]
189
+ & (idx_y_w < OUT_W)[:, None]
190
+ & (idx_y_c < GROUP_OUT_C)[None, :]
191
+ )
192
+ idx_n = idx_n[:, None]
193
+ idx_c = idx_y_c[None, :] + group * GROUP_OUT_C
194
+ idx_h = idx_y_h[:, None]
195
+ idx_w = idx_y_w[:, None]
196
+
197
+ # inductor generates a suffix
198
+ {{store_output(("idx_n", "idx_c", "idx_h", "idx_w"), "acc", "mask")}}
199
+ """,
200
+ )
201
+
202
+ aten_convolution = ExternKernelChoice(
203
+ torch.convolution,
204
+ "at::convolution",
205
+ has_out_variant=False,
206
+ )
207
+
208
+
209
+ def conv1x1_via_mm(x, w, *, out):
210
+ w = torch.squeeze(torch.squeeze(w, -1), -1)
211
+ return torch.matmul(
212
+ x.permute(0, 2, 3, 1), w.permute(1, 0), out=out.permute(0, 2, 3, 1)
213
+ )
214
+
215
+
216
+ aten_conv1x1_via_mm = ExternKernelChoice(conv1x1_via_mm, None)
217
+
218
+
219
+ class ConvLayoutParams(TypedDict):
220
+ stride: tuple[int, ...]
221
+ padding: tuple[int, ...]
222
+ dilation: tuple[int, ...]
223
+ transposed: bool
224
+ output_padding: tuple[int, ...]
225
+ groups: int
226
+
227
+
228
+ def conv_layout(
229
+ x: TensorBox,
230
+ weight: TensorBox,
231
+ bias: TensorBox,
232
+ stride: tuple[int, ...],
233
+ padding: tuple[int, ...],
234
+ dilation: tuple[int, ...],
235
+ transposed: bool,
236
+ output_padding: tuple[int, ...],
237
+ groups: int,
238
+ ) -> ir.Layout:
239
+ """Determine output layout for a convolution"""
240
+ with V.graph.fake_mode:
241
+ output = torch.ops.aten.convolution(
242
+ ir.ir_node_to_tensor(x, guard_shape=True),
243
+ ir.ir_node_to_tensor(weight, guard_shape=True),
244
+ ir.ir_node_to_tensor(bias, guard_shape=True),
245
+ stride,
246
+ tuple(V.graph.sizevars.size_hint(p) for p in padding),
247
+ dilation,
248
+ transposed,
249
+ tuple(V.graph.sizevars.size_hint(p) for p in output_padding),
250
+ groups,
251
+ )
252
+ sizes = ir.convert_shape_to_inductor(output.size())
253
+ stride = ir.convert_shape_to_inductor(output.stride())
254
+
255
+ return ir.FixedLayout(
256
+ x.get_device(),
257
+ x.get_dtype(),
258
+ sizes,
259
+ stride,
260
+ )
261
+
262
+
263
+ def channels_last_order(rank):
264
+ order = list(reversed(range(rank)))
265
+ order.insert(1, order.pop(-1))
266
+ return order
267
+
268
+
269
+ def convert_1x1_conv_to_mm(x, weight, bias):
270
+ # special case for 1x1 convolution, which is actually just a matmul
271
+ rank = len(weight.get_size())
272
+ for _ in range(rank - 2):
273
+ weight = L[aten.squeeze](weight, dim=-1)
274
+ weight = L[aten.permute](weight, [1, 0])
275
+
276
+ if x.get_size()[0] != 1:
277
+ x = ir.ExternKernel.require_stride_order(x, channels_last_order(rank))
278
+ else:
279
+ x.realize()
280
+ x.freeze_layout()
281
+
282
+ x_permute = list(range(rank))
283
+ x_permute.append(x_permute.pop(1))
284
+ x = L[aten.permute](x, x_permute)
285
+ *sizes, in_chan = x.get_size()
286
+ x = L[aten.reshape](x, [sympy_product(sizes), in_chan])
287
+ if bias is None:
288
+ result = L[aten.mm](x, weight)
289
+ else:
290
+ result = L[aten.addmm](bias, x, weight)
291
+ result = L[aten.reshape](result, [*sizes, -1])
292
+ result_permute = list(range(rank))
293
+ result_permute.insert(1, result_permute.pop(-1))
294
+ return L[aten.permute](result, result_permute)
295
+
296
+
297
+ @register_lowering(aten.convolution)
298
+ def convolution(
299
+ x: TensorBox,
300
+ weight: TensorBox,
301
+ bias: TensorBox,
302
+ stride: List[int],
303
+ padding: List[int],
304
+ dilation: List[int],
305
+ transposed: bool,
306
+ output_padding: List[int],
307
+ groups: int,
308
+ ):
309
+ stride = tuple(stride)
310
+ padding = tuple(padding)
311
+ dilation = tuple(dilation)
312
+ output_padding = tuple(output_padding)
313
+ assert isinstance(groups, int)
314
+ kwargs: ConvLayoutParams = {
315
+ "stride": stride,
316
+ "padding": padding,
317
+ "dilation": dilation,
318
+ "transposed": transposed,
319
+ "output_padding": output_padding,
320
+ "groups": groups,
321
+ }
322
+
323
+ if len(x.get_size()) == len(weight.get_size()) - 1:
324
+ # add batch dimension to simplify rest of function
325
+ return L[aten.squeeze](
326
+ convolution(L[aten.expand](x, [1, *x.get_size()]), weight, bias, **kwargs),
327
+ dim=0,
328
+ )
329
+
330
+ out_chan, in_chan, *kernel_shape = V.graph.sizevars.evaluate_static_shapes(
331
+ weight.get_size()
332
+ )
333
+ ndim = len(kernel_shape)
334
+ stride = pad_listlike(stride, ndim)
335
+ padding = pad_listlike(padding, ndim)
336
+ dilation = pad_listlike(dilation, ndim)
337
+ output_padding = pad_listlike(output_padding, ndim)
338
+
339
+ def channels_last_conv():
340
+ if V.graph.layout_opt and ndim == 2:
341
+ return True
342
+
343
+ layout = conv_layout(x, weight, None, **kwargs)
344
+ req_stride_order = ir.get_stride_order(
345
+ V.graph.sizevars.size_hints(layout.stride)
346
+ )
347
+ return req_stride_order == ir.NHWC_STRIDE_ORDER
348
+
349
+ autotuning_gemm = config.max_autotune or config.max_autotune_gemm
350
+
351
+ if (
352
+ (config.conv_1x1_as_mm or (autotuning_gemm and channels_last_conv()))
353
+ and is_ones(kernel_shape)
354
+ and is_ones(stride)
355
+ and is_zeros(padding)
356
+ and is_ones(dilation)
357
+ and not transposed
358
+ and is_zeros(output_padding)
359
+ and groups == 1
360
+ ):
361
+ return convert_1x1_conv_to_mm(x, weight, bias)
362
+
363
+ if bias is not None and ir.get_device_type(x) != "cpu":
364
+ # peel off the bias, cudnn is slower with it
365
+ result = convolution(x, weight, None, **kwargs)
366
+ return L[aten.add](
367
+ result, L[aten.view](bias, [result.get_size()[1]] + ndim * [1])
368
+ )
369
+
370
+ x.realize()
371
+ weight.realize()
372
+
373
+ # ndim can be 1 for convolution in models such as demucs
374
+ # TODO: check if it's beneficial to convert Conv1d to Conv2d and then
375
+ # apply channels last.
376
+ if V.graph.layout_opt and ndim == 2:
377
+ V.graph.num_channels_last_conv += 1
378
+ x = ir.ExternKernel.require_channels_last(x)
379
+ # TODO maybe we can convert weights to channels last just once before
380
+ # running the model.
381
+ weight = ir.ExternKernel.require_channels_last(weight)
382
+ layout = conv_layout(x, weight, None, **kwargs)
383
+ else:
384
+ layout = conv_layout(x, weight, None, **kwargs)
385
+ req_stride_order = ir.get_stride_order(
386
+ V.graph.sizevars.size_hints(layout.stride)
387
+ )
388
+ x = ir.ExternKernel.require_stride_order(x, req_stride_order)
389
+ weight = ir.ExternKernel.require_stride_order(weight, req_stride_order)
390
+
391
+ ordered_kwargs_for_cpp_kernel = [
392
+ "stride",
393
+ "padding",
394
+ "dilation",
395
+ "transposed",
396
+ "output_padding",
397
+ "groups",
398
+ ]
399
+ if bias is None:
400
+ args = [x, weight]
401
+ kwargs["bias"] = None # type: ignore[typeddict-unknown-key]
402
+ ordered_kwargs_for_cpp_kernel.insert(0, "bias")
403
+ else:
404
+ args = [x, weight, bias]
405
+ bias.realize()
406
+ bias.freeze_layout()
407
+ V.graph.sizevars.evaluate_static_shapes(bias.get_size())
408
+
409
+ choices = [
410
+ aten_convolution.bind(args, layout, ordered_kwargs_for_cpp_kernel, **kwargs)
411
+ ]
412
+ if (
413
+ use_triton_template(layout)
414
+ # templates only support these:
415
+ and ndim == 2
416
+ and is_ones(dilation)
417
+ and not transposed
418
+ and is_zeros(output_padding)
419
+ # there are some odd models where this check fails (e.g. shufflenet_v2_x1_0)
420
+ and V.graph.sizevars.statically_known_equals(in_chan, x.get_size()[1])
421
+ ):
422
+ if (
423
+ is_ones(kernel_shape)
424
+ and is_ones(stride)
425
+ and is_zeros(padding)
426
+ and groups == 1
427
+ ):
428
+ choices.append(aten_conv1x1_via_mm.bind(args, layout))
429
+
430
+ for cfg in conv_configs(
431
+ sympy_product([x.get_size()[0], *x.get_size()[2:]]),
432
+ out_chan,
433
+ in_chan,
434
+ ):
435
+ conv2d_template.maybe_append_choice(
436
+ choices,
437
+ (x, weight),
438
+ layout,
439
+ KERNEL_H=kernel_shape[0],
440
+ KERNEL_W=kernel_shape[1],
441
+ STRIDE_H=stride[0],
442
+ STRIDE_W=stride[1],
443
+ PADDING_H=padding[0],
444
+ PADDING_W=padding[1],
445
+ GROUPS=groups,
446
+ # TODO(jansel): try unroll for bigger kernels once fixed:
447
+ # https://github.com/openai/triton/issues/1254
448
+ UNROLL=is_ones(kernel_shape),
449
+ ALLOW_TF32=torch.backends.cudnn.allow_tf32,
450
+ num_stages=cfg.num_stages,
451
+ num_warps=cfg.num_warps,
452
+ **cfg.kwargs,
453
+ )
454
+
455
+ return autotune_select_algorithm("convolution", choices, args, layout)
456
+
457
+
458
+ @register_lowering(aten._convolution)
459
+ def _convolution(
460
+ x,
461
+ weight,
462
+ bias,
463
+ stride,
464
+ padding,
465
+ dilation,
466
+ transposed,
467
+ output_padding,
468
+ groups,
469
+ benchmark,
470
+ deterministic,
471
+ cudnn_enabled,
472
+ allow_tf32,
473
+ ):
474
+ return convolution(
475
+ x, weight, bias, stride, padding, dilation, transposed, output_padding, groups
476
+ )
477
+
478
+
479
+ def constrain_conv_to_fx_strides(fx_node, *args, **kwargs):
480
+ assert fx_node.target == torch.ops.aten.convolution.default
481
+ if V.graph.layout_opt:
482
+ return args, kwargs
483
+ else:
484
+ return constrain_to_fx_strides(fx_node, *args, **kwargs)
485
+
486
+
487
+ add_layout_constraint(aten.convolution, constrain_conv_to_fx_strides)
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+ import torch
4
+
5
+ from .. import config as inductor_config
6
+ from ..lowering import register_lowering
7
+ from ..select_algorithm import (
8
+ autotune_select_algorithm,
9
+ ExternKernelChoice,
10
+ TritonTemplate,
11
+ )
12
+ from ..utils import use_aten_gemm_kernels, use_triton_template
13
+ from .mm_common import (
14
+ addmm_epilogue,
15
+ int8_mm_configs,
16
+ mm_args,
17
+ mm_configs,
18
+ mm_grid,
19
+ mm_options,
20
+ )
21
+
22
+ log = logging.getLogger(__name__)
23
+ aten = torch.ops.aten
24
+
25
+ mm_template = TritonTemplate(
26
+ name="mm",
27
+ grid=mm_grid,
28
+ source=r"""
29
+ {{def_kernel("A", "B")}}
30
+ M = {{size("A", 0)}}
31
+ N = {{size("B", 1)}}
32
+ K = {{size("A", 1)}}
33
+ if M * N == 0:
34
+ # early exit due to zero-size input(s)
35
+ return
36
+ stride_am = {{stride("A", 0)}}
37
+ stride_ak = {{stride("A", 1)}}
38
+ stride_bk = {{stride("B", 0)}}
39
+ stride_bn = {{stride("B", 1)}}
40
+
41
+ # based on triton.ops.matmul
42
+ pid = tl.program_id(0)
43
+ grid_m = (M + BLOCK_M - 1) // BLOCK_M
44
+ grid_n = (N + BLOCK_N - 1) // BLOCK_N
45
+
46
+ # re-order program ID for better L2 performance
47
+ width = GROUP_M * grid_n
48
+ group_id = pid // width
49
+ group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
50
+ pid_m = group_id * GROUP_M + (pid % group_size)
51
+ pid_n = (pid % width) // (group_size)
52
+
53
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
54
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
55
+ ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
56
+ rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
57
+ rk = tl.arange(0, BLOCK_K)
58
+ A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
59
+ B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
60
+
61
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
62
+ for k in range(K, 0, -BLOCK_K):
63
+ if EVEN_K:
64
+ a = tl.load(A)
65
+ b = tl.load(B)
66
+ else:
67
+ a = tl.load(A, mask=rk[None, :] < k, other=0.)
68
+ b = tl.load(B, mask=rk[:, None] < k, other=0.)
69
+ if B_PROLOGUE_CAST_TYPE is not None:
70
+ b = b.to(B_PROLOGUE_CAST_TYPE)
71
+ acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
72
+ A += BLOCK_K * stride_ak
73
+ B += BLOCK_K * stride_bk
74
+
75
+ # rematerialize rm and rn to save registers
76
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
77
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
78
+ idx_m = rm[:, None]
79
+ idx_n = rn[None, :]
80
+ mask = (idx_m < M) & (idx_n < N)
81
+
82
+ # inductor generates a suffix
83
+ {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
84
+ """,
85
+ )
86
+
87
+ aten_mm = ExternKernelChoice(torch.mm, "at::mm_out")
88
+
89
+
90
+ aten_addmm = ExternKernelChoice(torch.addmm, "at::addmm_out")
91
+
92
+ aten__int_mm = ExternKernelChoice(torch._int_mm, "at::_int_mm")
93
+
94
+
95
+ def _is_int8_mat(mat):
96
+ return mat.get_dtype() in (torch.int8, torch.uint8)
97
+
98
+
99
+ def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1):
100
+ """
101
+ Giving torch.addmm a 1D tensor calls a different (faster) cublasLt
102
+ kernel under the hood. There are a few shapes where this is slower,
103
+ but they are rare.
104
+ """
105
+ if inp.stride(0) == 0 or inp.size(0) == 1:
106
+ return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
107
+ return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)
108
+
109
+
110
+ aten_bias_addmm = ExternKernelChoice(bias_addmm, None)
111
+
112
+
113
+ @register_lowering(aten.mm)
114
+ def tuned_mm(mat1, mat2, *, layout=None):
115
+ m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
116
+
117
+ # options to tune from
118
+ choices = [aten_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
119
+ if m * n != 0 and use_triton_template(layout):
120
+ for config in mm_configs(m, n, k):
121
+ mm_template.maybe_append_choice(
122
+ choices,
123
+ (mat1, mat2),
124
+ layout,
125
+ **mm_options(config, k, layout),
126
+ )
127
+
128
+ return autotune_select_algorithm("mm", choices, [mat1, mat2], layout)
129
+
130
+
131
+ @register_lowering(aten._int_mm)
132
+ def tuned_int_mm(mat1, mat2, *, layout=None):
133
+ m, n, k, layout, mat1, mat2 = mm_args(
134
+ mat1, mat2, layout=layout, out_dtype=torch.int32
135
+ )
136
+ choices = (
137
+ [aten__int_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
138
+ )
139
+ if m * n != 0 and use_triton_template(layout, enable_int32=True):
140
+ # TODO: Re-enable eager mode implementation once cuBLAS is fixed
141
+ choices = []
142
+ for config in int8_mm_configs(m, n, k):
143
+ mm_template.maybe_append_choice(
144
+ choices,
145
+ (mat1, mat2),
146
+ layout,
147
+ **mm_options(config, k, layout),
148
+ )
149
+ return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout)
150
+
151
+
152
+ @register_lowering(aten.addmm)
153
+ def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
154
+ ordered_kwargs_for_cpp_kernel = ("beta", "alpha")
155
+
156
+ m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout)
157
+ if m * n == 0 or not use_triton_template(layout):
158
+ choices = (
159
+ [
160
+ aten_addmm.bind(
161
+ (inp, mat1, mat2),
162
+ layout,
163
+ ordered_kwargs_for_cpp_kernel,
164
+ alpha=alpha,
165
+ beta=beta,
166
+ )
167
+ ]
168
+ if use_aten_gemm_kernels()
169
+ else []
170
+ )
171
+ return autotune_select_algorithm("addmm", choices, [inp, mat1, mat2], layout)
172
+
173
+ choices = (
174
+ [
175
+ aten_addmm.bind(
176
+ (inp_expanded, mat1, mat2),
177
+ layout,
178
+ ordered_kwargs_for_cpp_kernel,
179
+ alpha=alpha,
180
+ beta=beta,
181
+ )
182
+ ]
183
+ if use_aten_gemm_kernels()
184
+ else []
185
+ )
186
+ if (
187
+ inp_expanded.get_stride()[0] == 0
188
+ and inp_expanded.get_device().type == "cuda"
189
+ and inductor_config.triton.autotune_cublasLt
190
+ ):
191
+ # unexpand inp to make sure fused addmm from cublasLt is used
192
+ choices.insert(
193
+ 0,
194
+ aten_bias_addmm.bind(
195
+ (inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta
196
+ ),
197
+ )
198
+
199
+ for config in mm_configs(m, n, k):
200
+ mm_template.maybe_append_choice(
201
+ choices,
202
+ (inp_expanded, mat1, mat2),
203
+ layout,
204
+ **mm_options(config, k, layout),
205
+ prefix_args=1,
206
+ epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
207
+ )
208
+
209
+ return autotune_select_algorithm(
210
+ "addmm", choices, [inp_expanded, mat1, mat2], layout
211
+ )
212
+
213
+
214
+ def fallback_mixed_mm(mat1, mat2, *, out):
215
+ return torch.mm(mat1, mat2.to(mat1.dtype), out=out)
216
+
217
+
218
+ aten_fallback_mixed_mm = ExternKernelChoice(fallback_mixed_mm, None)
219
+
220
+
221
+ def tuned_mixed_mm(mat1, mat2, mat2_dtype):
222
+ m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None)
223
+ choices = [aten_fallback_mixed_mm.bind((mat1, mat2), layout)]
224
+ if mat1.layout.dtype != torch.float32 and not mat2.layout.is_contiguous():
225
+ # can't use triton kernel unless one of these is true
226
+ return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout)
227
+ if inductor_config.force_mixed_mm:
228
+ choices = []
229
+ b_prologue_cast_type = f"tl.{mat2_dtype}".replace("torch.", "")
230
+ has_int8_tensor = _is_int8_mat(mat1) or _is_int8_mat(mat2)
231
+ for config in mm_configs(m, n, k, has_int8_tensor=has_int8_tensor):
232
+ mm_template.maybe_append_choice(
233
+ choices,
234
+ (mat1, mat2),
235
+ layout,
236
+ **mm_options(config, k, layout, b_prologue_cast_type),
237
+ )
238
+ return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout)
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import logging
3
+ from typing import cast, List, Tuple
4
+
5
+ import sympy
6
+
7
+ import torch
8
+ from torch._inductor.select_algorithm import realize_inputs
9
+ from torch._inductor.virtualized import V
10
+ from ..utils import ceildiv as cdiv, next_power_of_2
11
+
12
+ log = logging.getLogger(__name__)
13
+
14
+
15
+ def triton_config(num_stages, num_warps, **kwargs):
16
+ from triton import Config
17
+
18
+ return Config(kwargs, num_stages=num_stages, num_warps=num_warps)
19
+
20
+
21
+ def filtered_configs(
22
+ m: int,
23
+ n: int,
24
+ k: int,
25
+ configs: List[Tuple[int, int, int, int, int]],
26
+ has_int8_tensor=False,
27
+ ):
28
+ """Heuristic to shrink configs when they are bigger than the input size"""
29
+
30
+ # According to https://github.com/openai/triton/issues/2156#issuecomment-1695897424
31
+ # it's safer to use at least [32, 32] block size for int8/uint8
32
+ # tensors
33
+ min_block_size = 32 if has_int8_tensor else 16
34
+ m = max(next_power_of_2(V.graph.sizevars.size_hint(m)), min_block_size)
35
+ n = max(next_power_of_2(V.graph.sizevars.size_hint(n)), min_block_size)
36
+ k = max(next_power_of_2(V.graph.sizevars.size_hint(k)), min_block_size)
37
+ used = set()
38
+ for block_m, block_n, block_k, num_stages, num_warps in configs:
39
+ # shrink configs for small sizes
40
+ block_m = max(min(block_m, m), min_block_size)
41
+ block_n = max(min(block_n, n), min_block_size)
42
+ block_k = max(min(block_k, k), min_block_size)
43
+ # each warp computes 16x16 tile = 256
44
+ num_warps = min(num_warps, block_m * block_n // 256)
45
+ if (block_m, block_n, block_k, num_stages, num_warps) not in used:
46
+ used.add((block_m, block_n, block_k, num_stages, num_warps))
47
+ yield triton_config(
48
+ BLOCK_M=block_m,
49
+ BLOCK_N=block_n,
50
+ BLOCK_K=block_k,
51
+ num_stages=num_stages,
52
+ num_warps=num_warps,
53
+ )
54
+
55
+
56
+ # List of dictionaries to store the kernel configs. Configs that evaluate to true
57
+ # will be utilised on the target platform
58
+ mm_kernel_configs = [
59
+ # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
60
+ {"config": (64, 64, 32, 2, 4), "cond": True},
61
+ {"config": (64, 128, 32, 3, 4), "cond": True},
62
+ {"config": (128, 64, 32, 3, 4), "cond": True},
63
+ {"config": (64, 128, 32, 4, 8), "cond": True},
64
+ {"config": (128, 64, 32, 4, 8), "cond": True},
65
+ {"config": (64, 32, 32, 5, 8), "cond": True},
66
+ {"config": (32, 64, 32, 5, 8), "cond": True},
67
+ {"config": (128, 128, 32, 2, 8), "cond": True},
68
+ {"config": (64, 64, 64, 3, 8), "cond": True},
69
+ {"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None},
70
+ {"config": (64, 64, 16, 2, 4), "cond": True},
71
+ {"config": (32, 32, 16, 1, 2), "cond": True},
72
+ ]
73
+
74
+ int8_mm_kernel_configs = [
75
+ {"config": (64, 64, 32, 2, 4), "cond": True},
76
+ {"config": (64, 128, 32, 3, 4), "cond": True},
77
+ {"config": (128, 64, 32, 3, 4), "cond": True},
78
+ {"config": (64, 128, 32, 4, 8), "cond": True},
79
+ {"config": (128, 64, 32, 4, 8), "cond": True},
80
+ {"config": (64, 32, 32, 5, 8), "cond": True},
81
+ {"config": (32, 64, 32, 5, 8), "cond": True},
82
+ {"config": (128, 128, 32, 2, 8), "cond": True},
83
+ {"config": (64, 64, 64, 3, 8), "cond": True},
84
+ # {"config": (32, 32, 128, 2, 4), "cond": True},
85
+ # {"config": (64, 64, 16, 2, 4), "cond": True},
86
+ # {"config": (32, 32, 16, 1, 2), "cond": True},
87
+ {"config": (128, 256, 128, 3, 8), "cond": torch.version.hip is None},
88
+ {"config": (256, 128, 128, 3, 8), "cond": torch.version.hip is None},
89
+ ]
90
+
91
+ # Create filtered list of configs based on cond evaluation
92
+
93
+
94
+ mm_platform_configs = tuple(
95
+ cast(Tuple[int, int, int, int, int], config["config"])
96
+ for config in mm_kernel_configs
97
+ if config["cond"]
98
+ )
99
+ int8_platform_configs = tuple(
100
+ cast(Tuple[int, int, int, int, int], config["config"])
101
+ for config in int8_mm_kernel_configs
102
+ if config["cond"]
103
+ )
104
+
105
+ # On ROCm convert num_stages to 1 as pipelining provides no benefit
106
+ if torch.version.hip:
107
+ mm_platform_configs = tuple(
108
+ (config[0], config[1], config[2], 1, config[4])
109
+ for config in mm_platform_configs
110
+ )
111
+ int8_platform_configs = tuple(
112
+ (config[0], config[1], config[2], 1, config[4])
113
+ for config in mm_platform_configs
114
+ )
115
+
116
+ mm_configs = functools.partial(
117
+ filtered_configs,
118
+ configs=mm_platform_configs,
119
+ )
120
+
121
+ int8_mm_configs = functools.partial(
122
+ filtered_configs,
123
+ configs=int8_platform_configs,
124
+ )
125
+
126
+
127
+ def mm_grid(m, n, meta):
128
+ """
129
+ The CUDA grid size for matmul triton templates.
130
+ """
131
+ return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), 1, 1)
132
+
133
+
134
+ def acc_type(dtype):
135
+ if dtype in (torch.float16, torch.bfloat16):
136
+ return "tl.float32"
137
+ return f"tl.{dtype}".replace("torch.", "")
138
+
139
+
140
+ def mm_options(config, sym_k, layout, b_prologue_cast_type=None):
141
+ """
142
+ Common options to matmul triton templates.
143
+ """
144
+ even_k_symbolic = (
145
+ # it isn't worth guarding on this
146
+ sympy.gcd(sym_k, config.kwargs["BLOCK_K"])
147
+ == config.kwargs["BLOCK_K"]
148
+ )
149
+ return dict(
150
+ GROUP_M=8,
151
+ EVEN_K=even_k_symbolic,
152
+ ALLOW_TF32=torch.backends.cuda.matmul.allow_tf32,
153
+ ACC_TYPE=acc_type(layout.dtype),
154
+ B_PROLOGUE_CAST_TYPE=b_prologue_cast_type,
155
+ num_stages=config.num_stages,
156
+ num_warps=config.num_warps,
157
+ **config.kwargs,
158
+ )
159
+
160
+
161
+ def mm_args(mat1, mat2, *others, layout=None, out_dtype=None, use_4x2_dim=False):
162
+ """
163
+ Common arg processing for mm,bmm,addmm,etc
164
+ """
165
+ mat1, mat2 = realize_inputs(mat1, mat2)
166
+ *b1, m, k1 = mat1.get_size()
167
+ *b2, k2, n = mat2.get_size()
168
+ b = [V.graph.sizevars.guard_equals(a, b) for a, b in zip(b1, b2)]
169
+ if use_4x2_dim:
170
+ k2 = k2 * 2
171
+ k = V.graph.sizevars.guard_equals(k1, k2)
172
+ if layout is None:
173
+ from torch._inductor.ir import FixedLayout
174
+
175
+ if out_dtype is None:
176
+ out_dtype = mat1.get_dtype()
177
+ layout = FixedLayout(
178
+ mat1.get_device(),
179
+ out_dtype,
180
+ [*b, m, n],
181
+ )
182
+ else:
183
+ assert out_dtype is None, "out_dtype is ignored if layout is specified."
184
+
185
+ from ..lowering import expand
186
+
187
+ others = [realize_inputs(expand(x, layout.size)) for x in others]
188
+
189
+ return [m, n, k, layout, mat1, mat2, *others]
190
+
191
+
192
+ def addmm_epilogue(dtype, alpha, beta):
193
+ def epilogue(acc, bias):
194
+ if alpha != 1:
195
+ acc = V.ops.mul(acc, V.ops.constant(alpha, dtype)) # type: ignore[attr-defined]
196
+ if beta != 1:
197
+ bias = V.ops.mul(bias, V.ops.constant(beta, dtype)) # type: ignore[attr-defined]
198
+ return V.ops.add(acc, bias) # type: ignore[attr-defined]
199
+
200
+ return epilogue
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/mm_plus_mm.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+
3
+ import torch
4
+
5
+ from ..lowering import lowerings
6
+ from ..select_algorithm import (
7
+ autotune_select_algorithm,
8
+ ExternKernelChoice,
9
+ TritonTemplate,
10
+ )
11
+ from ..utils import use_aten_gemm_kernels, use_triton_template
12
+ from ..virtualized import V
13
+ from .mm_common import mm_args, mm_grid, mm_options
14
+
15
+ aten = torch.ops.aten
16
+
17
+ aten_mm_plus_mm = ExternKernelChoice(
18
+ torch.ops.inductor._mm_plus_mm, "torch::inductor::_mm_plus_mm"
19
+ )
20
+
21
+ mm_plus_mm_template = TritonTemplate(
22
+ name="mm_plus_mm",
23
+ grid=mm_grid,
24
+ debug=False,
25
+ source=r"""
26
+ {{def_kernel("A", "B", "C", "D")}}
27
+ M = {{size("A", 0)}}
28
+ N = {{size("B", 1)}}
29
+ K1 = {{size("A", 1)}}
30
+ if M * N == 0:
31
+ # early exit due to zero-size input(s)
32
+ return
33
+ # K2 = {{size("C", 1)}}
34
+ stride_am = {{stride("A", 0)}}
35
+ stride_ak = {{stride("A", 1)}}
36
+ stride_bk = {{stride("B", 0)}}
37
+ stride_bn = {{stride("B", 1)}}
38
+ stride_cm = {{stride("C", 0)}}
39
+ stride_ck = {{stride("C", 1)}}
40
+ stride_dk = {{stride("D", 0)}}
41
+ stride_dn = {{stride("D", 1)}}
42
+
43
+ # based on triton.ops.matmul
44
+ pid = tl.program_id(0)
45
+ grid_m = (M + BLOCK_M - 1) // BLOCK_M
46
+ grid_n = (N + BLOCK_N - 1) // BLOCK_N
47
+
48
+ # re-order program ID for better L2 performance
49
+ width = GROUP_M * grid_n
50
+ group_id = pid // width
51
+ group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
52
+ pid_m = group_id * GROUP_M + (pid % group_size)
53
+ pid_n = (pid % width) // (group_size)
54
+
55
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
56
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
57
+ ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
58
+ rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
59
+ rk = tl.arange(0, BLOCK_K)
60
+ A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
61
+ B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
62
+ C = C + (ram[:, None] * stride_cm + rk[None, :] * stride_ck)
63
+ D = D + (rk[:, None] * stride_dk + rbn[None, :] * stride_dn)
64
+
65
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
66
+ for k1 in range(K1, 0, -BLOCK_K):
67
+ # First matmul with A @ B
68
+ if EVEN_K:
69
+ a = tl.load(A)
70
+ b = tl.load(B)
71
+ else:
72
+ a = tl.load(A, mask=rk[None, :] < k1, other=0.)
73
+ b = tl.load(B, mask=rk[:, None] < k1, other=0.)
74
+ acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
75
+ A += BLOCK_K * stride_ak
76
+ B += BLOCK_K * stride_bk
77
+
78
+ for k2 in range(K1, 0, -BLOCK_K):
79
+
80
+ # Second matmul with C @ D
81
+ if EVEN_K:
82
+ c = tl.load(C)
83
+ d = tl.load(D)
84
+ else:
85
+ c = tl.load(C, mask=rk[None, :] < k2, other=0.)
86
+ d = tl.load(D, mask=rk[:, None] < k2, other=0.)
87
+ acc += tl.dot(c, d, allow_tf32=ALLOW_TF32)
88
+ C += BLOCK_K * stride_ck
89
+ D += BLOCK_K * stride_dk
90
+
91
+
92
+ idx_m = rm[:, None]
93
+ idx_n = rn[None, :]
94
+ mask = (idx_m < M) & (idx_n < N)
95
+
96
+ # inductor generates a suffix
97
+ {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
98
+ """,
99
+ )
100
+
101
+
102
+ @functools.lru_cache(None)
103
+ def mm_configs():
104
+ import triton
105
+
106
+ # List of dictionaries to store the kernel configs. Configs that evaluate to true
107
+ # will be utilised on the target platform
108
+ mm_triton_configs = [
109
+ {
110
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 32},
111
+ "num_stages": 2,
112
+ "num_warps": 4,
113
+ "cond": True,
114
+ },
115
+ {
116
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 32},
117
+ "num_stages": 3,
118
+ "num_warps": 8,
119
+ "cond": True,
120
+ },
121
+ {
122
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 32},
123
+ "num_stages": 4,
124
+ "num_warps": 16,
125
+ "cond": True,
126
+ },
127
+ {
128
+ "config": {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32},
129
+ "num_stages": 4,
130
+ "num_warps": 8,
131
+ "cond": True,
132
+ },
133
+ {
134
+ "config": {"BLOCK_M": 32, "BLOCK_N": 64, "BLOCK_K": 32},
135
+ "num_stages": 4,
136
+ "num_warps": 8,
137
+ "cond": True,
138
+ },
139
+ {
140
+ "config": {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32},
141
+ "num_stages": 1,
142
+ "num_warps": 8,
143
+ "cond": True,
144
+ },
145
+ {
146
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 64},
147
+ "num_stages": 1,
148
+ "num_warps": 8,
149
+ "cond": True,
150
+ },
151
+ {
152
+ "config": {"BLOCK_M": 32, "BLOCK_N": 32, "BLOCK_K": 128},
153
+ "num_stages": 1,
154
+ "num_warps": 8,
155
+ "cond": torch.version.hip is None,
156
+ },
157
+ {
158
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 16},
159
+ "num_stages": 2,
160
+ "num_warps": 4,
161
+ "cond": True,
162
+ },
163
+ {
164
+ "config": {"BLOCK_M": 32, "BLOCK_N": 32, "BLOCK_K": 16},
165
+ "num_stages": 1,
166
+ "num_warps": 2,
167
+ "cond": True,
168
+ },
169
+ ]
170
+
171
+ # Filter out configs in which cond evaluates to true
172
+ # On ROCm convert num_stages to 1 as pipelining provides no benefit
173
+ if torch.version.hip:
174
+ filtered_configs = [
175
+ triton.Config(c["config"], num_stages=1, num_warps=c["num_warps"])
176
+ for c in mm_triton_configs
177
+ if c["cond"]
178
+ ]
179
+ else:
180
+ filtered_configs = [
181
+ triton.Config(
182
+ c["config"], num_stages=c["num_stages"], num_warps=c["num_warps"]
183
+ )
184
+ for c in mm_triton_configs
185
+ if c["cond"]
186
+ ]
187
+
188
+ return filtered_configs
189
+
190
+
191
+ def tuned_mm_plus_mm(mat1, mat2, mat3, mat4, *, layout=None):
192
+ """
193
+ Computes mm(mat1, mat2) + mm(mat3, mat4)
194
+ """
195
+ m1, n1, k1, layout1, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
196
+ m2, n2, _, layout2, mat3, mat4 = mm_args(mat3, mat4, layout=layout)
197
+ # Optimization is optional, because we can always just not do the fusion
198
+ if (
199
+ m1 * n1 == 0
200
+ or m2 * n2 == 0
201
+ or not V.graph.sizevars.statically_known_list_equals(
202
+ mat1.get_size(), mat3.get_size()
203
+ )
204
+ or not V.graph.sizevars.statically_known_list_equals(
205
+ mat2.get_size(), mat4.get_size()
206
+ )
207
+ ):
208
+ # TODO(jansel): support different K values when this is fixed:
209
+ # https://github.com/openai/triton/issues/967
210
+ if m1 == m2 and n1 == n2:
211
+ V.graph.sizevars.guard_equals(m1, m2)
212
+ V.graph.sizevars.guard_equals(n1, n2)
213
+ return lowerings[aten.addmm](lowerings[aten.mm](mat3, mat4), mat1, mat2)
214
+ return lowerings[aten.add](
215
+ lowerings[aten.mm](mat1, mat2), lowerings[aten.mm](mat3, mat4)
216
+ )
217
+
218
+ assert layout1 == layout2
219
+ # options to tune from
220
+ choices = (
221
+ [aten_mm_plus_mm.bind((mat1, mat2, mat3, mat4), layout1)]
222
+ if use_aten_gemm_kernels()
223
+ else []
224
+ )
225
+ if use_triton_template(layout1):
226
+ for config in mm_configs():
227
+ # see https://github.com/openai/triton/issues/1298
228
+ # BLOCK_K = K causes llvm error
229
+ if config.kwargs["BLOCK_K"] < k1:
230
+ mm_plus_mm_template.maybe_append_choice(
231
+ choices,
232
+ (mat1, mat2, mat3, mat4),
233
+ layout1,
234
+ **mm_options(config, k1, layout1),
235
+ )
236
+
237
+ return autotune_select_algorithm(
238
+ "mm_plus_mm", choices, [mat1, mat2, mat3, mat4], layout1
239
+ )
llava_next/lib/python3.10/site-packages/torch/_inductor/kernel/unpack_mixed_mm.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from typing import List
3
+
4
+ from ..select_algorithm import autotune_select_algorithm, ChoiceCaller, TritonTemplate
5
+ from .mm_common import mm_args, mm_configs, mm_grid, mm_options
6
+
7
+ log = logging.getLogger(__name__)
8
+
9
+ uint4x2_mixed_mm_template = TritonTemplate(
10
+ name="uint4x2_mixed_mm",
11
+ grid=mm_grid,
12
+ source=r"""
13
+ {{def_kernel("A", "B")}}
14
+ M = {{size("A", 0)}}
15
+ N = {{size("B", 1)}}
16
+ K = {{size("A", 1)}}
17
+ stride_am = {{stride("A", 0)}}
18
+ stride_ak = {{stride("A", 1)}}
19
+ stride_bk = {{stride("B", 0)}}
20
+ stride_bn = {{stride("B", 1)}}
21
+
22
+ # based on triton.ops.matmul
23
+ pid = tl.program_id(0)
24
+ grid_m = (M + BLOCK_M - 1) // BLOCK_M
25
+ grid_n = (N + BLOCK_N - 1) // BLOCK_N
26
+
27
+ # re-order program ID for better L2 performance
28
+ width = GROUP_M * grid_n
29
+ group_id = pid // width
30
+ group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
31
+ pid_m = group_id * GROUP_M + (pid % group_size)
32
+ pid_n = (pid % width) // (group_size)
33
+
34
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
35
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
36
+ ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
37
+ rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
38
+ rk = tl.arange(0, BLOCK_K)
39
+ A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
40
+ B = B + (rk[:, None]//2 * stride_bk + rbn[None, :] * stride_bn)
41
+ b_shifts = 4*(rk%2)
42
+ b_subs = 8*(1-(rk%2))
43
+
44
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
45
+ for k in range(K, 0, -BLOCK_K):
46
+ if EVEN_K:
47
+ a = tl.load(A)
48
+ b = tl.load(B)
49
+ else:
50
+ a = tl.load(A, mask=rk[None, :] < k, other=0.)
51
+ b = tl.load(B, mask=rk[:, None] < k, other=0.)
52
+ b = ((b >> b_shifts[:, None]) & 0xF) - 8
53
+ b = b.to(B_PROLOGUE_CAST_TYPE)
54
+ acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
55
+ A += BLOCK_K * stride_ak
56
+ B += BLOCK_K//2 * stride_bk
57
+
58
+ # rematerialize rm and rn to save registers
59
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
60
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
61
+ idx_m = rm[:, None]
62
+ idx_n = rn[None, :]
63
+ mask = (idx_m < M) & (idx_n < N)
64
+
65
+ # inductor generates a suffix
66
+ {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
67
+ """,
68
+ )
69
+
70
+
71
+ def tuned_uint4x2_mixed_mm(mat1, mat2, mat2_mm_shape, mat2_dtype):
72
+ m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None, use_4x2_dim=True)
73
+ choices: List[ChoiceCaller] = []
74
+ b_prologue_cast_type = f"tl.{mat2_dtype}".replace("torch.", "")
75
+ for config in mm_configs(m, n, k):
76
+ uint4x2_mixed_mm_template.maybe_append_choice(
77
+ choices,
78
+ (mat1, mat2),
79
+ layout,
80
+ **mm_options(config, k, layout, b_prologue_cast_type),
81
+ )
82
+ return autotune_select_algorithm("uint4x2_mixed_mm", choices, [mat1, mat2], layout)
llava_next/lib/python3.10/site-packages/torch/_lazy/__init__.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+
3
+ import torch._C._lazy
4
+ from torch.utils._pytree import tree_flatten, tree_unflatten
5
+
6
+ from .closure import add_step_closure, run_step_closures
7
+
8
+
9
+ def mark_step(device: str = "", wait=False):
10
+ """Triggers a mark step, which amounts to
11
+ - collecting a group of 'live' lazy tensors to index into the compilation cache
12
+ (lowering/compiling their IR graphs if not cached)
13
+ - kicking off execution of the compiled function
14
+ - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
15
+ """
16
+ # TODO(whc) expand this to include backend hooks and align with XLA backend needs
17
+ torch._C._lazy._mark_step(device, [], wait=wait)
18
+
19
+ run_step_closures()
20
+
21
+
22
+ def wait_device_ops(devices=None):
23
+ """Waits for all the async operations on the given devices to complete.
24
+ Args:
25
+ devices (string..., optional): The devices whose async ops need to be waited
26
+ for. If empty, all the local devices will be waited for.
27
+ """
28
+ if devices is None:
29
+ devices = []
30
+ torch._C._lazy._wait_device_ops(devices=devices)
31
+
32
+
33
+ def sync_multi(tensors, devices):
34
+ """
35
+ Sync the list of lazy tensors so there IR get lowered for the activate backend
36
+ and the compiled computation graph get cached.
37
+ """
38
+ torch._C._lazy._sync_multi(tensors, devices)
39
+
40
+
41
+ def get_tensor_id(tensor):
42
+ """Return a unique id of the lazy tensor maintained by LTC"""
43
+ return torch._C._lazy._get_tensor_id(tensor)
44
+
45
+
46
+ def to_cpu(tensors, devices=None):
47
+ devices = devices or ["lazy"]
48
+
49
+ flattened, spec = tree_flatten(tensors)
50
+ sync_multi(flattened, devices)
51
+ return tree_unflatten([t.to("cpu") for t in flattened], spec)
52
+
53
+
54
+ def save(tensors, *args, **kwargs):
55
+ torch.save(to_cpu(tensors), *args, **kwargs)
llava_next/lib/python3.10/site-packages/torch/_lazy/closure.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import threading
3
+ from queue import Empty as EmptyQueue, Queue
4
+
5
+ from torch._lazy.device_context import get_device_context
6
+
7
+
8
+ class ClosureHandler:
9
+ def __init__(self):
10
+ pass
11
+
12
+ def run(self, closure):
13
+ """Run closure function
14
+
15
+ Args:
16
+ closure: callable function to run
17
+ """
18
+ closure()
19
+
20
+ def __call__(self, closures):
21
+ for closure in closures:
22
+ self.run(closure)
23
+
24
+
25
+ class AsyncClosureHandler(ClosureHandler):
26
+ """Handler for Asynchronous Step Closures
27
+ Args:
28
+ max_queue_size: The maximum length of the closure queue after which
29
+ the training loop will block until closures are evaluated.
30
+ By default, a reasonable limit of a maximum of 100 on the queue.
31
+ This value can be set using the `XLA_MAX_ASYNC_QUEUE` environment
32
+ variable.
33
+ """
34
+
35
+ def __init__(self, max_queue_size=100):
36
+ super().__init__()
37
+ self._closure_queue: Queue = Queue(
38
+ int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size))
39
+ )
40
+ self._closure_exception: Queue = Queue()
41
+ self._closure_lock = threading.Lock()
42
+ self._closure_event_loop_finished = threading.Event()
43
+ self._closure_event_loop = None
44
+
45
+ def start_event_loop(self):
46
+ """Start closure event loop if not started"""
47
+ if self._closure_event_loop is None:
48
+
49
+ def event_loop():
50
+ # Run loop until closure event is set and closure queue is empty
51
+ while True:
52
+ try:
53
+ closure = self._closure_queue.get(block=True, timeout=3)
54
+ closure()
55
+ self._closure_queue.task_done()
56
+ except EmptyQueue:
57
+ with self._closure_lock:
58
+ if self._closure_queue.empty():
59
+ self._closure_event_loop_finished.set()
60
+ return
61
+ except Exception as e:
62
+ self._closure_exception.put(e)
63
+ return
64
+
65
+ self._closure_event_loop = threading.Thread(target=event_loop)
66
+ self._closure_event_loop.start()
67
+
68
+ def run(self, closure):
69
+ with self._closure_lock:
70
+ self._closure_queue.put(closure, block=True)
71
+ if (
72
+ self._closure_event_loop is None
73
+ or not self._closure_event_loop.is_alive()
74
+ ):
75
+ try:
76
+ e = self._closure_exception.get(block=False)
77
+ raise RuntimeError(
78
+ "Cannot run asynchronous closure due to previously raised exception"
79
+ ) from e
80
+ except EmptyQueue:
81
+ self._closure_event_loop = None
82
+ self.start_event_loop()
83
+
84
+
85
+ def add_step_closure(closure, args=(), run_async=False):
86
+ """Adds a closure to the list of the ones to be run at the end of the step.
87
+ Many times during model training there is the need to print/report (print to
88
+ console, post to tensorboard, etc...) information which require the content of
89
+ intermediary tensors to be inspected.
90
+ Inspecting different tensors content in different points of the model code
91
+ requires many executions and typically causes performance issues.
92
+ Adding a step closure will ensure that it will be run after the barrier, when
93
+ all the live tensors will be already materialized to device data.
94
+ Live tensors which will include the ones captured by the closure arguments.
95
+ So using `add_step_closure()` will ensure a single execution will be
96
+ performed, even when multiple closures are queued, requiring multiple tensors
97
+ to be inspected.
98
+ Step closures will be run sequentially in the order they have been queued.
99
+ Note that even though using this API the execution will be optimized, it is
100
+ advised to throttle the printing/reporting events once every N steps.
101
+ Args:
102
+ closure (callable): The function to be called.
103
+ args (tuple): The arguments to be passed to the closure.
104
+ run_async: If True, run the closure asynchronously.
105
+ """
106
+ devctx = get_device_context()
107
+ closures_type = "async_step_closures" if run_async else "step_closures"
108
+ step_closures = getattr(devctx, closures_type, None)
109
+ if step_closures is None:
110
+ step_closures = []
111
+ setattr(devctx, closures_type, step_closures)
112
+ step_closures.append(lambda a=args: closure(*a))
113
+
114
+
115
+ def run_step_closures():
116
+ devctx = get_device_context()
117
+ async_step_closures = getattr(devctx, "async_step_closures", None)
118
+ if async_step_closures is not None:
119
+ devctx.async_step_closures = []
120
+ async_closure_handler = getattr(devctx, "async_closure_handler", None)
121
+ if async_closure_handler is None:
122
+ async_closure_handler = AsyncClosureHandler()
123
+ devctx.async_closure_handler = async_closure_handler
124
+ async_closure_handler(async_step_closures)
125
+
126
+ step_closures = getattr(devctx, "step_closures", None)
127
+ if step_closures is not None:
128
+ devctx.step_closures = []
129
+ closure_handler = getattr(devctx, "closure_handler", None)
130
+ if closure_handler is None:
131
+ closure_handler = ClosureHandler()
132
+ devctx.closure_handler = closure_handler
133
+ closure_handler(step_closures)
134
+ return devctx
llava_next/lib/python3.10/site-packages/torch/_lazy/computation.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch._C._lazy
2
+ import torch._C._lazy_ts_backend
3
+
4
+
5
def get_tensors_ts_device_data_node(tensors):
    """Return the tensor ids and eager tensors for the DeviceData nodes in
    the IR of the given lazy tensors.

    TODO: currently TS-backend specific; to be generalized to all backends
    including XLA.
    """
    ts_backend = torch._C._lazy_ts_backend
    return ts_backend._get_tensors_ts_device_data_node(tensors)
13
+
14
+
15
def get_graph_hash(tensors):
    """Return the hash of the lazy IR graph rooted at the given lazy tensors."""
    hasher = torch._C._lazy._get_graph_hash
    return hasher(tensors)
18
+
19
+
20
def run_cached_graph(hash_str, graph_inputs):
    """Run the computation graph cached under ``hash_str`` with the given
    inputs and return its outputs.

    TODO: currently TS-backend specific; to be generalized to all backends
    including XLA.
    """
    ts_backend = torch._C._lazy_ts_backend
    return ts_backend._run_cached_graph(hash_str, graph_inputs)
llava_next/lib/python3.10/site-packages/torch/_lazy/config.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch._C._lazy
2
+
3
+
4
def get_force_fallback():
    """Return the current value of the config that forces LTC fallback."""
    fallback = torch._C._lazy._get_force_fallback()
    return fallback
7
+
8
+
9
def set_force_fallback(configval):
    """Set the config used to force LTC fallback to the given value."""
    setter = torch._C._lazy._set_force_fallback
    setter(configval)
12
+
13
+
14
def set_reuse_ir(val: bool):
    """Enable or disable IR-node reuse (speeds up repeated tracing)."""
    setter = torch._C._lazy._set_reuse_ir
    setter(val)
llava_next/lib/python3.10/site-packages/torch/_lazy/device_context.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ from typing import Any, Dict
3
+
4
+ import torch._C._lazy
5
+
6
+
7
class DeviceContext:
    """Per-device state holder shared across threads.

    One instance exists per device string; instances are cached in
    ``_CONTEXTS`` (keyed by the device string) and looked up under
    ``_CONTEXTS_LOCK``. Other torch._lazy modules attach arbitrary
    attributes (closure queues, handlers, ...) to an instance at runtime.
    """

    # Cache of device string -> DeviceContext; guarded by _CONTEXTS_LOCK.
    _CONTEXTS: Dict[str, Any] = {}
    _CONTEXTS_LOCK = threading.Lock()

    def __init__(self, device):
        # The device string this context belongs to.
        self.device = device
13
+
14
+
15
def get_device_context(device=None):
    """Return the cached :class:`DeviceContext` for ``device``.

    When ``device`` is None the default lazy device type is used as the key;
    otherwise the stringified device is. A new context is created and cached
    on first request for a given key; the cache lookup/insert is serialized
    by the class-level lock.
    """
    key = (
        torch._C._lazy._get_default_device_type()
        if device is None
        else str(device)
    )
    with DeviceContext._CONTEXTS_LOCK:
        ctx = DeviceContext._CONTEXTS.get(key)
        if ctx is None:
            ctx = DeviceContext(key)
            DeviceContext._CONTEXTS[key] = ctx
        return ctx
llava_next/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import dataclasses
3
+ import itertools
4
+ import os
5
+ from typing import Any, Callable, Dict, List
6
+
7
+ import torch
8
+ import torch._lazy as lazy
9
+ import torch._lazy.metrics as metrics
10
+ from torch import fx
11
+ from torch._lazy import computation, debug as lazy_debug
12
+ from torch._lazy.tensor_factory_functions import tensor_factory_functions
13
+
14
# Verbose dumps (Fx code, LTC IR, graph hash) are printed when this env var
# is set to any value.
debug = "debug_extract_compiled_graph" in os.environ
15
+
16
+
17
@dataclasses.dataclass
class GraphInputMatcher:
    """
    Set up the graph inputs for future calls after lazy tracing.

    Graph inputs that correspond to method parameters are replaced with the
    arguments of the current call; all others keep the ivalue recorded at
    trace time.

    tensor_id_to_arg_idx maps a tensor id to the parameter index.
    graph_input_tensor_ids and graph_input_ivalues list, per TS/XLA graph
    input, its tensor id and traced ivalue.
    """

    tensor_id_to_arg_idx: Dict[int, int]
    graph_input_tensor_ids: List[int]
    # Two categories of graph inputs:
    # 1. id not in tensor_id_to_arg_idx: most likely a const tensor whose
    #    content comes from the traced ivalue below.
    # 2. id in tensor_id_to_arg_idx: the tensor comes from the call arguments.
    graph_input_ivalues: List[Any]

    def __call__(self, args):
        """Return the real graph input tensors for this call."""
        lookup = self.tensor_id_to_arg_idx
        return [
            args[lookup[tid]] if tid in lookup else traced
            for tid, traced in zip(
                self.graph_input_tensor_ids, self.graph_input_ivalues
            )
        ]
51
+
52
+
53
class ReturnValueHandler:
    r"""
    When ltc_sync_multi is called on multiple tensors, the compiled graph
    only produces an output per *unique* tensor — if a tensor appears more
    than once in the input to _ltc_sync_multi, only its first occurrence
    matters.

    At the Python level, however, duplicates are still expected. E.g. for:

        def forward(self, a):
            return a, a

    the TS graph captured by LTC returns a single tensor while the Python
    method must return two.

    This class dedups the lazy tensors up front (by identity) and records,
    for each unique tensor, the positions it must be duplicated into later.
    """

    def __init__(self, lazy_out_list):
        # index[u] lists every output position occupied by unique tensor u.
        self.index: List[List[int]] = []
        self.total_count = len(lazy_out_list)

        first_seen: Dict[int, int] = {}
        for pos, lazy_tensor in enumerate(lazy_out_list):
            key = id(lazy_tensor)
            if key in first_seen:
                self.index[first_seen[key]].append(pos)
            else:
                first_seen[key] = len(self.index)
                self.index.append([pos])

    def duplicate_eager_tensors(self, eager_tensor_list):
        """Expand one-eager-tensor-per-unique-output back to full length."""
        assert len(eager_tensor_list) == len(self.index)
        duplicated = [None] * self.total_count
        for eager_tensor, positions in zip(eager_tensor_list, self.index):
            for pos in positions:
                duplicated[pos] = eager_tensor
        return duplicated
93
+
94
+
95
+ def force_lazy_device(model: fx.GraphModule):
96
+ """
97
+ Factory methods in a Fx graph may create tensors for a specific eager devices.
98
+ If we take no actions, those eager tensors will be mixed with lazy tensors and
99
+ cause crash. This method overwrite those eager device to lazy device.
100
+ """
101
+
102
+ def tolazydevice(dev):
103
+ if isinstance(dev, torch.device):
104
+ return torch.device("lazy", index=dev.index)
105
+ return dev
106
+
107
+ def hasDeviceArg(args, kwargs):
108
+ return any(
109
+ isinstance(arg, torch.device)
110
+ for arg in itertools.chain(args, kwargs.values())
111
+ )
112
+
113
+ for nd in model.graph.nodes:
114
+ nd.args = tuple(tolazydevice(arg) for arg in nd.args)
115
+ nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()}
116
+
117
+ # For torchbench like yolov3, hf_Bart, dynamo generates Fx graph that return
118
+ # eager tensors on the default device
119
+ # (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolove,
120
+ # and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart).
121
+ # To force those tensors on the lazy device, we can not simply override
122
+ # the device argument since there is no explicit device argument.
123
+ # What we are doing here is, for the list of covered tensor factory methods
124
+ # we add a lazy device argument explicity.
125
+ #
126
+ # TODO: This solution is no ideal since we may miss some factory methods. In future
127
+ # when we support lazy mode, this method can be replaced by that.
128
+ if nd.target in tensor_factory_functions and not hasDeviceArg(
129
+ nd.args, nd.kwargs
130
+ ):
131
+ kwargs = dict(nd.kwargs) # nd.kwargs is immutable. make a mutable copy.
132
+ kwargs["device"] = torch.device("lazy")
133
+ nd.kwargs = kwargs
134
+
135
+ model.recompile()
136
+
137
+
138
def get_fallback_ops():
    """Return ``"opname=count"`` entries for every ``aten::`` counter > 0.

    A positive ``aten::`` counter means LTC fell back to the eager kernel
    for that op during the most recent trace (since the last metrics reset).
    """
    return [
        f"{opname}={count}"
        for opname in metrics.counter_names()
        if "aten::" in opname
        and (count := int(metrics.counter_value(opname))) > 0
    ]
148
+
149
+
150
def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable:
    """
    Optimize an eager model with LTC and return a wrapper to execute the
    compiled graph directly without retracing. It depends on other mechanisms
    like TorchDynamo guards to guarantee the returned wrapper is only called
    when it's safe.

    Args:
        model: the Fx graph module to compile.
        example_inputs: eager tensors used to trace the model once.

    Returns:
        A callable with the same signature as ``model`` that replays the
        cached LTC computation graph and copies back in-place updates.

    Raises:
        RuntimeError: if any op fell back to eager during lazy tracing,
            since then no complete graph can be extracted.
    """
    # Move the inputs to the lazy device and record which graph-input tensor
    # id corresponds to which positional argument.
    lazy_args = [arg.to(device="lazy") for arg in example_inputs]
    args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args]
    tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)}
    lazy_model = copy.deepcopy(model).to(device=torch.device("lazy"))
    force_lazy_device(lazy_model)

    # This line executes lazy tracing and enables us to extract the compiled
    # graph later. Metrics are reset around it so the fallback counters
    # reflect only this trace.
    metrics.reset()
    lazy_out = lazy_model(*lazy_args)
    fallback_ops = get_fallback_ops()
    metrics.reset()

    if len(fallback_ops) > 0:
        raise RuntimeError(
            f"Failed to extract the compiled graph because of fallback: {','.join(fallback_ops)}"
        )

    if not isinstance(lazy_out, (tuple, list)):
        lazy_out = (lazy_out,)

    # The graph covers both inputs and outputs so that in-place updates to
    # the inputs can be propagated back to the caller (see optimized_mod).
    args_and_out = tuple(lazy_args) + tuple(lazy_out)
    return_value_handler = ReturnValueHandler(args_and_out)
    if debug:
        print("Fx code:\n", model.code)
        print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text"))

    # TODO: this part is TS backend specific for now and will be generalized to
    # support XLA
    (
        graph_input_tensor_ids,
        graph_input_ivalues,
    ) = computation.get_tensors_ts_device_data_node(args_and_out)
    assert len(graph_input_tensor_ids) == len(graph_input_ivalues)
    graph_input_matcher = GraphInputMatcher(
        tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues
    )

    graph_hash = computation.get_graph_hash(args_and_out)

    if debug:
        print("graph_hash", graph_hash)
        print(f"args_tensor_ids {args_tensor_ids}")
        print("tensor ids from device data:", graph_input_tensor_ids)

    # sync the list of output tensors so the computation graph for these
    # tensors will be cached. Those computation graphs can be retrieved
    # by graph hash later.
    lazy.sync_multi(args_and_out, [])

    def optimized_mod(*args):
        # Replay the cached graph with the caller's arguments substituted in
        # for the traced parameter inputs.
        if len(args_and_out) == 0:
            return ()
        graph_input = graph_input_matcher(args)
        res = return_value_handler.duplicate_eager_tensors(
            computation.run_cached_graph(graph_hash, graph_input)
        )

        assert len(res) == len(args_and_out)
        for i, arg in enumerate(args):
            # only copy those tensors that get inplace updated
            if arg is not res[i]:
                arg.copy_(res[i])

        # skip the args
        return res[len(args) :]

    return optimized_mod
llava_next/lib/python3.10/site-packages/torch/_lazy/metrics.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch._C._lazy
2
+
3
+
4
def reset():
    """Reset every lazy-tensor metric counter to its initial state."""
    core = torch._C._lazy
    core._reset_metrics()
7
+
8
+
9
def counter_names():
    """Return the names of all currently active metric counters."""
    names = torch._C._lazy._counter_names()
    return names
12
+
13
+
14
def counter_value(name: str):
    """Return the value of the counter with the specified name."""
    getter = torch._C._lazy._counter_value
    return getter(name)
17
+
18
+
19
+ def metrics_report():
20
+ """Return the combined (lazy core and backend) metric report"""
21
+ return torch._C._lazy._metrics_report()