File size: 6,452 Bytes
a3f2e4d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from bitblas import tvm
from tvm import IRModule
from tvm.runtime import ndarray
from tvm.driver import lower
from tvm.target import Target
from typing import Tuple, List
from tvm import tir
from tvm import tl
from tvm.tl.engine import is_device_call


def get_annotated_device_mod_from_tl(mod: IRModule, target: Target) -> "IRModule":
    """Lower a TL (tile-language) IRModule through the TL/TIR pass pipeline
    and return an IRModule containing only the device-side (kernel) functions.

    Parameters:
    - mod: The input IRModule produced by the TL frontend.
    - target: The device compilation target; the host is fixed to llvm/cpu.

    Returns:
    - An IRModule holding only the functions selected by ``is_device_call``.
    """
    host = tvm.target.Target("llvm -keys=cpu")
    target = tvm.target.Target(target, host)
    mod = tir.transform.BindTarget(target)(mod)

    # Frontend legalization and tile-level lowering.
    for transform in (
            tl.transform.FrontendLegalize(),
            tir.transform.Simplify(),
            tl.transform.LayoutInference(),
            tl.transform.LowerTileOp(),
            tir.transform.Simplify(),
    ):
        mod = transform(mod)

    # Hopper (sm_90) gets a warp-specialized pipeline; everything else uses
    # the generic software-pipelining path.
    if target.arch == "sm_90":
        mod = tl.transform.WarpSpecializedPipeline()(mod)
    else:
        for transform in (
                tir.transform.PlanAndUpdateBufferAllocationLocation(),
                tl.transform.PipelinePlanning(),
                tl.transform.InjectSoftwarePipeline(),
        ):
            mod = transform(mod)

    # Generic TIR lowering and cleanup passes.
    for transform in (
            tir.transform.LowerOpaqueBlock(),
            tir.transform.FlattenBuffer(),
            tir.transform.NarrowDataType(32),
            tir.transform.Simplify(),
            tir.transform.VectorizeLoop(),
            tir.transform.StorageRewrite(),
            tir.transform.UnrollLoop(),
            tir.transform.RenormalizeSplitPattern(),
            tir.transform.Simplify(),
            tir.transform.RemoveNoOp(),
            tir.transform.RewriteUnsafeSelect(),
            tir.transform.HoistIfThenElse(),
            tir.transform.VerifyMemory(),
            tir.transform.AnnotateEntryFunc(),
            tir.transform.ThreadSync("shared"),
    ):
        mod = transform(mod)

    # TODO(lei): This is a hack to make sure the
    # thread level allreduce pass can be applied
    # in TL. As Tl only use one thread dimension
    # the var binding information will be lost
    # in the lowering process with Legalization
    # and Simplify pass.
    # We can find a way better to create var instead
    # of putting the LowerThreadAllreduce before
    # the Legalization.
    for transform in (
            tir.transform.LowerThreadAllreduce(),
            tir.transform.ThreadSync("shared.dyn"),
            tl.transform.LowerHopperIntrin(),
            tir.transform.InjectPTXAsyncCopy(),
            tir.transform.AnnotateDeviceRegions(),
            tir.transform.SplitHostDevice(),
            tir.transform.MergeSharedMemoryAllocations(),
            tir.transform.MakePackedAPI(),
            tir.transform.LowerDeviceKernelLaunch(),
    ):
        mod = transform(mod)

    # Keep only the device kernels.
    return tir.transform.Filter(is_device_call)(mod)


def get_annotated_device_mod_from_tir(mod: IRModule, target: Target) -> "IRModule":
    """
    Lower the given IRModule and create a device module for the specified target.

    Parameters:
    - mod: The input IRModule.
    - target: The compilation target.

    Returns:
    - A device module ready for execution.
    """
    input_mod = lower(mod)
    target_input_mod = {target: input_mod}
    annotated_mods = {}
    runtime = None
    target_host = None
    # NOTE: loop variables are named tgt/tgt_mod so they do not shadow the
    # `target`/`mod` parameters (the original code clobbered both).
    for tgt, tgt_mod in target_input_mod.items():
        if not isinstance(tgt, (str, Target)):
            raise ValueError("The key of inputs must be str or "
                             "Target when inputs is dict.")
        if not isinstance(tgt_mod, tvm.IRModule):
            raise ValueError("inputs must be Schedule, IRModule, "
                             "or dict of str to IRModule.")
        annotated_mods[tgt] = tgt_mod.with_attr("runtime", runtime)
    annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
    if not target_host:
        # Pick the first CPU-like target in the map as the host target.
        for tar in annotated_mods:
            device_type = ndarray.device(tar.kind.name, 0).device_type
            if device_type == ndarray.cpu(0).device_type:
                target_host = tar
                break
    if not target_host:
        target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
    # Re-canonicalize now that a host target has been determined.
    annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
    # Hoist the global-func lookups: they are loop-invariant.
    mixed_mod_passes = tvm.get_global_func("driver.mixed_mod_passes")
    device_mod_passes = tvm.get_global_func("driver.device_mod_passes")
    device_mod = None
    for tgt, tgt_mod in annotated_mods.items():
        tgt_mod = mixed_mod_passes(tgt_mod, tgt)(tgt_mod)
        # Keeps the last target's device module, matching the original
        # behavior (in practice the map holds a single entry).
        device_mod = device_mod_passes(tgt_mod, tgt)(tgt_mod)
    return device_mod


def get_annotated_device_mod(mod: IRModule, target: Target, backend="tir") -> "IRModule":
    """Dispatch lowering to the TIR or TL path based on *backend*.

    Parameters:
    - mod: The input IRModule.
    - target: The compilation target.
    - backend: Either "tir" or "tl"; anything else raises ValueError.

    Returns:
    - The device module produced by the selected lowering path.
    """
    dispatch = {
        "tir": get_annotated_device_mod_from_tir,
        "tl": get_annotated_device_mod_from_tl,
    }
    lower_fn = dispatch.get(backend)
    if lower_fn is None:
        raise ValueError("Unsupported backend: {}".format(backend))
    return lower_fn(mod, target)


def get_thread_block_information(mod: IRModule) -> Tuple[List[int], List[int]]:
    """
    Extract thread-block and grid dimensions from the loop thread bindings
    of every child block under the root block of the given IRModule.

    Parameters:
    - mod: The input IRModule from which to extract thread block and grid information.

    Returns:
    A tuple of two lists:
    - [threadIdx.x, threadIdx.y, threadIdx.z] extents.
    - [blockIdx.x, blockIdx.y, blockIdx.z] extents.
    Unbound axes keep the default extent of 1.
    """
    sch = tvm.tir.Schedule(mod)
    child_blocks = sch.get_child_blocks(sch.get_block("root"))

    # Defaults: every axis has extent 1 until a binding says otherwise.
    thread_dims = [1, 1, 1]
    grid_sizes = [1, 1, 1]

    # The last character of a thread tag ("x"/"y"/"z") selects the slot.
    axis_index = "xyz".index

    for blk in child_blocks:
        for loop_rv in sch.get_loops(blk):
            loop_stmt = sch.get(loop_rv)
            binding = loop_stmt.thread_binding
            extent = int(loop_stmt.extent)
            # Loops without a thread binding contribute nothing.
            if not binding:
                continue
            tag = binding.thread_tag
            if "threadIdx" in tag:
                thread_dims[axis_index(tag[-1])] = extent
            elif "blockIdx" in tag:
                grid_sizes[axis_index(tag[-1])] = extent

    return thread_dims, grid_sizes