id
int64
0
328k
repository_name
stringlengths
7
58
file_path
stringlengths
9
302
class_name
stringlengths
5
256
human_written_code
stringlengths
16
2.16M
class_skeleton
stringlengths
18
1.49M
total_program_units
int64
1
1.76k
total_doc_str
int64
0
771
AvgCountLine
float64
0
7.89k
AvgCountLineBlank
float64
0
297
AvgCountLineCode
float64
0
7.89k
AvgCountLineComment
float64
0
7.89k
AvgCyclomatic
float64
0
130
CommentToCodeRatio
float64
0
168
CountClassBase
float64
0
40
CountClassCoupled
float64
0
583
CountClassCoupledModified
float64
0
575
CountClassDerived
float64
0
5.35k
CountDeclInstanceMethod
float64
0
529
CountDeclInstanceVariable
float64
0
296
CountDeclMethod
float64
0
599
CountDeclMethodAll
float64
0
1.12k
CountLine
float64
1
40.4k
CountLineBlank
float64
0
8.16k
CountLineCode
float64
1
25.7k
CountLineCodeDecl
float64
1
8.15k
CountLineCodeExe
float64
0
24.2k
CountLineComment
float64
0
16.5k
CountStmt
float64
1
9.71k
CountStmtDecl
float64
1
8.15k
CountStmtExe
float64
0
9.69k
MaxCyclomatic
float64
0
759
MaxInheritanceTree
float64
0
16
MaxNesting
float64
0
34
SumCyclomatic
float64
0
2.9k
327,600
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.MMA
import torch.fx as fx from ..lang.wave_types import IndexMapping, Memory, Register from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields @define_op('mma') @dataclass class MMA(MMABase): lhs: fx.Node rhs: fx.Node acc: fx.Node mma_type: Optional['MMAType'] | 'GenericDot' = None @property def indexing_dims(self) -> list[IndexSymbol]: combined_dims = get_custom(self.lhs).indexing_dims + get_custom(self.rhs).indexing_dims + get_custom(self.acc).indexing_dims unique_dims = list(dict.fromkeys(combined_dims)) return unique_dims @property def lhs_type(self) -> Memory: return get_custom(self.lhs).type @property def rhs_type(self) -> Memory: return get_custom(self.rhs).type @property def acc_type(self) -> Memory: return get_custom(self.acc).type def infer_type(self, *args): self.type = self.acc_type def operand_index(self, operand_map: dict[IndexSymbol, int], shape: list[IndexExpr]) -> dict[IndexSymbol, IndexSequence]: from ..wave.utils.general_utils import infer_dim indices: dict[IndexSymbol, IndexSequence] = {} for dim_expr in shape: dim = infer_dim(dim_expr) indices[dim] = self.index[dim].subs(operand_map) return indices @property def lhs_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 1, MMA_RHS: 0, MMA_ACC: 0} return self.operand_index(operand_map, self.lhs_type.symbolic_shape) @property def rhs_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 0, MMA_RHS: 1, MMA_ACC: 0} return self.operand_index(operand_map, self.rhs_type.symbolic_shape) @property def acc_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 0, MMA_RHS: 0, MMA_ACC: 1} if self.acc_type is None: return None return self.operand_index(operand_map, self.acc_type.symbolic_shape) def custom_string(self, value_map: dict[str, str]) -> str: if self.index is None: return 
super().custom_string(value_map) custom_str = f'{self.tkw_op_name}(' custom_str += f'lhs={self.lhs} (index = {self.lhs_index}), ' custom_str += f'rhs={self.rhs} (index = {self.rhs_index}), ' custom_str += f'acc={self.acc} (index = {self.acc_index}))' custom_str += f' type({self.fx_node.type})' return custom_str @property def reduction_dim(self) -> IndexSymbol: if hasattr(self.fx_node, 'reduction_dim'): return self.fx_node.reduction_dim @reduction_dim.setter def reduction_dim(self, value: IndexSymbol): self.fx_node.reduction_dim = value
@define_op('mma') @dataclass class MMA(MMABase): @property def indexing_dims(self) -> list[IndexSymbol]: pass @property def lhs_type(self) -> Memory: pass @property def rhs_type(self) -> Memory: pass @property def acc_type(self) -> Memory: pass def infer_type(self, *args): pass def operand_index(self, operand_map: dict[IndexSymbol, int], shape: list[IndexExpr]) -> dict[IndexSymbol, IndexSequence]: pass @property def lhs_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def rhs_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def acc_index(self) -> dict[IndexSymbol, IndexSequence]: pass def custom_string(self, value_map: dict[str, str]) -> str: pass @property def reduction_dim(self) -> IndexSymbol: pass @reduction_dim.setter def reduction_dim(self) -> IndexSymbol: pass
24
0
4
0
4
0
1
0
1
7
2
0
12
1
12
71
77
13
64
36
39
0
49
25
35
2
6
1
16
327,601
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.MMABase
class MMABase(CustomOp): pass
class MMABase(CustomOp): pass
1
0
0
0
0
0
0
0
1
0
0
2
0
0
0
59
2
0
2
1
1
0
2
1
1
0
5
0
0
327,602
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.NestedRegionOp
import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final class NestedRegionOp(CustomOp): def captured_vars(self, graph: fx.Graph) -> list[fx.Node]: """ Nodes that are placeholders and are not iter args are captured vars. """ captured_vars = [] for nested_node in graph.nodes: custom = get_custom(nested_node) if isinstance(custom, Placeholder) and (not isinstance(custom, IterArg)): captured_vars.append(nested_node) return captured_vars def get_outer_node(self, outer_node: fx.Node) -> fx.Node: while 'lifted' in outer_node.meta: outer_node = outer_node.meta['lifted'] return outer_node def get_captured_fx_node(self, graph: fx.Graph, outer_node: fx.Node) -> Optional[fx.Node]: outer_node = self.get_outer_node(outer_node) for var in self.captured_vars(graph): custom = get_custom(var) if custom.get_captured_fx_node() == outer_node: return var return None def get_root_graph(self): """ Return the "root"/outermost layer of our computation graph. This is done by iteratively accessing parent_graph of current graph. This is done until we find the "root" graph who will have "subgraph" attribute. """ cur_graph = self.graph while not hasattr(cur_graph, 'subgraphs'): if not hasattr(cur_graph, 'parent_op'): raise ValueError('All subgraphs should have parent_op') cur_graph = cur_graph.parent_op.graph return cur_graph def erase(self): subgraphs = self.get_root_graph().subgraphs subgraph = subgraphs[self.subgraph_name] for node in list(subgraph.nodes)[::-1]: get_custom(node).erase() del subgraphs[self.subgraph_name] super().erase()
class NestedRegionOp(CustomOp): def captured_vars(self, graph: fx.Graph) -> list[fx.Node]: ''' Nodes that are placeholders and are not iter args are captured vars. ''' pass def get_outer_node(self, outer_node: fx.Node) -> fx.Node: pass def get_captured_fx_node(self, graph: fx.Graph, outer_node: fx.Node) -> Optional[fx.Node]: pass def get_root_graph(self): ''' Return the "root"/outermost layer of our computation graph. This is done by iteratively accessing parent_graph of current graph. This is done until we find the "root" graph who will have "subgraph" attribute. ''' pass def erase(self): pass
6
2
9
0
7
2
3
0.26
1
5
2
2
5
0
5
64
50
6
35
17
27
9
33
15
27
3
5
2
13
327,603
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.NewRegister
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from .._support.dtype import DataType, i1 from dataclasses import dataclass, field, fields from ..lang.wave_types import IndexMapping, Memory, Register @define_op('register') @dataclass class NewRegister(CustomOp): shape: tuple[IndexExpr, ...] dtype: DataType value: float @property def indexing_dims(self) -> list[IndexSymbol]: return list(self.shape) def infer_type(self, *args): self.type = Register[*self.shape, self.dtype]
@define_op('register') @dataclass class NewRegister(CustomOp): @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass
6
0
2
0
2
0
1
0
1
2
1
0
2
1
2
61
11
2
9
5
5
0
8
4
5
1
5
0
2
327,604
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.NewScalar
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from .._support.dtype import DataType, i1 from dataclasses import dataclass, field, fields @define_op('scalar') @dataclass class NewScalar(CustomOp): value: float | IndexExpr dtype: DataType @property def indexing_dims(self) -> list[IndexSymbol]: return list() def infer_type(self, *args): self.type = self.dtype
@define_op('scalar') @dataclass class NewScalar(CustomOp): @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass
6
0
2
0
2
0
1
0
1
1
0
0
2
1
2
61
10
2
8
5
4
0
7
4
4
1
5
0
2
327,605
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Output
from .._support.regions import RegionGraph from dataclasses import dataclass, field, fields import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final @dataclass class Output(CustomOp): """ Represents an output node in the graph, representing the return value of a traced function. """ return_vals: Sequence[Any] tkw_op_name: str = field(default='output', init=False) @classmethod def from_fx_node(cls: Type[CustomOpT], node: fx.Node) -> CustomOpT: instance = cls(node.args) instance.fx_node = node instance.graph = node.graph return instance def add_to_graph(self, region_graph: RegionGraph) -> fx.Node: self.graph = region_graph self.fx_node = region_graph.create_node('output', target='output', args=tuple([self.return_vals]), kwargs={}) self.fx_node.tkw_op = self.__class__ self.fx_node.tkw_op_name = self.tkw_op_name return self.fx_node @property def has_side_effects(self) -> bool: return True
@dataclass class Output(CustomOp): ''' Represents an output node in the graph, representing the return value of a traced function. ''' @classmethod def from_fx_node(cls: Type[CustomOpT], node: fx.Node) -> CustomOpT: pass def add_to_graph(self, region_graph: RegionGraph) -> fx.Node: pass @property def has_side_effects(self) -> bool: pass
7
1
6
0
6
0
1
0.17
1
3
1
0
2
2
3
62
31
4
23
10
17
4
16
8
12
1
5
0
3
327,606
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Permute
from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from abc import ABC import torch.fx as fx @define_op('permute') @dataclass class Permute(CustomOp, ABC): """ Represents a permute operation that permutes arg into the target shape. """ arg: fx.Node target_shape: Sequence[IndexExpr] @property def indexing_dims(self) -> list[IndexExpr]: return self.target_shape def infer_type(self, *args): src_type = get_custom(self.arg).type assert set(src_type.symbolic_shape) == set(self.target_shape), f'Target shape {self.target_shape} must be a permutation of source shape {src_type.symbolic_shape}' self.type = Register[*self.target_shape, src_type.dtype] def transform_index(self, index: dict[IndexSymbol, IndexSequence]) -> dict[IndexSymbol, IndexSequence]: """ The permute operation swaps the strides of the permuted indices. So say we have a permute operation that swaps [B, M, N] to [M, N, B], then we swap the strides of the dimensions. """ assert self.vector_shapes is not None, '`vector_shapes` must be set before calling this function' custom_src = get_custom(self.arg) src_shape = custom_src.type.symbolic_shape src_to_target = {src: self.target_shape[src_shape.index(src)] for src in src_shape} non_unit_its = [k for k, v in self.vector_shapes.items() if v != 0] non_unit_src = [d for d in src_shape if d in non_unit_its] non_unit_tgt = [d for d in self.target_shape if d in non_unit_its] if non_unit_src == non_unit_tgt: return {k: index[k] for k in self.target_shape if k in index} permuted_index = {k: IndexSequence(v.start, v.size, index[src_to_target[k]].stride) for k, v in index.items() if k in src_shape} return permuted_index
@define_op('permute') @dataclass class Permute(CustomOp, ABC): ''' Represents a permute operation that permutes arg into the target shape. ''' @property def indexing_dims(self) -> list[IndexExpr]: pass def infer_type(self, *args): pass def transform_index(self, index: dict[IndexSymbol, IndexSequence]) -> dict[IndexSymbol, IndexSequence]: ''' The permute operation swaps the strides of the permuted indices. So say we have a permute operation that swaps [B, M, N] to [M, N, B], then we swap the strides of the dimensions. ''' pass
7
2
15
0
10
5
1
0.53
2
5
2
0
3
1
3
62
57
5
34
16
27
18
21
13
17
2
5
1
4
327,607
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Placeholder
import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from .._support.regions import RegionGraph from .._support.dtype import DataType, i1 @dataclass class Placeholder(CustomOp): """ Represents a placeholder node in the graph, i.e. an input to a function. """ _name: str _type: Optional[Type[DataType] | Type[Memory]] = None tkw_op_name: str = field(default='placeholder', init=False) @classmethod def from_fx_node(cls: Type[PlaceholderT], node: fx.Node) -> PlaceholderT: instance = cls(node.name, node.type) instance.fx_node = node instance.graph = node.graph return instance def add_to_graph(self, region_graph: RegionGraph) -> fx.Node: self.graph = region_graph self.fx_node = region_graph.create_node('placeholder', target=self._name) self.fx_node.tkw_op = self.__class__ self.fx_node.tkw_op_name = self.tkw_op_name return self.fx_node def custom_string(self, value_map: dict[str, str]) -> str: vars_list = [f'{key}={value}' for key, value in vars(self).items()][:-2] vars_str = ', '.join(vars_list) return f'{self.tkw_op_name}({vars_str}) type({self.fx_node.type})' def erase(self): """Erase the current node from the graph where it exists.""" super().erase() if not hasattr(self.graph, 'parent_op'): return parent = self.graph.parent_op custom = get_custom(parent) if not isinstance(custom, NestedRegionOp): return subgraph = custom.get_root_graph().subgraphs[custom.subgraph_name] live_captures = [] for var in custom.implicit_captures: if custom.get_captured_fx_node(subgraph, var): live_captures.append(var) custom.update_arg('implicit_captures', live_captures) @property def indexing_dims(self) -> list[IndexSymbol]: if not hasattr(self._type, 'symbolic_shape'): return [] return list(self._type.symbolic_shape) if self._type else [] def 
get_captured_fx_node(self) -> Optional[fx.Node]: return self.fx_node.meta.get('lifted', None) def infer_type(self, *args): self.fx_node.type = self._type @property def index(self) -> list[dict[IndexSymbol, IndexSequence]]: var = self.get_captured_fx_node() if var is not None: return get_custom(var).index if hasattr(self.fx_node, 'index'): return self.fx_node.index return None @index.setter def index(self, value: Any): var = self.get_captured_fx_node() if var is None: CustomOp.index.fset(self, value) return get_custom(var).index = value
@dataclass class Placeholder(CustomOp): ''' Represents a placeholder node in the graph, i.e. an input to a function. ''' @classmethod def from_fx_node(cls: Type[PlaceholderT], node: fx.Node) -> PlaceholderT: pass def add_to_graph(self, region_graph: RegionGraph) -> fx.Node: pass def custom_string(self, value_map: dict[str, str]) -> str: pass def erase(self): '''Erase the current node from the graph where it exists.''' pass @property def indexing_dims(self) -> list[IndexSymbol]: pass def get_captured_fx_node(self) -> Optional[fx.Node]: pass def infer_type(self, *args): pass @property def indexing_dims(self) -> list[IndexSymbol]: pass @index.setter def indexing_dims(self) -> list[IndexSymbol]: pass
15
2
7
1
6
0
2
0.1
1
8
3
1
8
2
9
68
81
17
58
28
44
6
54
24
44
5
5
2
18
327,608
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Read
import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from .._support.dtype import DataType, i1 @define_op('read') @dataclass class Read(CustomOp): memory: fx.Proxy elements_per_thread: Optional[Any] = None mapping: Optional[IndexMapping] = None mapping_dynamic_vals: tuple[fx.Node, ...] = () bounds: Optional[dict[IndexSymbol, IndexExpr]] = None source: Optional[tuple[IndexExpr]] = None target: Optional[tuple[IndexExpr]] = None _write_dependency: Optional[list[fx.Node]] = None @property def indexing_dims(self) -> list[IndexSymbol]: from ..wave.utils.general_utils import infer_dim if self.mapping is not None: return list(self.mapping.output_shape) shape = list(self.memory_type.symbolic_shape) dims = [infer_dim(expr) for expr in shape] return dims def infer_type(self, *args): from ..wave.utils.general_utils import infer_dim dtype = self.memory_type.dtype memory_shape = list(self.memory_type.symbolic_shape) dim_to_shape = {infer_dim(expr): expr for expr in memory_shape} shape = [dim_to_shape.get(dim, dim) for dim in self.indexing_dims] if self.target: shape = get_shape_from_bindings(args[0], self.target) self.type = Register[*shape, dtype] @property def memory_type(self) -> 'Memory': return get_custom(self.memory).type @property def dtype(self) -> DataType: return self.memory_type.dtype @property def write_dependency(self) -> fx.Node: return self._write_dependency @write_dependency.setter def write_dependency(self, value: fx.Node): self.update_arg(len(self.fx_node.args) - 1, value) def transform_index_backwards(self, index: dict[IndexSymbol, IndexSequence], arg: fx.Node) -> dict[IndexSymbol, IndexSequence]: """ Propagate index backwards. 
Dynamic values potentially can have non-identity mapping, so we need to update index when walking from the node to dyn val arguments. E.g. if `index` is $idx and dynamic_val_mappings={N: j // ELEMS_PER_THREAD} resulted arg index will be $idx // ELEMS_PER_THREAD. """ if arg in self.mapping_dynamic_vals: assert self.mapping.is_output_identity() i = self.mapping_dynamic_vals.index(arg) iters = self.mapping.iters mapping = self.mapping.dynamic_val_mappings[i] subs = {k: index[v] for k, v in zip(iters, self.mapping.output_mapping.keys())} return {k: IndexSequence.from_expr(mapping[k], subs) for k in get_custom(arg).type.symbolic_shape if k in mapping} return index def get_derived_indices(self) -> list[tuple[dict[IndexSymbol, IndexSequence], fx.Node]]: def transform_idx(arg): return {k: v for k, v in self.transform_index_backwards(self.index, arg).items() if v.start != 0} return [(arg, transform_idx(arg)) for arg in self.mapping_dynamic_vals] def has_identity_mapping(self) -> bool: """Check if mapping between input memory and output register is identity.""" mapping = self.mapping if mapping is None: return True mem_shape = get_custom(self.memory).type.symbolic_shape if mapping.is_identity() and mapping.input_shape == mem_shape: return True return False def is_contiguous_vec(self) -> bool: """Check if op can be lowered to contiguous vector ops If False we will have to lower it to gather""" if self.has_identity_mapping(): return True mapping = self.mapping mem_shape = get_custom(self.memory).type.symbolic_shape from ..wave.utils.mapping_utils import check_is_mapping_contiguous return check_is_mapping_contiguous(mapping=mapping, symbolic_shape=mem_shape, index=self.index, elements_per_thread=self.elements_per_thread, is_read=True)
@define_op('read') @dataclass class Read(CustomOp): @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass @property def memory_type(self) -> 'Memory': pass @property def dtype(self) -> DataType: pass @property def write_dependency(self) -> fx.Node: pass @write_dependency.setter def write_dependency(self) -> fx.Node: pass def transform_index_backwards(self, index: dict[IndexSymbol, IndexSequence], arg: fx.Node) -> dict[IndexSymbol, IndexSequence]: ''' Propagate index backwards. Dynamic values potentially can have non-identity mapping, so we need to update index when walking from the node to dyn val arguments. E.g. if `index` is $idx and dynamic_val_mappings={N: j // ELEMS_PER_THREAD} resulted arg index will be $idx // ELEMS_PER_THREAD. ''' pass def get_derived_indices(self) -> list[tuple[dict[IndexSymbol, IndexSequence], fx.Node]]: pass def transform_idx(arg): pass def has_identity_mapping(self) -> bool: '''Check if mapping between input memory and output register is identity.''' pass def is_contiguous_vec(self) -> bool: '''Check if op can be lowered to contiguous vector ops If False we will have to lower it to gather''' pass
19
3
11
1
7
2
2
0.23
1
8
3
0
10
2
10
69
130
24
86
47
62
20
61
37
46
3
5
1
17
327,609
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.ReduceOp
import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from abc import ABC @define_interface_op('max') @define_interface_op('min') @define_interface_op('sum') @dataclass class ReduceOp(CustomOp, ABC): """ Represents a Reduce computation. arg: Source tensor/value to reduce init: init/accumulator for reduce dim: which symbolic dim to reduce. block: When set to true, reduce across block, else reduce across warp. """ arg: fx.Node | list[fx.Node] init: fx.Node = None dim: Optional[Any] = None block: Optional[bool] = False @property def indexing_dims(self) -> list[IndexSymbol]: from ..wave.utils.general_utils import all_equal if isinstance(self.arg, Sequence): src_indexings = [get_custom(arg).indexing_dims for arg in self.arg] if not all_equal(src_indexings): raise NotImplementedError('NYI: Only support case where all inputs to ReduceOp to have same indexing dim.') src_indexing = src_indexings[0] else: src_indexing = get_custom(self.arg).indexing_dims dst_indexing = [dim for dim in src_indexing if dim != self.dim] return dst_indexing def infer_type(self, *args): if isinstance(self.arg, Sequence): src_types = [get_custom(arg).type for arg in self.arg] ref_shape = src_types[0].symbolic_shape ref_dtype = src_types[0].dtype if not all((src_type.symbolic_shape == ref_shape and src_type.dtype == ref_dtype for src_type in src_types)): raise NotImplementedError('NYI: Only support case where all inputs to ReduceOp to have same type.') src_type = src_types[0] else: src_type = get_custom(self.arg).type dtype = src_type.dtype reduced_dims = [dims for dims in src_type.symbolic_shape if dims != self.dim] self.type = Register[*reduced_dims, dtype] if reduced_dims else dtype if self.init is not None and get_custom(self.init).type.symbolic_shape != 
self.type.symbolic_shape: raise RuntimeError(f'Init type for {self.tkw_op_name} {get_custom(self.init).type.symbolic_shape} must match reduce type {self.type.symbolic_shape}\n{self}') @property def reduction_dim(self) -> IndexSymbol: return self.dim
@define_interface_op('max') @define_interface_op('min') @define_interface_op('sum') @dataclass class ReduceOp(CustomOp, ABC): ''' Represents a Reduce computation. arg: Source tensor/value to reduce init: init/accumulator for reduce dim: which symbolic dim to reduce. block: When set to true, reduce across block, else reduce across warp. ''' @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass @property def reduction_dim(self) -> IndexSymbol: pass
10
1
15
0
14
0
3
0.16
2
4
1
0
3
1
3
62
63
6
49
19
42
8
31
17
26
5
5
2
9
327,610
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Reshape
import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from abc import ABC @define_op('reshape') @dataclass class Reshape(CustomOp, ABC): """ Represents a reshape operation that reshapes vectors along the same dimension. """ args: fx.Node | Sequence[fx.Node] target_vector_shape: dict[IndexSymbol, int] @property def indexing_dims(self) -> list[IndexExpr]: return get_custom(_to_sequence(self.args)[0]).indexing_dims def infer_type(self, *args): self.type = get_custom(_to_sequence(self.args)[0]).type
@define_op('reshape') @dataclass class Reshape(CustomOp, ABC): ''' Represents a reshape operation that reshapes vectors along the same dimension. ''' @property def indexing_dims(self) -> list[IndexExpr]: pass def infer_type(self, *args): pass
6
1
2
0
2
0
1
0.5
2
1
0
0
2
1
2
61
16
4
8
5
4
4
7
4
4
1
5
0
2
327,611
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.ScaledMMA
import torch.fx as fx from ..lang.wave_types import IndexMapping, Memory, Register from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields @define_op('scaled_mma') @dataclass class ScaledMMA(MMABase): lhs: fx.Node lhs_scale: fx.Node rhs: fx.Node rhs_scale: fx.Node acc: fx.Node mma_type: Optional['ScaledMMAType'] = None @property def indexing_dims(self) -> list[IndexSymbol]: combined_dims = get_custom(self.lhs).indexing_dims + get_custom(self.lhs_scale).indexing_dims + get_custom(self.rhs).indexing_dims + get_custom(self.rhs_scale).indexing_dims + get_custom(self.acc).indexing_dims unique_dims = list(dict.fromkeys(combined_dims)) return unique_dims @property def lhs_type(self) -> Memory: return get_custom(self.lhs).type @property def lhs_scale_type(self) -> Memory: return get_custom(self.lhs_scale).type @property def rhs_type(self) -> Memory: return get_custom(self.rhs).type @property def rhs_scale_type(self) -> Memory: return get_custom(self.rhs_scale).type @property def acc_type(self) -> Memory: return get_custom(self.acc).type def infer_type(self, *args): self.type = self.acc_type def operand_index(self, operand_map: dict[IndexSymbol, int], shape: list[IndexExpr]) -> dict[IndexSymbol, IndexSequence]: from ..wave.utils.general_utils import infer_dim indices: dict[IndexSymbol, IndexSequence] = {} for dim_expr in shape: dim = infer_dim(dim_expr) indices[dim] = self.index[dim].subs(operand_map) return indices @property def lhs_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 1, MMA_RHS: 0, MMA_ACC: 0, MMA_LHS_SCALE: 0, MMA_RHS_SCALE: 0} return self.operand_index(operand_map, self.lhs_type.symbolic_shape) @property def lhs_scale_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 0, MMA_RHS: 0, MMA_ACC: 0, MMA_LHS_SCALE: 1, MMA_RHS_SCALE: 0} return 
self.operand_index(operand_map, self.lhs_scale_type.symbolic_shape) @property def rhs_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 0, MMA_RHS: 1, MMA_ACC: 0, MMA_LHS_SCALE: 0, MMA_RHS_SCALE: 0} return self.operand_index(operand_map, self.rhs_type.symbolic_shape) @property def rhs_scale_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 0, MMA_RHS: 0, MMA_ACC: 0, MMA_LHS_SCALE: 0, MMA_RHS_SCALE: 1} return self.operand_index(operand_map, self.rhs_scale_type.symbolic_shape) @property def acc_index(self) -> dict[IndexSymbol, IndexSequence]: operand_map = {MMA_LHS: 0, MMA_RHS: 0, MMA_ACC: 1, MMA_LHS_SCALE: 0, MMA_RHS_SCALE: 0} if self.acc_type is None: return None return self.operand_index(operand_map, self.acc_type.symbolic_shape) def custom_string(self, value_map: dict[str, str]) -> str: if self.index is None: return super().custom_string(value_map) custom_str = f'{self.tkw_op_name}(' custom_str += f'lhs={self.lhs} (index = {self.lhs_index}), ' custom_str += f'lhs_scale={self.lhs_scale} (index = {self.lhs_scale_index}), ' custom_str += f'rhs={self.rhs} (index = {self.rhs_index}), ' custom_str += f'rhs_scale={self.rhs_scale} (index = {self.rhs_scale_index}), ' custom_str += f'acc={self.acc} (index = {self.acc_index}))' custom_str += f' type({self.fx_node.type})' return custom_str def align_index(self, constraints: list['Constraint']) -> None: from ..wave.utils.general_utils import align_index_vars self.index = align_index_vars(self.index, constraints) @property def reduction_dim(self) -> IndexSymbol: if hasattr(self.fx_node, 'reduction_dim'): return self.fx_node.reduction_dim @reduction_dim.setter def reduction_dim(self, value: IndexSymbol): self.fx_node.reduction_dim = value
@define_op('scaled_mma') @dataclass class ScaledMMA(MMABase): @property def indexing_dims(self) -> list[IndexSymbol]: pass @property def lhs_type(self) -> Memory: pass @property def lhs_scale_type(self) -> Memory: pass @property def rhs_type(self) -> Memory: pass @property def rhs_scale_type(self) -> Memory: pass @property def acc_type(self) -> Memory: pass def infer_type(self, *args): pass def operand_index(self, operand_map: dict[IndexSymbol, int], shape: list[IndexExpr]) -> dict[IndexSymbol, IndexSequence]: pass @property def lhs_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def lhs_scale_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def rhs_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def rhs_scale_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def acc_index(self) -> dict[IndexSymbol, IndexSequence]: pass def custom_string(self, value_map: dict[str, str]) -> str: pass def align_index(self, constraints: list['Constraint']) -> None: pass @property def reduction_dim(self) -> IndexSymbol: pass @reduction_dim.setter def reduction_dim(self) -> IndexSymbol: pass
33
0
6
0
6
0
1
0.01
1
7
2
0
17
2
17
76
137
19
117
49
82
1
66
34
46
2
6
1
21
327,612
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.ScanOp
import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from abc import ABC @define_interface_op('cumsum') @dataclass class ScanOp(CustomOp, ABC): """ Base class for all scan-style operations (e.g., cumsum). arg: Source tensor/value to scan. init: Optional initial value. dim: Symbolic dimension along which to scan. """ arg: fx.Node | list[fx.Node] init: Optional[fx.Node] = None dim: Optional[IndexSymbol] = None @property def indexing_dims(self) -> list[IndexSymbol]: from ..wave.utils.general_utils import all_equal if isinstance(self.arg, Sequence): src_indexings = [get_custom(arg).indexing_dims for arg in self.arg] if not all_equal(src_indexings): raise NotImplementedError('All inputs to ScanOp must have same indexing dims.') indexing = src_indexings[0] else: indexing = get_custom(self.arg).indexing_dims return [dim for dim in indexing if dim != self.dim] def infer_type(self, *args): if isinstance(self.arg, Sequence): src_types = [get_custom(arg).type for arg in self.arg] ref_shape = src_types[0].symbolic_shape ref_dtype = src_types[0].dtype for src_type in src_types: if src_type.symbolic_shape != ref_shape or src_type.dtype != ref_dtype: raise NotImplementedError('ScanOp requires all args to have same shape and dtype.') src_type = src_types[0] else: src_type = get_custom(self.arg).type if self.dim == -1 or self.dim is None: self.dim = src_type.symbolic_shape[-1] self.type = Register[*src_type.symbolic_shape, src_type.dtype] if self.init is not None: init_shape = get_custom(self.init).type.symbolic_shape if init_shape != self.type.symbolic_shape: raise RuntimeError(f'Init shape {init_shape} must match result shape {self.type.symbolic_shape}') @property def scan_dim(self) -> IndexSymbol: return self.dim
@define_interface_op('cumsum') @dataclass class ScanOp(CustomOp, ABC): ''' Base class for all scan-style operations (e.g., cumsum). arg: Source tensor/value to scan. init: Optional initial value. dim: Symbolic dimension along which to scan. ''' @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass @property def scan_dim(self) -> IndexSymbol: pass
8
1
14
2
12
0
4
0.17
2
4
1
0
3
1
3
62
59
10
42
17
35
7
32
15
27
7
5
3
11
327,613
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.ScatterAdd
import torch.fx as fx from ..lang.wave_types import IndexMapping, Memory, Register from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields @define_op('scatter_add') @dataclass class ScatterAdd(CustomOp): """ ScatterAdd performs element-wise accumulation from a source register into shared memory (LDS), at locations determined by the index register along a specified dimension. Limitations: - Only intra-workgroup scattering is supported (i.e., within shared memory / LDS), assuming a single wave. - Multi-wave execution is not guaranteed to be safe: synchronization issues may occur when threads write to the same index. Further investigation is needed. - The operation supports multiple elements per thread, assuming the non-scatter dimension is large enough (i.e., > elements_per_thread). """ register_src: fx.Node register_idx: fx.Node dim: IndexExpr memory: fx.Node mapping: IndexMapping elements_per_thread: Optional[int] = 1 bounds: Optional[dict[IndexSymbol, IndexExpr]] = None @property def indexing_dims(self) -> list[IndexSymbol]: if self.mapping is not None: return list(self.mapping.input_shape) return list(self.memory_type.symbolic_shape) def infer_type(self, *args): address_space = self.memory_type.address_space dtype = self.memory_type.dtype self.type = Memory[*self.indexing_dims, address_space, dtype] @property def memory_type(self) -> 'Memory': return get_custom(self.memory).type @property def register_type(self) -> 'Register': return get_custom(self.register_src).type @property def register_index(self) -> dict[IndexSymbol, IndexSequence]: custom = get_custom(self.register_src) return custom.index @property def has_side_effects(self) -> bool: return True
@define_op('scatter_add') @dataclass class ScatterAdd(CustomOp): ''' ScatterAdd performs element-wise accumulation from a source register into shared memory (LDS), at locations determined by the index register along a specified dimension. Limitations: - Only intra-workgroup scattering is supported (i.e., within shared memory / LDS), assuming a single wave. - Multi-wave execution is not guaranteed to be safe: synchronization issues may occur when threads write to the same index. Further investigation is needed. - The operation supports multiple elements per thread, assuming the non-scatter dimension is large enough (i.e., > elements_per_thread). ''' @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass @property def memory_type(self) -> 'Memory': pass @property def register_type(self) -> 'Register': pass @property def register_index(self) -> dict[IndexSymbol, IndexSequence]: pass @property def has_side_effects(self) -> bool: pass
14
1
3
0
3
0
1
0.27
1
5
2
0
6
1
6
65
46
8
30
18
18
8
25
13
18
2
5
1
7
327,614
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SchedulingBarrier
from dataclasses import dataclass, field, fields @define_op('scheduling_barrier') @dataclass class SchedulingBarrier(CustomOp): """ Represents a scheduling barrier in the graph. Takes in a list of operations that are allowed to cross the barrier. """ operations: list[Operation]
@define_op('scheduling_barrier') @dataclass class SchedulingBarrier(CustomOp): ''' Represents a scheduling barrier in the graph. Takes in a list of operations that are allowed to cross the barrier. ''' pass
3
1
0
0
0
0
0
2.5
1
0
0
0
0
0
0
59
8
1
2
1
1
5
2
1
1
0
5
0
0
327,615
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SchedulingGroupBarrier
from dataclasses import dataclass, field, fields @define_op('scheduling_group_barrier') @dataclass class SchedulingGroupBarrier(CustomOp): """ Represents a scheduling group barrier in the graph. The scheduling group barrier defines scheduling groups. Each scheduling group contains different instructions in a specific order. The sync_id identifies scheduling groups that need to be aware of each other. """ instructions: dict[Operation, int] sync_id: int
@define_op('scheduling_group_barrier') @dataclass class SchedulingGroupBarrier(CustomOp): ''' Represents a scheduling group barrier in the graph. The scheduling group barrier defines scheduling groups. Each scheduling group contains different instructions in a specific order. The sync_id identifies scheduling groups that need to be aware of each other. ''' pass
3
1
0
0
0
0
0
2
1
0
0
0
0
0
0
59
10
1
3
1
2
6
3
1
2
0
5
0
0
327,616
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SelectOp
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from .._support.dtype import DataType, i1 from dataclasses import dataclass, field, fields import torch.fx as fx @define_op('select') @dataclass class SelectOp(CustomOp): cond: fx.Node if_true: fx.Node if_false: fx.Node @property def indexing_dims(self) -> list[IndexSymbol]: combined_dims = [] combined_dims += get_custom(self.cond).indexing_dims combined_dims += get_custom(self.if_true).indexing_dims combined_dims += get_custom(self.if_false).indexing_dims return list(dict.fromkeys(combined_dims)) def infer_type(self, *args): cond_type = get_custom(self.cond).type if_true_type = get_custom(self.if_true).type if_false_type = get_custom(self.if_false).type if cond_type.dtype != i1: raise ValueError('SelectOp expects condition type to be i1.') if if_true_type.dtype != if_false_type.dtype: raise ValueError('SelectOp expects lhs and rhs dtype to match.') if cond_type.symbolic_shape != if_true_type.symbolic_shape or cond_type.symbolic_shape != if_false_type.symbolic_shape: raise ValueError("SelectOp doesn't support broadcasting. (yet?)") self.type = if_true_type
@define_op('select') @dataclass class SelectOp(CustomOp): @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass
6
0
13
2
10
1
3
0.04
1
3
0
0
2
1
2
61
32
6
25
9
21
1
21
8
18
4
5
1
5
327,617
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SelfIndex
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from .._support.dtype import DataType, i1 from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register @define_op('self_index') @dataclass class SelfIndex(CustomOp): dim: IndexExpr dtype: DataType elements_per_thread: Optional[IndexExpr | int] = None @property def indexing_dims(self) -> list[IndexSymbol]: return [self.dim] @property def type(self) -> 'Register': return Register[self.dim, self.dtype]
@define_op('self_index') @dataclass class SelfIndex(CustomOp): @property def indexing_dims(self) -> list[IndexSymbol]: pass @property def type(self) -> 'Register': pass
7
0
2
0
2
0
1
0
1
2
1
0
2
0
2
61
12
2
10
6
5
0
8
4
5
1
5
0
2
327,618
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SetSymbol
from dataclasses import dataclass, field, fields import torch.fx as fx from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @define_op('set_symbol') @dataclass class SetSymbol(CustomOp): symbol: IndexExpr register_: fx.Proxy @property def type(self) -> 'Register': return get_custom(self.register_).type @property def indexing_dims(self) -> list[IndexSymbol]: return get_custom(self.register_).indexing_dims @property def has_side_effects(self) -> bool: return True
@define_op('set_symbol') @dataclass class SetSymbol(CustomOp): @property def type(self) -> 'Register': pass @property def indexing_dims(self) -> list[IndexSymbol]: pass @property def has_side_effects(self) -> bool: pass
9
0
2
0
2
0
1
0
1
2
0
0
3
0
3
62
15
3
12
7
5
0
9
4
5
1
5
0
3
327,619
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SetWavePrio
from dataclasses import dataclass, field, fields @define_op('set_wave_prio') @dataclass class SetWavePrio(CustomOp): """ An op that sets/tells hardware what level of priority certain instructions/region is. This is useful for ping-pong or general case where two Waves share the same SIMD, but we want to tell the SIMD to prioritize on wave or the other. """ priority: int @property def has_side_effects(self) -> bool: return True
@define_op('set_wave_prio') @dataclass class SetWavePrio(CustomOp): ''' An op that sets/tells hardware what level of priority certain instructions/region is. This is useful for ping-pong or general case where two Waves share the same SIMD, but we want to tell the SIMD to prioritize on wave or the other. ''' @property def has_side_effects(self) -> bool: pass
5
1
2
0
2
0
1
1
1
1
0
0
1
0
1
60
12
2
5
3
2
5
4
2
2
1
5
0
1
327,620
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SharedMemoryBarrier
from dataclasses import dataclass, field, fields @define_op('shared_memory_barrier') @dataclass class SharedMemoryBarrier(CustomOp): """ Represents a shared memory barrier in the graph. """ wait_async_ops: bool = False @property def has_side_effects(self) -> bool: return True
@define_op('shared_memory_barrier') @dataclass class SharedMemoryBarrier(CustomOp): ''' Represents a shared memory barrier in the graph. ''' @property def has_side_effects(self) -> bool: pass
5
1
2
0
2
0
1
0.6
1
1
0
0
1
0
1
60
10
2
5
4
2
3
4
3
2
1
5
0
1
327,621
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.ShuffleOp
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields import torch.fx as fx @define_op('shuffle') @dataclass class ShuffleOp(CustomOp): """ Represents a shuffle.xor op. arg: value/vector to shuffle. offset: xor offset. width: xor width. """ arg: fx.Node offset: int width: int mode: 'ShuffleMode' @property def indexing_dims(self) -> list[IndexSymbol]: return get_custom(self.arg).indexing_dims def infer_type(self, *args): self.type = get_custom(self.arg).type
@define_op('shuffle') @dataclass class ShuffleOp(CustomOp): ''' Represents a shuffle.xor op. arg: value/vector to shuffle. offset: xor offset. width: xor width. ''' @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass
6
1
2
0
2
0
1
0.6
1
1
0
0
2
1
2
61
20
4
10
5
6
6
9
4
6
1
5
0
2
327,622
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.SoftsignOp
from abc import ABC from dataclasses import dataclass, field, fields import torch.fx as fx from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @define_interface_op('softsign') @dataclass class SoftsignOp(CustomOp, ABC): arg: fx.Node logit_cap: float = 30.0 apply_scaling: bool = False head_dim: int = None @property def indexing_dims(self) -> list[IndexSymbol]: return get_custom(self.arg).indexing_dims def infer_type(self, *args): src_type = get_custom(self.arg).type self.type = src_type
@define_interface_op('softsign') @dataclass class SoftsignOp(CustomOp, ABC): @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass
6
0
3
0
3
0
1
0
2
1
0
0
2
1
2
61
13
2
11
9
7
0
10
8
7
1
5
0
2
327,623
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.UnaryPyOp
import operator import torch.fx as fx from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields from abc import ABC @define_interface_op('abs') @define_interface_op('exp') @define_interface_op('exp2') @define_interface_op('sqrt') @define_interface_op('rsqrt') @define_interface_op('log2') @define_interface_op('log10') @define_interface_op('reciprocal') @define_interface_op('roundeven') @define_interface_op('sin') @define_interface_op('sinh') @define_interface_op('tanh') @define_interface_op('tanh_approx') @define_interface_op('cos') @define_interface_op('cbrt') @define_py_op(operator.neg) @define_py_op(operator.invert) @dataclass class UnaryPyOp(CustomOp, ABC): """ Represents a unary python operator. """ arg: fx.Node @property def indexing_dims(self) -> list[IndexSymbol]: return get_custom(self.arg).indexing_dims @property def py_operator(self) -> str: return self.tkw_op_name def infer_type(self, *args): src_type = get_custom(self.arg).type self.type = src_type
@define_interface_op('abs') @define_interface_op('exp') @define_interface_op('exp2') @define_interface_op('sqrt') @define_interface_op('rsqrt') @define_interface_op('log2') @define_interface_op('log10') @define_interface_op('reciprocal') @define_interface_op('roundeven') @define_interface_op('sin') @define_interface_op('sinh') @define_interface_op('tanh') @define_interface_op('tanh_approx') @define_interface_op('cos') @define_interface_op('cbrt') @define_py_op(operator.neg) @define_py_op(operator.invert) @dataclass class UnaryPyOp(CustomOp, ABC): ''' Represents a unary python operator. ''' @property def indexing_dims(self) -> list[IndexSymbol]: pass @property def py_operator(self) -> str: pass def infer_type(self, *args): pass
24
1
2
0
2
0
1
0.27
2
2
0
0
3
1
3
62
18
4
11
8
5
3
9
6
5
1
5
0
3
327,624
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Unknown
from dataclasses import dataclass, field, fields import torch.fx as fx from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final @final @dataclass class Unknown(CustomOp): """ Represents an fx.Node that has no corresponding CustomNode class. """ args: Sequence[Any] kwargs: dict[Any, Any] @classmethod def from_fx_node(cls, node: fx.Node) -> 'Unknown': instance = cls(node.args, node.kwargs) instance.fx_node = node instance.graph = node.graph return instance def custom_string(self, value_map: dict[str, str]) -> str: vars_list = [f'{key}={value}' for key, value in vars(self).items()][:-2] vars_str = ', '.join(vars_list) return f'unknown: {self.fx_node.name}({vars_str}) type({self.fx_node.type})'
@final @dataclass class Unknown(CustomOp): ''' Represents an fx.Node that has no corresponding CustomNode class. ''' @classmethod def from_fx_node(cls, node: fx.Node) -> 'Unknown': pass def custom_string(self, value_map: dict[str, str]) -> str: pass
6
1
5
0
5
1
1
0.31
1
2
0
0
1
0
2
61
20
3
13
7
9
4
12
6
9
1
5
0
2
327,625
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.WorkgroupBarrier
from dataclasses import dataclass, field, fields @define_op('workgroup_barrier') @dataclass class WorkgroupBarrier(CustomOp): """ Represents a synchronization of all threads in a workgroup. Threads will wait on a WorkgroupBarrier until all the threads in the workgroup has called a WorkgroupBarrier(does not have to be in the same location). """ @property def has_side_effects(self) -> bool: return True
@define_op('workgroup_barrier') @dataclass class WorkgroupBarrier(CustomOp): ''' Represents a synchronization of all threads in a workgroup. Threads will wait on a WorkgroupBarrier until all the threads in the workgroup has called a WorkgroupBarrier(does not have to be in the same location). ''' @property def has_side_effects(self) -> bool: pass
5
1
2
0
2
0
1
1.5
1
1
0
0
1
0
1
60
11
1
4
3
1
6
3
2
1
1
5
0
1
327,626
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/ops/wave_ops.py
wave_lang.kernel.ops.wave_ops.Write
from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, TypeVar, final from ..lang.wave_types import IndexMapping, Memory, Register from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass, field, fields import torch.fx as fx @define_op('write') @dataclass class Write(CustomOp): register_: fx.Proxy memory: fx.Proxy elements_per_thread: Optional[Any] = None mapping: Optional[IndexMapping] = None mapping_dynamic_vals: tuple[fx.Node, ...] = () bounds: Optional[dict[IndexSymbol, IndexExpr]] = None source: Optional[tuple[IndexExpr]] = None target: Optional[tuple[IndexExpr]] = None @property def indexing_dims(self) -> list[IndexSymbol]: from ..wave.utils.general_utils import infer_dim if self.mapping is not None: return list(self.mapping.input_shape) shape = list(self.memory_type.symbolic_shape) dims = [infer_dim(expr) for expr in shape] return dims def infer_type(self, *args): from ..wave.utils.general_utils import infer_dim address_space = self.memory_type.address_space dtype = self.memory_type.dtype memory_shape = list(self.memory_type.symbolic_shape) dim_to_shape = {infer_dim(expr): expr for expr in memory_shape} shape = [dim_to_shape.get(dim, dim) for dim in self.indexing_dims] if self.source: shape = get_shape_from_bindings(args[0], self.source) self.type = Memory[*shape, address_space, dtype] @property def memory_type(self) -> 'Memory': return get_custom(self.memory).type @property def register_type(self) -> 'Register': return get_custom(self.register_).type @property def register_index(self) -> dict[IndexSymbol, IndexSequence]: custom = get_custom(self.register_) return custom.index def transform_index_backwards(self, index: dict[IndexSymbol, IndexSequence], arg: fx.Node) -> dict[IndexSymbol, IndexSequence]: """ Propagate index backwards. Dynamic values potentially can have non-identity mapping, so we need to update index when walking from the node to dyn val arguments. E.g. 
if `index` is $idx and dynamic_val_mappings={N: j // ELEMS_PER_THREAD} resulted arg index will be $idx // ELEMS_PER_THREAD. """ if arg in self.mapping_dynamic_vals: assert self.mapping.is_input_identity() i = self.mapping_dynamic_vals.index(arg) iters = self.mapping.iters mapping = self.mapping.dynamic_val_mappings[i] subs = {k: index[v] for k, v in zip(iters, self.mapping.input_mapping.keys())} return {k: IndexSequence.from_expr(mapping[k], subs) for k in arg.type.symbolic_shape if k in mapping} return index def get_derived_indices(self) -> list[tuple[dict[IndexSymbol, IndexSequence], fx.Node]]: def transform_idx(arg): return {k: v for k, v in self.transform_index_backwards(self.index, arg).items() if v.start != 0} return [(arg, transform_idx(arg)) for arg in self.mapping_dynamic_vals] def has_identity_mapping(self) -> bool: """Check if mapping between input register and output memory is identity.""" mapping = self.mapping if mapping is None: return True mem_shape = get_custom(self.memory).type.symbolic_shape if mapping.is_identity() and mapping.output_shape == mem_shape: return True return False def is_contiguous_vec(self) -> bool: """Check if op can be lowered to contiguous vector ops If False we will have to lower it to gather""" if self.has_identity_mapping(): return True mapping = self.mapping mem_shape = get_custom(self.memory).type.symbolic_shape from ..wave.utils.mapping_utils import check_is_mapping_contiguous return check_is_mapping_contiguous(mapping=mapping, symbolic_shape=mem_shape, index=self.index, elements_per_thread=self.elements_per_thread, is_read=False)
@define_op('write') @dataclass class Write(CustomOp): @property def indexing_dims(self) -> list[IndexSymbol]: pass def infer_type(self, *args): pass @property def memory_type(self) -> 'Memory': pass @property def register_type(self) -> 'Register': pass @property def register_index(self) -> dict[IndexSymbol, IndexSequence]: pass def transform_index_backwards(self, index: dict[IndexSymbol, IndexSequence], arg: fx.Node) -> dict[IndexSymbol, IndexSequence]: ''' Propagate index backwards. Dynamic values potentially can have non-identity mapping, so we need to update index when walking from the node to dyn val arguments. E.g. if `index` is $idx and dynamic_val_mappings={N: j // ELEMS_PER_THREAD} resulted arg index will be $idx // ELEMS_PER_THREAD. ''' pass def get_derived_indices(self) -> list[tuple[dict[IndexSymbol, IndexSequence], fx.Node]]: pass def transform_idx(arg): pass def has_identity_mapping(self) -> bool: '''Check if mapping between input register and output memory is identity.''' pass def is_contiguous_vec(self) -> bool: '''Check if op can be lowered to contiguous vector ops If False we will have to lower it to gather''' pass
17
3
11
1
8
2
2
0.18
1
7
2
0
9
2
9
68
122
22
85
46
63
15
61
37
47
3
5
1
16
327,627
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/assumptions.py
wave_lang.kernel.wave.assumptions.Assumption
from dataclasses import dataclass from .._support.indexing import IndexExpr @dataclass class Assumption: """ Assumptions are sympy assumptions that can be used to make decisions during code generation. These can be statements such as bounds on sympy variables. For example, we can state that Assumption(M < 64) and then later make queries based on this assumption, such as evaluate(M > 70) -> False evaluate(M < 32) -> None (because we cannot say one way or the other) evaluate(M < 70) -> True """ expr: IndexExpr
@dataclass class Assumption: ''' Assumptions are sympy assumptions that can be used to make decisions during code generation. These can be statements such as bounds on sympy variables. For example, we can state that Assumption(M < 64) and then later make queries based on this assumption, such as evaluate(M > 70) -> False evaluate(M < 32) -> None (because we cannot say one way or the other) evaluate(M < 70) -> True ''' pass
2
1
0
0
0
0
0
5.5
0
0
0
0
0
0
0
0
18
5
2
1
1
11
2
1
1
0
0
0
0
327,628
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/barriers.py
wave_lang.kernel.wave.barriers.MemoryAccessType
from enum import Enum, auto class MemoryAccessType(Enum): """Enum to classify memory access operations.""" NONE = auto() READ = auto() WRITE = auto() READ_WRITE = auto()
class MemoryAccessType(Enum): '''Enum to classify memory access operations.''' pass
1
1
0
0
0
0
0
0.2
1
0
0
0
0
0
0
49
7
1
5
5
4
1
5
5
4
0
4
0
0
327,629
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/barriers.py
wave_lang.kernel.wave.barriers.SharedMemoryBarrierInfo
import torch.fx as fx from typing import Optional from dataclasses import dataclass @dataclass class SharedMemoryBarrierInfo: is_async: bool = False last_node: Optional[fx.Node] = None
@dataclass class SharedMemoryBarrierInfo: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
3
0
3
3
2
0
3
3
2
0
0
0
0
327,630
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/cache.py
wave_lang.kernel.wave.cache.WaveCache
from ..compiler.kernel_codegen import KernelBufferUsage from .utils.classes import KernelLaunchInfo from typing import Callable, Optional from dataclasses import asdict, dataclass @dataclass class WaveCache: """ Dataclass/Struct that stores necessary information S.T we can reconstruct and call the "cached" kernel. """ kernel_sig: tuple[KernelBufferUsage] vmfb: bytes asm: str kernel_launch_info: Optional[KernelLaunchInfo] = None
@dataclass class WaveCache: ''' Dataclass/Struct that stores necessary information S.T we can reconstruct and call the "cached" kernel. ''' pass
2
1
0
0
0
0
0
0.8
0
0
0
0
0
0
0
0
10
1
5
2
4
4
5
2
4
0
0
0
0
327,631
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/cache.py
wave_lang.kernel.wave.cache.WaveCacheManager
import os from pathlib import Path from .constraints import Constraint, TilingConstraint, WaveConstraint from collections import OrderedDict, deque import json import hashlib from typing import Callable, Optional from .compile_options import WaveCompileOptions import glob from ..compiler.kernel_codegen import KernelBufferUsage import inspect import functools from .utils.classes import KernelLaunchInfo from dataclasses import asdict, dataclass import threading import shutil class WaveCacheManager(object): """ Wave cache manager has two main components/cache: 1. Session/Online cache - This is the main cache that our compiler and runtime will load from and store to. It is essentially a dict that uses the kernel hash as keys and the WaveCache as values. We added LRU functionality with limits for number of kernel cached here, because this lives on RAM, and we wouldn't want to run OOM. 2. File/Offline cache - This cache is essential for loading saved/compiled cache between sessions/runs. This is done by storing vital kernel information(vmfb, kernel_sig, and mlir) to base_dir/kernel_hash directory. If said kernel is queried during a new run and does not exist on session/online cache yet, we'd load files from the kernel_hash directory and reconstruct the WaveCache from it. """ def __init__(self, base_dir): self.base_dir = Path(base_dir) self.file_cache: set[str] = set() self.session_cache: OrderedDict[str, WaveCache] = OrderedDict() self.lock = threading.Lock() self.update_file_cache() self.cache_hits = 0 self.cache_misses = 0 def get_hash(self, constraints: list[Constraint], kernel_fn: Callable, options: WaveCompileOptions) -> str: """ Get a unique identifier for a given kernel. 
""" fns = get_nested_functions(kernel_fn) arg_dtypes = extract_arg_types(kernel_fn) processed_constraints = anonymize_constraints(constraints) key = [arg_dtypes, processed_constraints, options.postprocess, options.canonicalize, options.func_name, options.subs, options.dynamic_symbols, options.schedule, options.use_scheduling_barriers, options.multi_buffer_count, options.backend, options.target, options.iree_preprocessing_pass_pipeline, options.override_mlir, options.optimization_level, options.denorm_fp_math_f32, options.waves_per_eu, options.iree_launch_async, options.use_buffer_ops, options.use_fast_math, options.use_global_to_shared, options.minimize_shared_allocs, options.reorder_allocs, options.override_schedule] for fn in fns: try: kernel_src = inspect.getsource(fn) free_vars = extract_free_vars(fn) index_mappings = extract_mappings(fn) except: return None key += [kernel_src, index_mappings, free_vars] if options.run_bench: key += [options.benchmark_batch_size] return hashlib.sha256(str(key).encode('utf-8')).hexdigest() def update_file_cache(self): """ Search for saved/cached kernels in cache_base_directory and inform the cache manager for what are available. """ if not self.base_dir.exists(): return for entry in self.base_dir.iterdir(): if entry.name not in self.file_cache: self.file_cache.add(entry.name) def store_kernel_to_file(self, kernel_hash: str, vmfb: bytes, kernel_sig: tuple[KernelBufferUsage], module_str: str, kernel_launch_info: KernelLaunchInfo): """ Stores/save compiled kernels into self.base_dir/kernel_hash including it's MLIR, VMFB, and kernel signature. If wave runtime is enabled, also copies the hsaco binary and stores the kernel launch information. 
""" cur_cache_dir = self.base_dir / kernel_hash os.makedirs(cur_cache_dir, exist_ok=True) cur_cache_basefile = cur_cache_dir / kernel_hash cur_vmfb_path = cur_cache_basefile.with_suffix('.vmfb') cur_module_path = cur_cache_basefile.with_suffix('.mlir') cur_kernelsig_path = cur_cache_basefile.with_suffix('.json') cur_vmfb_path.write_bytes(vmfb) cur_module_path.write_text(module_str) kernel_sig_str = json.dumps([usage.name for usage in kernel_sig]) cur_kernelsig_path.write_text(kernel_sig_str) cur_hsaco_path = glob.glob(str(get_temp_binary_dir() / '*.hsaco')) if cur_hsaco_path: cur_hsaco_path = cur_hsaco_path[0] shutil.copy(cur_hsaco_path, cur_cache_basefile.with_suffix('.hsaco')) cur_kernel_info_path = cur_cache_basefile.with_suffix('.kernel_info.json') kernel_launch_info_dict = asdict(kernel_launch_info) del kernel_launch_info_dict['grid'] kernel_info_str = json.dumps(kernel_launch_info_dict) cur_kernel_info_path.write_text(kernel_info_str) @staticmethod @functools.lru_cache def load_kernel_from_file(base_dir, kernel_hash): """ Loads the queried kernel(including VMFB, and kernel signature) from local cache file/directory. 
""" cur_cache_dir = base_dir / kernel_hash vmfb = None kernel_sig_str = None if not os.path.exists(cur_cache_dir): raise ValueError('Failed to find queried cached kernel.') cur_cache_basefile = cur_cache_dir / kernel_hash cur_vmfb_path = cur_cache_basefile.with_suffix('.vmfb') cur_kernelsig_path = cur_cache_basefile.with_suffix('.json') cur_asm_path = cur_cache_basefile.with_suffix('.mlir') vmfb = cur_vmfb_path.read_bytes() kernel_sig_str = json.loads(cur_kernelsig_path.read_text()) kernel_sig = [KernelBufferUsage[usage] for usage in kernel_sig_str] asm = cur_asm_path.read_text() cur_kernel_info_path = cur_cache_basefile.with_suffix('.kernel_info.json') kernel_info_str = json.loads(cur_kernel_info_path.read_text()) kernel_info_str['grid'] = eval(kernel_info_str['grid_str']) kernel_launch_info = KernelLaunchInfo(**kernel_info_str) return WaveCache(kernel_sig, vmfb, asm, kernel_launch_info) def store_kernel_to_session(self, kernel_hash: str, cached_kernel: WaveCache): """ LRU style storing of kernel into session cache. Set most recently generated kernel to top of session cache, and if len of cache exceed limit, we'd pop least recently used """ self.session_cache[kernel_hash] = cached_kernel self.session_cache.move_to_end(kernel_hash) if len(self.session_cache) > WAVE_CACHE_LIMIT: self.session_cache.popitem(last=False) def store_kernel(self, vmfb: bytes, module_str: str, options: WaveCompileOptions): """ Save given kernel(vmfb, kernel_sig, and MLIR) into session_cache and file/offline cache. 
""" if not WAVE_CACHE_ON or not options.kernel_hash: return with self.lock: self.store_kernel_to_file(options.kernel_hash, vmfb, options.kernel_usages, module_str, options.kernel_launch_info) if not WAVE_ALWAYS_COMPILE: self.store_kernel_to_session(options.kernel_hash, WaveCache(options.kernel_usages, vmfb, module_str, options.kernel_launch_info)) def load_kernel(self, kernel_hash: str): """ LRU style loading of kernel from session cache and move queried kernel to top of LRU if it exist. If it only exist in file/offline cache, we'll load from local files, reconstruct WaveCache and then store into session_cache.If it does not exist in session cache nor offline/file cache, then we return "None" and ask compiler to compile from scratch. """ if WAVE_ALWAYS_COMPILE or not kernel_hash or (not WAVE_CACHE_ON): return None with self.lock: if kernel_hash in self.session_cache: self.session_cache.move_to_end(kernel_hash) self.cache_hits += 1 elif kernel_hash in self.file_cache: cached_kernel = self.load_kernel_from_file(self.base_dir, kernel_hash) self.store_kernel_to_session(kernel_hash, cached_kernel) self.cache_hits += 1 else: self.cache_misses += 1 return self.session_cache.get(kernel_hash, None)
class WaveCacheManager(object):
    """
    Wave cache manager has two main components/cache:

    1. Session/Online cache - the main cache the compiler and runtime load
       from and store to: a dict keyed by kernel hash holding ``WaveCache``
       values, bounded LRU-style so the in-RAM cache cannot grow without limit.

    2. File/Offline cache - persists vital kernel information (vmfb,
       kernel_sig, and mlir) under ``base_dir/kernel_hash`` so compiled
       kernels survive between sessions; on a miss in the session cache the
       files are reloaded and the ``WaveCache`` reconstructed from them.
    """

    def __init__(self, base_dir):
        pass

    def get_hash(
        self,
        constraints: list[Constraint],
        kernel_fn: Callable,
        options: WaveCompileOptions,
    ) -> str:
        """Get a unique identifier for a given kernel."""
        pass

    def update_file_cache(self):
        """
        Search for saved/cached kernels in cache_base_directory and inform
        the cache manager for what are available.
        """
        pass

    def store_kernel_to_file(
        self,
        kernel_hash: str,
        vmfb: bytes,
        kernel_sig: tuple[KernelBufferUsage],
        module_str: str,
        kernel_launch_info: KernelLaunchInfo,
    ):
        """
        Stores/save compiled kernels into self.base_dir/kernel_hash including
        it's MLIR, VMFB, and kernel signature. If wave runtime is enabled,
        also copies the hsaco binary and stores the kernel launch information.
        """
        pass

    @staticmethod
    @functools.lru_cache
    def load_kernel_from_file(base_dir, kernel_hash):
        """
        Loads the queried kernel (including VMFB, and kernel signature) from
        local cache file/directory.
        """
        pass

    def store_kernel_to_session(self, kernel_hash: str, cached_kernel: WaveCache):
        """
        LRU style storing of kernel into session cache. Set most recently
        generated kernel to top of session cache, and if len of cache exceed
        limit, we'd pop least recently used
        """
        pass

    # NOTE(review): the next two defs re-declare names defined above — an
    # artifact of the skeleton generator.  At class-creation time the later
    # definitions win, so they are preserved in the original order.
    def store_kernel_to_file(
        self,
        kernel_hash: str,
        vmfb: bytes,
        kernel_sig: tuple[KernelBufferUsage],
        module_str: str,
        kernel_launch_info: KernelLaunchInfo,
    ):
        """
        Save given kernel(vmfb, kernel_sig, and MLIR) into session_cache and
        file/offline cache.
        """
        pass

    def load_kernel_from_file(base_dir, kernel_hash):
        """
        LRU style loading of kernel from session cache and move queried kernel
        to top of LRU if it exist. If it only exist in file/offline cache,
        we'll load from local files, reconstruct WaveCache and then store into
        session_cache. If it does not exist in session cache nor offline/file
        cache, then we return "None" and ask compiler to compile from scratch.
        """
        pass
11
8
26
0
19
6
3
0.42
1
13
5
0
7
6
8
8
238
14
158
65
130
66
94
47
85
4
1
2
22
327,632
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/codegen/emitter.py
wave_lang.kernel.wave.codegen.emitter.WaveEmitter
import sys
from dataclasses import dataclass
from typing import Any, Callable, ClassVar, List, Optional, Type

import torch.fx as fx

from wave_lang.support.ir_imports import (
    AffineExpr,
    AffineMap,
    Attribute,
    DenseElementsAttr,
    FloatAttr,
    IndexType,
    InsertionPoint,
    IntegerAttr,
    IntegerType,
    IrType,
    Location,
    MemRefType,
    OpResult,
    ShapedType,
    Value,
    VectorType,
    affine_d,
    arith_d,
    func_d,
    gpu_d,
    vector_d,
)

from ..._support.indexing import IndexExpr, IndexingContext, xor
from ..._support.tracing import CapturedTrace
from ...compiler.base import NDEBUG, CodegenError
from ...compiler.builder import IRProxyValue, ScalarBuilder
from ...compiler.kernel_codegen import BindingType, BoundKernelSignature
from ...lang.wave_types import IndexSymbol
from ..compile_options import WaveCompileOptions
from ..constraints import Constraint, HardwareConstraint, TilingConstraint
from ..utils.general_utils import get_hardware_constraint


@dataclass
class WaveEmitter:
    """Emits a warp function as a `func` with a signature derived from the gm."""

    root_sig: BoundKernelSignature
    trace: CapturedTrace
    constraints: list[Constraint]
    options: WaveCompileOptions
    grid_type: Type["Grid"]
    ip: InsertionPoint = None
    # Registry of per-op emit handlers, populated elsewhere via decorators.
    OP_HANDLERS: ClassVar[dict[str, Callable[["WaveEmitter", fx.Node], None]]] = {}
    # NOTE(review): class-level cache shared by all emitter instances.
    _node_values: ClassVar[dict[fx.Node, List[IRProxyValue]]] = {}

    def __post_init__(self):
        self.ip = InsertionPoint(self.root_sig.entry_block)
        self.dynamic_symbols = self.options.dynamic_symbols

    def emit_program_invariants(self):
        """Materialize block/thread ids and dynamic-dim bindings up front."""
        axes = (gpu_d.Dimension.x, gpu_d.Dimension.y, gpu_d.Dimension.z)
        grid = self.grid_type
        self.workgroup_ids = [
            gpu_d.block_id(axis, upper_bound=_get_upper_bound(grid.dims[i]))
            for i, axis in enumerate(axes)
        ]
        tpb = self.hardware_constraint.threads_per_block
        self.thread_ids = [
            gpu_d.thread_id(axis, upper_bound=_get_upper_bound(tpb[i]))
            for i, axis in enumerate(axes)
        ]
        self.induction_vars: dict[IndexSymbol, Value] = {}
        self.dynamic_dims: dict[IndexSymbol, Value] = {}
        # Dynamic symbol values arrive as extra entry-block arguments.
        for bind, arg in zip(
            self.root_sig.sig.bindings, self.root_sig.entry_block.arguments
        ):
            if bind.binding_type == BindingType.SYMBOL_VALUE:
                self.dynamic_dims[bind.symbol_type] = arg

    def emit(self, graph: Optional[fx.Graph] = None):
        with self.ip, Location.unknown():
            self.emit_program_invariants()
            if graph is None:
                graph = self.trace.get_root_graph()
            self._emit_graph(graph)

    def finish(self):
        with self.ip, Location.unknown():
            func_d.ReturnOp([])

    def _emit_graph(self, graph: fx.Graph):
        """Emits the given graph at the current insertion point."""
        for node in graph.nodes:
            if node.op in ("call_function", "call_method"):
                self._emit_function_call_node(node)
            if node.op == "output":
                return node.args

    def _emit_function_call_node(self, node: fx.Node):
        target_op = node.tkw_op_name
        try:
            handler = self.OP_HANDLERS[target_op]
        except KeyError:
            raise CodegenError(f"No handler registered for op {target_op}")
        location = getattr(node, "location", None)
        ir_location = location.to_mlir() if location else Location.unknown()
        with ir_location:
            try:
                handler(self, node)
            except:
                # Re-raised after identifying the failing node for debugging.
                print(f"Error handling {node}", file=sys.stderr)
                raise

    def lookup_node_values(self, node: fx.Node) -> List[Value]:
        assert NDEBUG or isinstance(node, fx.Node)
        cached = self._node_values.get(node)
        if cached is None:
            # Walk insertion points up to the enclosing FuncOp before
            # resolving the node by reference.
            ip = InsertionPoint.current
            while not isinstance(ip.block.owner, func_d.FuncOp):
                ip = InsertionPoint(ip.block.owner)
            with ip:
                cached = [self.root_sig.resolve_by_reference(("node", node))]
            self._node_values[node] = cached
        return [v.ir_value if isinstance(v, IRProxyValue) else v for v in cached]

    def bind_node_proxy(self, node: fx.Node, proxy: IRProxyValue):
        """Binds a node's result to a Python/IR proxy object."""
        assert NDEBUG or (isinstance(node, fx.Node) and isinstance(proxy, IRProxyValue))
        self._node_values[node] = [proxy]

    def bind_node_proxies(self, node: fx.Node, proxies: List[IRProxyValue]):
        assert NDEBUG or (
            isinstance(node, fx.Node)
            and all(isinstance(p, IRProxyValue) for p in proxies)
        )
        self._node_values[node] = proxies

    def get_induction_vars_and_syms(self) -> tuple[list[OpResult], list[IndexExpr]]:
        syms: list[IndexExpr] = []
        ivs: list[OpResult] = []
        if self.induction_vars:
            for constraint in self.constraints:
                if (
                    isinstance(constraint, TilingConstraint)
                    and constraint.dim in self.induction_vars
                ):
                    syms.append(constraint.induction_var)
                    ivs.append(self.induction_vars[constraint.dim])
        return (ivs, syms)

    @property
    def hardware_constraint(self) -> HardwareConstraint:
        return get_hardware_constraint(self.constraints)
@dataclass class WaveEmitter: '''Emits a warp function as a `func` with a signature derived from the gm.''' def __post_init__(self): pass def emit_program_invariants(self): pass def emit_program_invariants(self): pass def finish(self): pass def _emit_graph(self, graph: fx.Graph): '''Emits the given graph at the current insertion point.''' pass def _emit_function_call_node(self, node: fx.Node): pass def lookup_node_values(self, node: fx.Node) -> List[Value]: pass def bind_node_proxy(self, node: fx.Node, proxy: IRProxyValue): '''Binds a node's result to a Python/IR proxy object.''' pass def bind_node_proxies(self, node: fx.Node, proxies: List[IRProxyValue]): pass def get_induction_vars_and_syms(self) -> tuple[list[OpResult], list[IndexExpr]]: pass @property def hardware_constraint(self) -> HardwareConstraint: pass
14
3
10
0
9
0
2
0.04
0
10
5
0
11
5
11
11
128
17
108
33
95
4
75
32
63
5
0
4
26
327,633
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/compile.py
wave_lang.kernel.wave.compile.WaveKernel
from itertools import chain
from typing import Any, Callable, Optional, Sequence

import iree.runtime as rt
import torch

from wave_lang.kernel.lang import IndexSymbol
from wave_lang.runtime.launch import Launchable

from ..compiler import host_codegen, kernel_codegen
from .compile_options import WaveCompileOptions
from .debug_log_hoist import DebugArgInfo
from .profiling import benchmark_module
from .utils.general_utils import wave_dtype_to_torch
from .utils.run_utils import (
    get_benchmark_flags,
    invoke_with_wave_runtime,
    print_bench_result,
    write_file,
)


class WaveKernel:
    """
    Represents a wave kernel that can be invoked by the user.
    """

    def __init__(
        self,
        options: WaveCompileOptions,
        executable: Any,
        asm: str,
        gpu_binary_path: Optional[str],
        bound_scalar_symbols: dict[IndexSymbol, int],
        symbols_args_map: dict[IndexSymbol, tuple[int, int]],
        trace: Optional["CapturedTrace"] = None,
        debug_outputs: Optional[Sequence[DebugArgInfo]] = None,
        debug_handlers: Optional[Sequence[Any]] = None,
    ):
        self.options = options
        self.executable = executable
        self.asm = asm
        self.trace = trace
        if gpu_binary_path:
            # Wave runtime path: load the HIP binary directly.
            import wave_runtime

            wave_runtime.load_hip_functions()
            self.gpu_binary, self.gpu_func = wave_runtime.load_binary(
                gpu_binary_path, options.kernel_launch_info.func_name
            )
        else:
            self.gpu_func = None
        self.bound_scalar_symbols = bound_scalar_symbols
        self.symbols_args_map = symbols_args_map
        self.debug_outputs = debug_outputs
        self.debug_handlers = debug_handlers
        if not options.wave_runtime:
            # IREE path: wrap the vmfb in a Launchable.
            is_async = options.iree_launch_async and (not options.run_bench)
            self.func_name = options.func_name + ("$async" if is_async else "")

            def loader(device):
                vm_instance = device.vm_instance
                return rt.VmModule.copy_buffer(vm_instance, self.executable)

            self.launchable = Launchable.from_vm_module(
                loader, entry_point=self.func_name
            )

    def get_trace(self) -> Optional["CapturedTrace"]:
        """Returns the trace used to generate this kernel.

        If this is a cached kernel, the trace is not available.
        """
        return self.trace

    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def invoke(self, *args, **kwargs):
        """
        Invokes the wave kernel with the given arguments.
        Returns the assembly code of the compiled kernel.
        """
        # Split positional args into scalars and input/output tensors
        # according to the recorded buffer usages.
        scalar_args = []
        kernel_inputs, kernel_outputs = [], []
        next_usage = 0
        for arg in args:
            if not isinstance(arg, torch.Tensor):
                scalar_args.append(arg)
                continue
            usage = self.options.kernel_usages[next_usage]
            next_usage += 1
            if usage == kernel_codegen.KernelBufferUsage.INPUT:
                kernel_inputs.append(arg)
            if usage == kernel_codegen.KernelBufferUsage.OUTPUT:
                kernel_outputs.append(arg)

        debug_args = []
        debug_logs = kwargs.get("debug_logs", {})
        debug_extra_dimensions = {}

        def get_dynamic_dimension_actual(sym):
            # Extra debug iteration dims take precedence over tensor shapes.
            if sym in debug_extra_dimensions:
                return debug_extra_dimensions[sym]
            arg_idx, dim = self.symbols_args_map[sym]
            return args[arg_idx].shape[dim]

        # First pass: collect all extra iteration dimensions before any
        # debug-buffer shape is resolved.
        if self.debug_outputs:
            for info in self.debug_outputs:
                extra_dims = info.get("extra_iteration_dimensions", None)
                if extra_dims:
                    for dim_symbol, _, size in extra_dims:
                        debug_extra_dimensions[dim_symbol] = size

        # Second pass: allocate one zeroed buffer per debug output.
        if self.debug_outputs:
            for info in self.debug_outputs:
                shape = [
                    self.options.subs.get(symdim, None)
                    or get_dynamic_dimension_actual(symdim)
                    for symdim in info["symbolic_shape"]
                ]
                # NOTE(review): debug buffers are allocated on 'cuda'
                # unconditionally — confirm for non-CUDA backends.
                memory = torch.zeros(
                    shape, dtype=wave_dtype_to_torch(info["dtype"]), device="cuda"
                )
                log_info = {
                    "value": memory,
                    "symbolic_shape": info["symbolic_shape"],
                    "iteration_dimensions": [
                        dim for dim, _, _ in info["extra_iteration_dimensions"]
                    ],
                }
                debug_args.append(memory)
                debug_logs[info["symbol_name"]] = log_info
        kernel_outputs = kernel_outputs + debug_args

        dynamic_symbols = [
            get_dynamic_dimension_actual(sym) for sym in self.options.dynamic_symbols
        ]

        if self.options.wave_runtime:
            invoke_with_wave_runtime(
                self.options,
                kernel_inputs,
                kernel_outputs,
                scalar_args,
                self.bound_scalar_symbols,
                dynamic_symbols,
                self.gpu_func,
            )
        else:
            tensors = [t.data for t in chain(kernel_inputs, kernel_outputs)]
            self.launchable(*tensors, *scalar_args)

        if self.options.run_bench:
            benchmark_flags = get_benchmark_flags(self.options)
            benchmark_results = benchmark_module(
                self.options,
                [t.data for t in kernel_inputs],
                [t.data for t in kernel_outputs],
                self.executable,
                self.func_name,
                **benchmark_flags,
            )
            print_bench_result(
                benchmark_results, self.options.benchmark_results_file
            )

        # Emit collected debug logs through the configured printers/handlers.
        if self.debug_outputs:
            for info, (label, debug_log) in zip(
                self.debug_outputs, debug_logs.items()
            ):
                if (printer := info.get("printer", None)):
                    printer(label, debug_log["value"])
        for handler in self.debug_handlers or []:
            handler(debug_logs)
        return self.asm
class WaveKernel: ''' Represents a wave kernel that can be invoked by the user. ''' def __init__(self, options: WaveCompileOptions, executable: Any, asm: str, gpu_binary_path: Optional[str], bound_scalar_symbols: dict[IndexSymbol, int], symbols_args_map: dict[IndexSymbol, tuple[int, int]], trace: Optional['CapturedTrace']=None, debug_outputs: Optional[Sequence[DebugArgInfo]]=None, debug_handlers: Optional[Sequence[Any]]=None): pass def loader(device): pass def get_trace(self) -> Optional['CapturedTrace']: '''Returns the trace used to generate this kernel. If this is a cached kernel, the trace is not available. ''' pass def __call__(self, *args, **kwargs): pass def invoke(self, *args, **kwargs): ''' Invokes the wave kernel with the given arguments. Returns the assembly code of the compiled kernel. ''' pass def get_dynamic_dimension_actual(sym): pass
7
3
28
3
23
3
5
0.16
0
11
4
1
4
12
4
4
170
21
129
55
110
20
80
43
72
18
0
4
27
327,634
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/compile.py
wave_lang.kernel.wave.compile.WaveKernelWithProfile
class WaveKernelWithProfile(WaveKernel): def __call__(self, *args, **kwargs): return invoke_with_profile(self.options, self.invoke, *args, **kwargs)
class WaveKernelWithProfile(WaveKernel): def __call__(self, *args, **kwargs): pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
5
4
1
3
2
1
0
3
2
1
1
1
0
1
327,635
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/compile_options.py
wave_lang.kernel.wave.compile_options.WaveCompileOptions
from dataclasses import dataclass, field
from typing import Any, Optional

from .._support.indexing import IndexSymbol
from .._support.location_config import LocationCaptureConfig
from ..compiler.kernel_codegen import KernelBufferUsage
from .scheduling.schedule_enums import SchedulingType
from .utils.classes import KernelLaunchInfo


@dataclass
class WaveCompileOptions:
    """
    Options for compiling the wave kernel.

    Field order is part of the positional-init interface and is preserved.
    Annotation fixes below are type-only; the single behavioral fix is the
    ``subs`` default factory (see inline comment).
    """

    # --- Lowering / codegen ---
    postprocess: Optional[str] = None
    canonicalize: bool = True
    func_name: str = "isolated_benchmark"
    # Fixed: annotated as a dict and consumed via ``options.subs.get(...)``
    # by callers, but the default factory was ``list`` — a list default has
    # no ``.get`` and would crash on first lookup.
    subs: dict[str | IndexSymbol, Any] = field(default_factory=dict)
    # NOTE(review): elements are looked up as index symbols by callers —
    # confirm whether ``list[IndexSymbol]`` is the intended element type.
    dynamic_symbols: list[str] = field(default_factory=list)
    # Fixed annotation: default is a SchedulingType, not a bool.
    schedule: SchedulingType = SchedulingType.NONE
    use_scheduling_barriers: bool = False
    multi_buffer_count: Optional[int] = None
    kernel_launch_info: KernelLaunchInfo = field(default_factory=KernelLaunchInfo)
    # Fixed annotation: optional, variable-length tuple.
    kernel_usages: Optional[tuple[KernelBufferUsage, ...]] = None

    # --- Target / backend ---
    backend: str = "rocm"
    target: str = "gfx942"
    iree_preprocessing_pass_pipeline: Optional[str] = None

    # --- Benchmarking / profiling ---
    run_bench: bool = False
    benchmark_batch_size: Optional[int] = None
    benchmark_repetitions: Optional[int] = None
    benchmark_results_file: Optional[str] = None
    capture_trace: bool = False
    bench_with_constant_weights: bool = False
    profile_python_wrapper: bool = False
    profile_python_cprofile: bool = True
    profile_python_warmup: int = 1
    profile_python_repetitions: int = 1000

    # --- Caching / artifacts ---
    kernel_hash: Optional[str] = None
    create_vmfb_file: Optional[str] = None
    override_mlir: Optional[str] = None
    dump_binaries: Optional[str] = None
    # Fixed annotation: defaults to False, but holds a path string when set.
    dump_intermediates: str | bool = False
    compile_to_mlir: bool = False

    # --- Diagnostics / misc ---
    location_capture_config: LocationCaptureConfig = field(
        default_factory=LocationCaptureConfig
    )
    use_local_scope: bool = False
    use_water_leak_check: bool | str = False
    optimization_level: bool = True
    denorm_fp_math_f32: Optional[str] = None
    waves_per_eu: Optional[int] = None
    wave_runtime: bool = False
    iree_launch_async: bool = True
    use_buffer_ops: bool = False
    use_fast_math: bool = False
    use_global_to_shared: bool = False
    minimize_shared_allocs: bool = True
    reorder_allocs: bool = True
    override_schedule: Optional[str] = None
    dump_schedule: Optional[str] = None

    # --- Printing options ---
    mlir_print_ir_after_all: bool = False
    print_ir_after: list[str] = field(default_factory=list)
    print_ir_before: list[str] = field(default_factory=list)
    profile_pass: list[str] = field(default_factory=list)
    print_trace_begin: bool = False
    print_grid: bool = False
    print_signature: bool = False
    print_mlir: bool = False
    print_mlir_file: Optional[str] = None
    print_pass_times: bool = False
@dataclass class WaveCompileOptions: ''' Options for compiling the wave kernel. ''' pass
2
1
0
0
0
0
0
0.3
0
0
0
0
0
0
0
0
83
11
57
55
56
17
55
55
54
0
0
0
0
327,636
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.Constraint
from dataclasses import dataclass from abc import ABC, abstractmethod from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @dataclass class Constraint(ABC): """ Base class for constraints. Every constraint reduces to the following form: Variables: [x0, x1, ...., xN] Bounds: [lb0 <= x0 <= ub0, ..., lbN <= xN <= ubN] Equality Constraints: [f0(x0, ..., xN) = 0, f1(x0, ..., xN) = 0, ...] Inequality Constraints: [g0(x0, ..., xN) <= 0, g1(x0, ..., xN) <= 0, ...] """ @abstractmethod def apply(self) -> IndexSequence: """Apply the constraint and get the resulting index sequence.""" ...
@dataclass class Constraint(ABC): ''' Base class for constraints. Every constraint reduces to the following form: Variables: [x0, x1, ...., xN] Bounds: [lb0 <= x0 <= ub0, ..., lbN <= xN <= ubN] Equality Constraints: [f0(x0, ..., xN) = 0, f1(x0, ..., xN) = 0, ...] Inequality Constraints: [g0(x0, ..., xN) <= 0, g1(x0, ..., xN) <= 0, ...] ''' @abstractmethod def apply(self) -> IndexSequence: '''Apply the constraint and get the resulting index sequence.''' pass
4
2
3
0
2
1
1
2.25
1
1
1
2
1
0
1
21
14
1
4
3
1
9
3
2
1
1
4
0
1
327,637
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.DeviceConstraint
from typing import Callable, Optional from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass from .utils.symbol_utils import get_min_expr, subs_idxc from sympy import Integer, Piecewise, ceiling, floor @dataclass class DeviceConstraint(DistributionConstraint): """ A constraint of the form `tkw.DeviceConstraint(M, DEVICE_M, <device dimension>)` specifies that we want to distribute dimension M along the device with a tile size of DEVICE_M. This translates to an index constraint for all tensors of the shape [M, ?] -> index += (device_id * DEVICE_M, 0), where device_id is the id of the device on which the tensor is located. Device id is a tuple of (dev_id_x, devid_y, dev_id_z) where dev_id_x is the id of the device in the x dimension, dev_id_y is the id of the device in the y dimension and dev_id_z is the id of the device in the z dimension. The device id is used to compute the index offset for the tensor along the specified dimension. Device constraints can be applied for multiple dimensions. For example, constraint += [tkw.DeviceConstraint(M, DEVICE_M, 0)] constraint += [tkw.DeviceConstraint(N, DEVICE_N, 1)] specifies that we want to distribute DEVICE_M x DEVICE_N tiles of the tensor across the devices, where DEVICE_M and DEVICE_N are the tile sizes for dimensions M and N respectively. """ dim: IndexExpr tile_size: Optional[IndexExpr] = None device_dim: int = 0 def __post_init__(self): self.dev_dim = None match self.device_dim: case 0: self.dev_dim = DEVICE_DIM_0 case 1: self.dev_dim = DEVICE_DIM_1 case 2: self.dev_dim = DEVICE_DIM_2 case _: raise ValueError('Invalid workgroup dimension. Expected 0, 1 or 2.') def apply(self) -> IndexSequence: """ Apply the device constraint and return the index sequence. 
For single device_id: returns device_id * tile_size """ if self.dev_dim is None: raise ValueError('Index is being computed without setting device dimension') return IndexSequence(self.dev_dim * self.tile_size, 1) @property def count(self) -> IndexExpr: """ Returns an expression for the total number of devices for the specific device_dim. """ return ceiling(self.dim / self.tile_size) @property def work_bound(self) -> IndexExpr: """ Returns the work bound for device constraint. For device constraints, the work bound is simply the device's starting position plus its tile size, representing the range of data this device will process. """ return self.apply().start + self.tile_size @property def dim_bound(self) -> IndexExpr: """ Returns the actual dimension size being distributed. """ return self.dim def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: """ Returns the index bound for safe memory access. For device constraints, bounds are needed when vector shapes don't align with tile sizes to prevent out-of-bounds access. """ bound = None if vector_shape is not None and vector_shape > 1 and (subs_idxc(self.tile_size) % vector_shape != 0): bound = self.apply().start + self.tile_size return bound
@dataclass
class DeviceConstraint(DistributionConstraint):
    """
    A constraint of the form `tkw.DeviceConstraint(M, DEVICE_M, <device dimension>)`
    specifies that we want to distribute dimension M along the device with a
    tile size of DEVICE_M.  This translates to an index constraint for all
    tensors of the shape [M, ?] -> index += (device_id * DEVICE_M, 0), where
    device_id is the id of the device on which the tensor is located.

    Device id is a tuple (dev_id_x, dev_id_y, dev_id_z) of the device's
    coordinates in the x, y and z dimensions, used to compute the index
    offset for the tensor along the specified dimension.  Device constraints
    can be applied to multiple dimensions.
    """

    def __post_init__(self):
        pass

    def apply(self) -> IndexSequence:
        """
        Apply the device constraint and return the index sequence.

        For single device_id: returns device_id * tile_size
        """
        pass

    @property
    def count(self) -> IndexExpr:
        """
        Returns an expression for the total number of devices for the specific device_dim.
        """
        pass

    @property
    def work_bound(self) -> IndexExpr:
        """
        Returns the work bound for device constraint: the device's starting
        position plus its tile size.
        """
        pass

    @property
    def dim_bound(self) -> IndexExpr:
        """
        Returns the actual dimension size being distributed.
        """
        pass

    def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]:
        """
        Returns the index bound for safe memory access.  Needed when vector
        shapes don't align with tile sizes, to prevent out-of-bounds access.
        """
        pass
11
6
10
1
5
4
2
1.14
1
3
1
0
6
1
6
30
94
15
37
14
24
42
26
12
18
4
6
1
11
327,638
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.DistributionConstraint
from dataclasses import dataclass from typing import Callable, Optional from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @dataclass class DistributionConstraint(Constraint): """ Base class for constraints that distribute a dimension across a workgroup or reduction loop. """ @property def work_bound(self) -> IndexExpr: """ Returns the work bound for the constraint. It may be different from the dimension of the tensor if the dimensions is not divisible by the tile size. """ raise NotImplementedError('Subclasses must implement this method') @property def dim_bound(self) -> IndexExpr: """ Returns the dimension bound for the constraint, which is usually an actual dimension of the tensor. """ raise NotImplementedError('Subclasses must implement this method') def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: """ Returns the index bound for the constraint, which is usually an actual dimension of the tensor. If bounds is not needed (i.e. tile/vector sizes are perfectly aligned to the tensor dimension), return None. """ raise NotImplementedError('Subclasses must implement this method')
@dataclass class DistributionConstraint(Constraint): ''' Base class for constraints that distribute a dimension across a workgroup or reduction loop. ''' @property def work_bound(self) -> IndexExpr: ''' Returns the work bound for the constraint. It may be different from the dimension of the tensor if the dimensions is not divisible by the tile size. ''' pass @property def dim_bound(self) -> IndexExpr: ''' Returns the dimension bound for the constraint, which is usually an actual dimension of the tensor. ''' pass def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: ''' Returns the index bound for the constraint, which is usually an actual dimension of the tensor. If bounds is not needed (i.e. tile/vector sizes are perfectly aligned to the tensor dimension), return None. ''' pass
7
4
8
1
2
5
1
2.11
1
2
0
4
3
0
3
24
33
5
9
6
3
19
7
4
3
1
5
0
3
327,639
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.GenericDot
from dataclasses import dataclass from sympy import Integer, Piecewise, ceiling, floor from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @dataclass class GenericDot: """ mma implemented through vector dot products intead of hw intrinsics. `out_vec_size`: size of the output matrix vector `k_vec_size`: size of the reduction dimension vector `k_mult`: number of reduction dimension vectors """ out_vec_size: int = 1 k_vec_size: int = 4 k_mult: int = 1 along_dim: MMAOperand = MMAOperand.N def __post_init__(self): if self.along_dim != MMAOperand.M and self.along_dim != MMAOperand.N: raise ValueError(f"Invalid 'along_dim': {self.along_dim}. Must be 'MMAOperand.M' or 'MMAOperand.N'.") def get_shape(self, threads_per_wave: int) -> tuple[int, int, int]: m = self.out_vec_size n = threads_per_wave // self.k_mult k = self.k_vec_size * self.k_mult if self.along_dim == MMAOperand.N: return (m, n, k) else: return (n, m, k) def get_index_offset(self, lane: IndexExpr, threads_per_wave: int) -> tuple[IndexExpr, IndexExpr, IndexExpr]: m = Piecewise((lane % self.out_vec_size, ~MMA_ACC), (0, MMA_ACC)) n = lane // self.k_mult k = lane % self.k_mult * self.k_vec_size if self.along_dim == MMAOperand.N: return (m, n, k) else: return (n, m, k) def get_index_size(self, threads_per_wave: int) -> tuple[IndexExpr, IndexExpr, IndexExpr]: m = Piecewise((1, ~MMA_ACC), (self.out_vec_size, MMA_ACC)) n = 1 k = self.k_vec_size if self.along_dim == MMAOperand.N: return (m, n, k) else: return (n, m, k) def get_index_stride(self, threads_per_wave: int) -> tuple[IndexExpr, IndexExpr, IndexExpr]: m = Piecewise((1, ~MMA_ACC), (threads_per_wave // self.k_mult, MMA_ACC)) n = 1 k = self.k_vec_size if self.along_dim == MMAOperand.N: return (m, n, k) else: return (n, m, k) def __hash__(self): return hash((self.out_vec_size, self.k_vec_size, self.k_mult, self.along_dim))
@dataclass class GenericDot: ''' mma implemented through vector dot products intead of hw intrinsics. `out_vec_size`: size of the output matrix vector `k_vec_size`: size of the reduction dimension vector `k_mult`: number of reduction dimension vectors ''' def __post_init__(self): pass def get_shape(self, threads_per_wave: int) -> tuple[int, int, int]: pass def get_index_offset(self, lane: IndexExpr, threads_per_wave: int) -> tuple[IndexExpr, IndexExpr, IndexExpr]: pass def get_index_size(self, threads_per_wave: int) -> tuple[IndexExpr, IndexExpr, IndexExpr]: pass def get_index_stride(self, threads_per_wave: int) -> tuple[IndexExpr, IndexExpr, IndexExpr]: pass def __hash__(self): pass
8
1
8
0
8
0
2
0.12
0
4
1
0
6
0
6
6
64
8
50
29
37
6
38
23
31
2
0
1
11
327,640
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.HardwareConstraint
from typing import Callable, Optional from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from .._support.dtype import DataType from dataclasses import dataclass from sympy import Integer, Piecewise, ceiling, floor @dataclass class HardwareConstraint(Constraint): """ A constraint of the form tkw.HardwareConstraint(threads_per_wave = N, mma_type = 'MFMA_F32_16x16x16_F16') specifies that the hardware supports N threads per wave and that we want all mma operations in the microkernel to be mapped to a hardware mma instruction of shape (16x16x16). This translates to a hardware specific index constraint. Not all computation graphs have mma operators in them. In these situations, the user can specify the vector shape they want to tile to by specifying the vector shapes dictionary which maps a tensor dimension to its corresponding tile size. Both mma constraints and vector shapes can be specified, but the mapping from symbols to shapes should be injective. """ threads_per_wave: int waves_per_block: Optional[tuple[int, int, int]] = None mma_type: Optional[MMAType | ScaledMMAType] = MMAType.F32_16x16x16_F16 vector_shapes: Optional[dict[IndexSymbol, int]] = None max_bits_per_load: int = 128 def max_elems_per_load(self, element_type: DataType) -> int: return self.max_bits_per_load // element_type.bitwidth() def get_thread_id_from_workgroup_dim(self, workgroup_dim: int) -> IndexSymbol: match workgroup_dim: case 0: return THREAD_0 case 1: return THREAD_1 case 2: return THREAD_2 case _: raise ValueError('Invalid workgroup dimension. 
Expected 0, 1 or 2.') def mma_matrix_shapes(self, mma_type: Optional[MMAType | ScaledMMAType]) -> tuple[int]: if mma_type is None: mma_type = self.mma_type match mma_type: case GenericDot(): return mma_type.get_shape(self.threads_per_wave) case MMAType.F32_16x16x16_F16 | MMAType.I32_16x16x16_I8: return (16, 16, 16) case MMAType.F32_32x32x8_F16 | MMAType.I32_32x32x8_I8: return (32, 32, 8) case MMAType.F32_16x16x32_F8 | MMAType.F32_16x16x32_BF16 | MMAType.F32_16x16x32_F16 | MMAType.F32_16x16x32_K8_F16 | MMAType.F32_16x16x32_K4_F8 | MMAType.I32_16x16x32_I8: return (16, 16, 32) case MMAType.F32_32x32x16_F8 | MMAType.F32_32x32x16_BF16 | MMAType.F32_32x32x16_F16 | MMAType.F32_32x32x16_K8_F16 | MMAType.F32_32x32x16_K4_F8 | MMAType.I32_32x32x16_I8: return (32, 32, 16) case ScaledMMAType.F32_16x16x128_F8F6F4: return (16, 16, 128) case ScaledMMAType.F32_32x32x64_F8F6F4: return (32, 32, 64) case _: raise ValueError(f'Unsupported MMA type: {mma_type}') def mma_index_offset(self, mma_type: Optional[MMAType | ScaledMMAType]): lane = self.linearized_thread_id % self.threads_per_wave if mma_type is None: mma_type = self.mma_type match mma_type: case GenericDot(): offset = mma_type.get_index_offset(lane, self.threads_per_wave) case MMAType.F32_16x16x16_F16 | MMAType.I32_16x16x16_I8: offset = [Piecewise((lane % 16, ~MMA_ACC), (4 * floor(lane / 16), MMA_ACC)), lane % 16, 4 * floor(lane / 16)] case MMAType.F32_32x32x8_F16 | MMAType.I32_32x32x8_I8: offset = [Piecewise((lane % 32, ~MMA_ACC), (8 * floor(GPR_NUM / 4) % 32 + 4 * floor(lane / 32) + GPR_NUM % 4, MMA_ACC)), lane % 32, 4 * floor(lane / 32)] case MMAType.F32_16x16x32_F8 | MMAType.F32_16x16x32_BF16 | MMAType.F32_16x16x32_F16 | MMAType.F32_16x16x32_K8_F16 | MMAType.F32_16x16x32_K4_F8 | MMAType.I32_16x16x32_I8: offset = [Piecewise((lane % 16, ~MMA_ACC), (4 * floor(lane / 16), MMA_ACC)), lane % 16, 8 * floor(lane / 16)] if mma_type == MMAType.F32_16x16x32_K4_F8: offset = [Piecewise((lane % 16, ~MMA_ACC), (4 * floor(lane / 16), 
MMA_ACC)), lane % 16, 16 * floor(GPR_NUM / 4) + 4 * floor(lane / 16) + GPR_NUM % 4] case MMAType.F32_32x32x16_F8 | MMAType.F32_32x32x16_BF16 | MMAType.F32_32x32x16_F16 | MMAType.F32_32x32x16_K8_F16 | MMAType.F32_32x32x16_K4_F8 | MMAType.I32_32x32x16_I8: offset = [Piecewise((lane % 32, ~MMA_ACC), (8 * floor(GPR_NUM / 4) % 32 + 4 * floor(lane / 32) + GPR_NUM % 4, MMA_ACC)), lane % 32, 8 * floor(lane / 32)] if mma_type == MMAType.F32_32x32x16_K4_F8: offset = [Piecewise((lane % 32, ~MMA_ACC), (8 * floor(GPR_NUM / 4) % 32 + 4 * floor(lane / 32) + GPR_NUM % 4, MMA_ACC)), lane % 32, 8 * floor(GPR_NUM / 4) + 4 * floor(lane / 32) + GPR_NUM % 4] case ScaledMMAType.F32_16x16x128_F8F6F4: offset = [Piecewise((lane % 16, ~MMA_ACC), (4 * floor(lane / 16), MMA_ACC)), lane % 16, Piecewise((64 * floor(GPR_NUM / 16) + 16 * floor(lane / 16) + GPR_NUM % 16, ~(MMA_LHS_SCALE | MMA_RHS_SCALE | MMA_SCALE_FP4)), (32 * floor(lane / 16), MMA_LHS_SCALE | MMA_RHS_SCALE | MMA_SCALE_FP4))] case ScaledMMAType.F32_32x32x64_F8F6F4: offset = [Piecewise((lane % 32, ~MMA_ACC), (8 * floor(GPR_NUM / 4) % 32 + 4 * floor(lane / 32) + GPR_NUM % 4, MMA_ACC)), lane % 32, 32 * floor(lane / 32)] case _: raise ValueError('Unsupported MMA type') return offset @property def threads_per_block(self) -> tuple[int]: return (self.waves_per_block[0] * self.threads_per_wave,) + self.waves_per_block[1:] @property def linearized_thread_id(self) -> IndexExpr: thread_ids = [THREAD_0, THREAD_1, THREAD_2] threads_per_block = [1, self.threads_per_block[0], self.threads_per_block[0] * self.threads_per_block[1]] return sum([x * y for x, y in zip(thread_ids, threads_per_block)]) def subs_vector_shapes(self, index_map: dict[IndexSymbol, int]): if self.vector_shapes is None: return for vector_dim, vector_size in self.vector_shapes.items(): if isinstance(vector_size, IndexExpr): self.vector_shapes[vector_dim] = vector_size.subs(index_map) def apply(self): assert False, 'Call either apply_read_write_thread_mapping or 
apply_mma_mapping' def apply_read_write_thread_mapping(self, dim: IndexSymbol, workgroup_dim: int, elements_per_thread: int | IndexSymbol, stride: int) -> IndexSequence: thread_id = self.get_thread_id_from_workgroup_dim(workgroup_dim) threads_per_dim = self.threads_per_wave if workgroup_dim == 0 else 1 thread_id = thread_id % threads_per_dim return IndexSequence(thread_id * elements_per_thread, elements_per_thread, stride) def apply_mma_mapping(self, dim: IndexSymbol, constraint_index: int | MMAOperand, mma_type: MMAType | ScaledMMAType) -> IndexSequence: if mma_type is None: mma_type = self.mma_type offset = self.mma_index_offset(mma_type) match mma_type: case GenericDot(): size = mma_type.get_index_size(self.threads_per_wave) stride = mma_type.get_index_stride(self.threads_per_wave) case MMAType.F32_16x16x16_F16 | MMAType.I32_16x16x16_I8: size = [Piecewise((1, ~MMA_ACC), (4, MMA_ACC)), 1, 4] stride = [Piecewise((1, ~MMA_ACC), (16, MMA_ACC)), 1, 1] case MMAType.F32_32x32x8_F16 | MMAType.I32_32x32x8_I8: size = [Piecewise((1, ~MMA_ACC), (16, MMA_ACC)), 1, 4] stride = [Piecewise((1, ~MMA_ACC), (32, MMA_ACC)), 1, 1] case MMAType.F32_16x16x32_F8 | MMAType.F32_16x16x32_BF16 | MMAType.F32_16x16x32_F16 | MMAType.F32_16x16x32_K8_F16 | MMAType.F32_16x16x32_K4_F8 | MMAType.I32_16x16x32_I8: size = [Piecewise((1, ~MMA_ACC), (4, MMA_ACC)), 1, 8] stride = [Piecewise((1, ~MMA_ACC), (16, MMA_ACC)), 1, 1] case MMAType.F32_32x32x16_F8 | MMAType.F32_32x32x16_BF16 | MMAType.F32_32x32x16_F16 | MMAType.F32_32x32x16_K8_F16 | MMAType.F32_32x32x16_K4_F8 | MMAType.I32_32x32x16_I8: size = [Piecewise((1, ~MMA_ACC), (16, MMA_ACC)), 1, 8] stride = [Piecewise((1, ~MMA_ACC), (32, MMA_ACC)), 1, 1] case ScaledMMAType.F32_16x16x128_F8F6F4: size = [Piecewise((1, ~MMA_ACC), (4, MMA_ACC)), 1, 32] stride = [Piecewise((1, ~MMA_ACC), (16, MMA_ACC)), 1, 1] case ScaledMMAType.F32_32x32x64_F8F6F4: size = [Piecewise((1, ~MMA_ACC), (16, MMA_ACC)), 1, 32] stride = [Piecewise((1, ~MMA_ACC), (32, MMA_ACC)), 1, 1] 
case _: raise ValueError('Unsupported MMA type') assert isinstance(constraint_index, MMAOperand), f'Invalid MMA operand {constraint_index}' return IndexSequence(offset[constraint_index.value], size[constraint_index.value], stride[constraint_index.value])
@dataclass class HardwareConstraint(Constraint): ''' A constraint of the form tkw.HardwareConstraint(threads_per_wave = N, mma_type = 'MFMA_F32_16x16x16_F16') specifies that the hardware supports N threads per wave and that we want all mma operations in the microkernel to be mapped to a hardware mma instruction of shape (16x16x16). This translates to a hardware specific index constraint. Not all computation graphs have mma operators in them. In these situations, the user can specify the vector shape they want to tile to by specifying the vector shapes dictionary which maps a tensor dimension to its corresponding tile size. Both mma constraints and vector shapes can be specified, but the mapping from symbols to shapes should be injective. ''' def max_elems_per_load(self, element_type: DataType) -> int: pass def get_thread_id_from_workgroup_dim(self, workgroup_dim: int) -> IndexSymbol: pass def mma_matrix_shapes(self, mma_type: Optional[MMAType | ScaledMMAType]) -> tuple[int]: pass def mma_index_offset(self, mma_type: Optional[MMAType | ScaledMMAType]): pass @property def threads_per_block(self) -> tuple[int]: pass @property def linearized_thread_id(self) -> IndexExpr: pass def subs_vector_shapes(self, index_map: dict[IndexSymbol, int]): pass def apply(self): pass def apply_read_write_thread_mapping(self, dim: IndexSymbol, workgroup_dim: int, elements_per_thread: int | IndexSymbol, stride: int) -> IndexSequence: pass def apply_mma_mapping(self, dim: IndexSymbol, constraint_index: int | MMAOperand, mma_type: MMAType | ScaledMMAType) -> IndexSequence: pass
14
1
31
0
30
7
4
0.28
1
11
6
0
10
0
10
31
350
17
308
40
228
85
85
29
70
11
5
2
43
327,641
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.IteratorBindings
from dataclasses import dataclass from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @dataclass class IteratorBindings: """Manages binding of target dimensions to iterators""" def __init__(self, bindings: dict[IndexSymbol, IndexSymbol]): self.bindings = bindings def __repr__(self): return f'IteratorBindings({self.bindings})'
@dataclass class IteratorBindings: '''Manages binding of target dimensions to iterators''' def __init__(self, bindings: dict[IndexSymbol, IndexSymbol]): pass def __repr__(self): pass
4
1
2
0
2
0
1
0.2
0
1
0
0
2
1
2
2
8
2
5
4
2
1
5
4
2
1
0
0
2
327,642
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.MMAOperand
from enum import Enum class MMAOperand(Enum): M = 0 N = 1 K = 2
class MMAOperand(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
4
0
4
4
3
0
4
4
3
0
4
0
0
327,643
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.MMAType
from enum import Enum class MMAType(Enum): F32_16x16x16_F16 = 4128 F32_32x32x8_F16 = 4129 F32_16x16x32_K8_F16 = 4130 F32_32x32x16_K8_F16 = 4131 I32_16x16x16_I8 = 4288 I32_32x32x8_I8 = 4289 F32_16x16x32_F8 = 4656 F32_32x32x16_F8 = 4657 F32_16x16x32_K4_F8 = 4658 F32_32x32x16_K4_F8 = 4659 I32_16x16x32_I8 = 4800 I32_32x32x16_I8 = 4801 F32_32x32x16_BF16 = 4896 F32_16x16x32_BF16 = 4897 F32_32x32x16_F16 = 4898 F32_16x16x32_F16 = 4899
class MMAType(Enum): pass
1
0
0
0
0
0
0
0.15
1
0
0
0
0
0
0
49
16
1
13
13
12
2
13
13
12
0
4
0
0
327,644
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.ReorderingConstraint
from dataclasses import dataclass from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol @dataclass class ReorderingConstraint: """ A constraint of the form `tkw.ReorderingConstraint(new_wg0, 0)` specifies how workgroups are mapped to data along workgroup dim 0, according to the 'new_wg0' expression. The internal indexing of waves and threads within the workgroup do not change. The assumption is that each workgroup dimension has already been distributed by each WorkgroupConstraint, and since a ReorderingConstraint only shifts the positioning of workgroups after this, this class does not extend DistributionConstraint. """ reordered_equation: IndexExpr workgroup_dim: int def __post_init__(self): self.wg_dim = None match self.workgroup_dim: case 0 | 1 | 2: self.wg_dim = get_workgroup_symbol(self.workgroup_dim) case _: raise ValueError('Invalid workgroup dimension. Expected 0, 1, 2')
@dataclass class ReorderingConstraint: ''' A constraint of the form `tkw.ReorderingConstraint(new_wg0, 0)` specifies how workgroups are mapped to data along workgroup dim 0, according to the 'new_wg0' expression. The internal indexing of waves and threads within the workgroup do not change. The assumption is that each workgroup dimension has already been distributed by each WorkgroupConstraint, and since a ReorderingConstraint only shifts the positioning of workgroups after this, this class does not extend DistributionConstraint. ''' def __post_init__(self): pass
3
1
7
0
7
0
2
0.9
0
1
0
0
1
1
1
1
21
2
10
3
7
9
8
4
5
2
0
0
2
327,645
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.ScaledMMAType
from enum import Enum class ScaledMMAType(Enum): F32_16x16x128_F8F6F4 = 4928 F32_32x32x64_F8F6F4 = 4929
class ScaledMMAType(Enum): pass
1
0
0
0
0
0
0
0.33
1
0
0
0
0
0
0
49
4
0
3
3
2
1
3
3
2
0
4
0
0
327,646
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.TilingConstraint
from typing import Callable, Optional from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass from .utils.symbol_utils import get_min_expr, subs_idxc from sympy import Integer, Piecewise, ceiling, floor @dataclass class TilingConstraint(DistributionConstraint): """ A constraint of the form `tkw.TilingConstraint(K, BLOCK_K)` specifies that we want to tile the K dimension with a tile size of BLOCK_K. This adds an index constraint to the K-th dimension of a tensor of the form BLOCK_K * i, where i is the induction variable associated with the loop around dimension K. """ dim: IndexExpr tile_size: Optional[IndexExpr] = None induction_var: Optional[IndexExpr] = None iters: Optional[IndexExpr] = None start: IndexExpr = Integer(0) def __post_init__(self): if self.tile_size is None: self.tile_size = 1 def __eq__(self, value): if not isinstance(value, TilingConstraint): return False return self.dim == value.dim and self.tile_size == value.tile_size and (self.induction_var == value.induction_var) and (self.iters == value.iters) @property def count(self) -> IndexExpr: """ Returns an expression for the number of iterations in the loop. 
""" if self.iters: return self.iters return ceiling(self.dim / self.tile_size) def apply(self) -> IndexSequence: if self.induction_var is None: raise ValueError('Index is being computed without setting induction variable') return IndexSequence(self.start + self.induction_var * self.tile_size, 1) @property def work_bound(self) -> IndexExpr: return self.start + self.count * self.tile_size @property def dim_bound(self) -> IndexExpr: return self.dim def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: bound = None if subs_idxc(self.work_bound) != subs_idxc(self.dim_bound): bound = self.dim_bound if vector_shape is not None and vector_shape > 1 and (subs_idxc(self.tile_size) % vector_shape != 0): tile_bound = self.apply().start + self.tile_size bound = get_min_expr(bound, tile_bound) return bound
@dataclass class TilingConstraint(DistributionConstraint): ''' A constraint of the form `tkw.TilingConstraint(K, BLOCK_K)` specifies that we want to tile the K dimension with a tile size of BLOCK_K. This adds an index constraint to the K-th dimension of a tensor of the form BLOCK_K * i, where i is the induction variable associated with the loop around dimension K. ''' def __post_init__(self): pass def __eq__(self, value): pass @property def count(self) -> IndexExpr: ''' Returns an expression for the number of iterations in the loop. ''' pass def apply(self) -> IndexSequence: pass @property def work_bound(self) -> IndexExpr: pass @property def dim_bound(self) -> IndexExpr: pass def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: pass
12
2
7
0
5
1
2
0.28
1
3
1
0
7
0
7
31
70
10
47
17
36
13
33
14
25
3
6
1
13
327,647
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.WaveConstraint
from typing import Callable, Optional from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass from .utils.symbol_utils import get_min_expr, subs_idxc from sympy import Integer, Piecewise, ceiling, floor @dataclass class WaveConstraint(DistributionConstraint): """ A constraint of the form `tkw.WaveConstraint(K, WAVE_K)` specifies that we want distribute the K dimension among multiple waves which each wave operating on a tile size of WAVE_K. The assumption is that the K dimension has already been distributed among workgroups. If the K dimension has been distributed among workgroups with a tile size of BLOCK_K, then the number of waves along the K dimension is given by BLOCK_K // WAVE_K. This constraint adds an index constraint to the K-th dimension of a a tensor of the form WAVE_K * wave_id. The index of the wave is determined by the following mapping: workgroup id 0 -> wave/thread id x workgroup id 1 -> wave/thread id y workgroup id 2 -> wave/thread id z (If the tensor dimension has been distributed along workgroup dimension {0, 1, 2}, then the corresponding thread id is {x, y, z}). Because we represent the number of threads per block as [wave_id_0 * threads_per_wave, wave_id_1, wave_id_2], special care is required when computing wave_id_0. 
Specifically, wave_id_0 = floor(thread_id_0 / threads_per_wave) wave_id_1 = thread_id_1 wave_id_2 = thread_id_2 """ dim: IndexExpr tile_size: IndexExpr wave_id: Optional[IndexExpr | int] = None wg_constraint: Optional[WorkgroupConstraint] = None def apply(self) -> IndexSequence: if self.wave_id is None: raise ValueError('Index is being computed without setting wave id') return IndexSequence(self.tile_size * self.wave_id, 1) def set_wave_id_from_hardware_and_workgroup_constraint(self, hardware_constraint: HardwareConstraint, workgroup_constraint: WorkgroupConstraint): """ The wave_id is the same as the thread_id, with the exception of wave_id[0] = thread_id[0] / threads_per_wave This is a convention that we adopt. """ old_wave_id = self.wave_id assert self.dim == workgroup_constraint.dim, 'Dimension mismatch' self.wave_id = hardware_constraint.get_thread_id_from_workgroup_dim(workgroup_constraint.workgroup_dim) if workgroup_constraint.workgroup_dim == 0: self.wave_id = floor(self.wave_id / hardware_constraint.threads_per_wave) assert old_wave_id is None or self.wave_id == old_wave_id, f'Conflicting preset wave_id old: {old_wave_id} new: {self.wave_id}' self.wg_constraint = workgroup_constraint def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: bound = None if vector_shape is not None and vector_shape > 1 and (subs_idxc(self.tile_size) % vector_shape != 0): bound = self.wg_constraint.apply().start + self.apply().start + self.tile_size return bound @property def waves_per_block(self) -> IndexExpr: if not self.wg_constraint: raise ValueError('Wave constraint has no workgroup constraint') return ceiling(self.wg_constraint.tile_size / self.tile_size) @property def workgroup_dim(self) -> int: if not self.wg_constraint: raise ValueError('Wave constraint has no workgroup constraint') return self.wg_constraint.workgroup_dim
@dataclass class WaveConstraint(DistributionConstraint): ''' A constraint of the form `tkw.WaveConstraint(K, WAVE_K)` specifies that we want distribute the K dimension among multiple waves which each wave operating on a tile size of WAVE_K. The assumption is that the K dimension has already been distributed among workgroups. If the K dimension has been distributed among workgroups with a tile size of BLOCK_K, then the number of waves along the K dimension is given by BLOCK_K // WAVE_K. This constraint adds an index constraint to the K-th dimension of a a tensor of the form WAVE_K * wave_id. The index of the wave is determined by the following mapping: workgroup id 0 -> wave/thread id x workgroup id 1 -> wave/thread id y workgroup id 2 -> wave/thread id z (If the tensor dimension has been distributed along workgroup dimension {0, 1, 2}, then the corresponding thread id is {x, y, z}). Because we represent the number of threads per block as [wave_id_0 * threads_per_wave, wave_id_1, wave_id_2], special care is required when computing wave_id_0. Specifically, wave_id_0 = floor(thread_id_0 / threads_per_wave) wave_id_1 = thread_id_1 wave_id_2 = thread_id_2 ''' def apply(self) -> IndexSequence: pass def set_wave_id_from_hardware_and_workgroup_constraint(self, hardware_constraint: HardwareConstraint, workgroup_constraint: WorkgroupConstraint): ''' The wave_id is the same as the thread_id, with the exception of wave_id[0] = thread_id[0] / threads_per_wave This is a convention that we adopt. ''' pass def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: pass @property def waves_per_block(self) -> IndexExpr: pass @property def workgroup_dim(self) -> int: pass
9
2
10
0
8
1
2
0.65
1
5
3
0
5
0
5
29
86
10
46
16
34
30
30
10
24
2
6
1
10
327,648
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/constraints.py
wave_lang.kernel.wave.constraints.WorkgroupConstraint
from typing import Callable, Optional from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from dataclasses import dataclass from .utils.symbol_utils import get_min_expr, subs_idxc from sympy import Integer, Piecewise, ceiling, floor @dataclass class WorkgroupConstraint(DistributionConstraint): """ A constraint of the form `tkw.WorkgroupConstraint(M, BLOCK_M, 0)` specifies that we want to distribute dimension M along workgroup dim 0 with a tile size of BLOCK_M resulting in M // BLOCK_M workgroups along that dimension. This translates to an index constraint for all tensors of the shape [M, ?] -> index += (workgroup_id_0 * BLOCK_M, 0) """ dim: IndexExpr tile_size: IndexExpr workgroup_dim: int apply_fn: Optional[Callable] = None primary: Optional[bool] = True iters: Optional[IndexExpr | int] = None per_device_dim: Optional[IndexExpr] = None def __post_init__(self): self.wg_dim = None match self.workgroup_dim: case 0 | 1 | 2 | 3 | 4: self.wg_dim = get_workgroup_symbol(self.workgroup_dim) case _: raise ValueError('Invalid workgroup dimension. Expected 0, 1, 2, 3 or 4.') self.per_device_dim = self.dim @property def count(self) -> IndexExpr: """ Returns an expression for the total number of workgroups for the specific workgroup_dim. """ if self.iters: return self.iters return ceiling(self.per_device_dim / self.tile_size) def set_per_device_dim(self, per_device_dim: IndexExpr): """ Sets the per device dimensions for the workgroup constraint. This is used to determine the total number of workgroups per device. 
""" self.per_device_dim = per_device_dim def apply(self) -> IndexSequence: if self.apply_fn: return IndexSequence(self.apply_fn(self.wg_dim), 1) return IndexSequence(self.wg_dim * self.tile_size, 1) @property def work_bound(self) -> IndexExpr: return self.count * self.tile_size @property def dim_bound(self) -> IndexExpr: return self.per_device_dim def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: bound = None if subs_idxc(self.work_bound) != subs_idxc(self.dim_bound): bound = self.dim_bound if vector_shape is not None and vector_shape > 1 and (subs_idxc(self.tile_size) % vector_shape != 0): tile_bound = self.apply().start + self.tile_size bound = get_min_expr(bound, tile_bound) return bound
@dataclass class WorkgroupConstraint(DistributionConstraint): ''' A constraint of the form `tkw.WorkgroupConstraint(M, BLOCK_M, 0)` specifies that we want to distribute dimension M along workgroup dim 0 with a tile size of BLOCK_M resulting in M // BLOCK_M workgroups along that dimension. This translates to an index constraint for all tensors of the shape [M, ?] -> index += (workgroup_id_0 * BLOCK_M, 0) ''' def __post_init__(self): pass @property def count(self) -> IndexExpr: ''' Returns an expression for the total number of workgroups for the specific workgroup_dim. ''' pass def set_per_device_dim(self, per_device_dim: IndexExpr): ''' Sets the per device dimensions for the workgroup constraint. This is used to determine the total number of workgroups per device. ''' pass def apply(self) -> IndexSequence: pass @property def work_bound(self) -> IndexExpr: pass @property def dim_bound(self) -> IndexExpr: pass def get_index_bound(self, vector_shape: Optional[int]) -> Optional[IndexExpr]: pass
12
3
7
1
5
1
2
0.36
1
3
1
0
7
1
7
31
76
12
47
18
35
17
36
16
27
3
6
1
12
327,649
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/debug_log_hoist.py
wave_lang.kernel.wave.debug_log_hoist.DebugArgInfo
from .._support.dtype import DataType from .._support.indexing import IndexSymbol from typing import TypedDict, Any class DebugArgInfo(TypedDict): symbol_name: str debug_output_arg_id: int dtype: DataType symbolic_shape: tuple[IndexSymbol, ...] printer: Any
class DebugArgInfo(TypedDict): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
6
0
6
1
5
0
6
1
5
0
1
0
0
327,650
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/gather_to_shared.py
wave_lang.kernel.wave.gather_to_shared.GatherToSharedConfig
from dataclasses import dataclass from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol, xor @dataclass class GatherToSharedConfig: materialized_shape: list[IndexSymbol] elements_per_thread: int expected_number_of_loads: int
@dataclass class GatherToSharedConfig: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
1
3
0
4
1
3
0
0
0
0
327,651
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/in_thread_transpose.py
wave_lang.kernel.wave.in_thread_transpose.TransposeConfig
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from typing import Optional, Sequence from dataclasses import dataclass @dataclass class TransposeConfig: """ Configuration for in-thread transpose. """ load_elems_per_thread: int expected_number_of_loads: int expected_number_of_stores: int store_elems_per_thread: int src_symbolic_shape: Sequence[IndexSymbol] dst_symbolic_shape: Sequence[IndexSymbol] materialized_shape: Sequence[IndexSymbol]
@dataclass class TransposeConfig: ''' Configuration for in-thread transpose. ''' pass
2
1
0
0
0
0
0
0.38
0
0
0
0
0
0
0
0
12
1
8
1
7
3
8
1
7
0
0
0
0
327,652
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/minimize_global_loads.py
wave_lang.kernel.wave.minimize_global_loads.SharedReadMetadata
from .._support.indexing import IndexExpr, IndexSequence, IndexSymbol from ..lang.wave_types import IndexMapping from dataclasses import dataclass @dataclass class SharedReadMetadata: index: dict[IndexSymbol, IndexSequence] mapping: IndexMapping memory_shape: tuple[int | IndexExpr]
@dataclass class SharedReadMetadata: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
1
3
0
4
1
3
0
0
0
0
327,653
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/nn/linear.py
wave_lang.kernel.wave.nn.linear.WaveLinear
from torch import nn import torch import math class WaveLinear(nn.Module): """Fork of nn.Linear implementation but modified to handle Wave Kernel""" def __init__(self, in_features, out_features, bias=True, device=None, dtype=None): device = device or torch.device('cuda:0') dtype = dtype or torch.float16 if device.type != 'cuda': raise ValueError(f'{self.__class__.__name__} only support GPU device.') if dtype not in LINEAR_SUPPORTED_DTYPE: raise ValueError(f'{self.__class__.__name__} does not support dtype: {dtype}.') factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.empty((out_features, in_features), **factory_kwargs)) if bias: self.bias = nn.Parameter(torch.empty(out_features, **factory_kwargs)) else: self.register_parameter('bias', None) self.reset_parameters() self.kernel = get_linear_kernel([in_features, out_features], use_bias=bias) def reset_parameters(self) -> None: nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(self.bias, -bound, bound) def forward(self, input: torch.Tensor) -> torch.Tensor: assert len(input.shape) >= 2 input_len = input.shape[-2] batch = input.shape[0:-2] flat_batch = math.prod(batch) out_features = self.weight.shape[0] output_shape = [flat_batch, input_len, out_features] output = torch.empty(output_shape, dtype=self.weight.dtype, device=self.weight.device) if self.bias is None: self.kernel(input.view(flat_batch, input_len, input.shape[-1]), self.weight, output) else: self.kernel(input.view(flat_batch, input_len, input.shape[-1]), self.weight, self.bias, output) return output.view(*batch, input_len, out_features) def extra_repr(self) -> str: return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'
class WaveLinear(nn.Module): '''Fork of nn.Linear implementation but modified to handle Wave Kernel''' def __init__(self, in_features, out_features, bias=True, device=None, dtype=None): pass def reset_parameters(self) -> None: pass def forward(self, input: torch.Tensor) -> torch.Tensor: pass def extra_repr(self) -> str: pass
5
1
17
2
13
2
3
0.17
1
3
0
0
4
5
4
4
72
10
53
19
48
9
38
19
33
4
1
1
10
327,654
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/nn/quant_linear.py
wave_lang.kernel.wave.nn.quant_linear.WaveQuantLinear
from torch import nn import torch import warnings import math class WaveQuantLinear(nn.Module): """Fork of nn.Linear implementation but modified to handle Wave Kernel""" def __init__(self, in_features, out_features, quant_params, bias=True, device=None, dtype=None): device = device or torch.device('cuda:0') dtype = dtype or torch.float16 if device.type != 'cuda': raise ValueError(f'{self.__class__.__name__} only support GPU device.') if dtype not in LINEAR_SUPPORTED_DTYPE: raise ValueError(f'{self.__class__.__name__} does not support dtype: {dtype}.') factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.empty((out_features, in_features), **factory_kwargs)) if bias: self.bias = nn.Parameter(torch.empty(out_features, **factory_kwargs)) else: self.register_parameter('bias', None) self.reset_parameters() self.weight_scale, self.input_scale, self.qdtype = extract_quant_params(quant_params) if self.weight_scale.numel() != 1 or self.input_scale.numel() != 1: raise ValueError('Only per-tensor quantization is currently supported') if self.qdtype != torch.float8_e4m3fnuz: warnings.warn('Untested quantization type') self.kernel = get_quant_linear_kernel([in_features, out_features], dtype, [self.weight_scale, self.input_scale, self.qdtype], use_bias=bias) if bias: raise ValueError('Bias is currently not supported') def reset_parameters(self) -> None: nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(self.bias, -bound, bound) def forward(self, input: torch.Tensor) -> torch.Tensor: assert len(input.shape) >= 2 input_len = input.shape[-2] batch = input.shape[0:-2] flat_batch = math.prod(batch) out_features = self.weight.shape[0] output_shape = [flat_batch, input_len, out_features] output = 
torch.empty(output_shape, dtype=self.weight.dtype, device=self.weight.device) if self.bias is None: self.kernel(input.view(flat_batch, input_len, input.shape[-1]), self.weight, output) else: self.kernel(input.view(flat_batch, input_len, input.shape[-1]), self.weight, self.bias, output) return output.view(*batch, input_len, out_features) def extra_repr(self) -> str: return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'
class WaveQuantLinear(nn.Module): '''Fork of nn.Linear implementation but modified to handle Wave Kernel''' def __init__(self, in_features, out_features, quant_params, bias=True, device=None, dtype=None): pass def reset_parameters(self) -> None: pass def forward(self, input: torch.Tensor) -> torch.Tensor: pass def extra_repr(self) -> str: pass
5
1
22
1
19
2
3
0.12
1
3
0
0
4
8
4
4
93
9
75
28
62
9
45
20
40
7
1
1
13
327,655
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/schedule_reordering.py
wave_lang.kernel.wave.schedule_reordering.CompatibleBlockSize
from dataclasses import dataclass @dataclass class CompatibleBlockSize: block_m: int block_n: int block_k: int bitwidth: int mma_type: type
@dataclass class CompatibleBlockSize: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0
6
1
5
0
6
1
5
0
0
0
0
327,656
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/schedule_reordering.py
wave_lang.kernel.wave.schedule_reordering.InsertionMode
from enum import Enum class InsertionMode(Enum): BEFORE = 0 AFTER = 1
class InsertionMode(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
3
0
3
3
2
0
3
3
2
0
4
0
0
327,657
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/schedule_reordering.py
wave_lang.kernel.wave.schedule_reordering.InsertionPoint
from typing import Iterable, Dict, List from .utils.general_utils import flatten_list, get_hardware_constraint, is_shared_read, topological_sort_with_dependencies import torch.fx as fx class InsertionPoint(object): """ Helper class to keep track of movements/insertion of ops into very precise/specific locations before or after an another op (referred here as "anchor op".) """ def __init__(self, mode: InsertionMode, op: fx.Node, anchor_op: fx.Node | Iterable[fx.Node]): if not isinstance(mode, InsertionMode): raise ValueError('Unexpected insetion mode.') self.mode = mode if isinstance(anchor_op, fx.Node): self.anchor_op = anchor_op elif isinstance(anchor_op, Iterable): if mode == InsertionMode.AFTER: self.anchor_op = flatten_list(anchor_op)[-1] else: self.anchor_op = flatten_list(anchor_op)[0] else: raise ValueError('Unexpected src type') if not isinstance(op, fx.Node): raise ValueError('Unexpected op type.') self.op = op @property def graph(self): return self.anchor_op.graph
class InsertionPoint(object): ''' Helper class to keep track of movements/insertion of ops into very precise/specific locations before or after an another op (referred here as "anchor op".) ''' def __init__(self, mode: InsertionMode, op: fx.Node, anchor_op: fx.Node | Iterable[fx.Node]): pass @property def graph(self): pass
4
1
12
1
10
1
4
0.32
1
2
1
0
2
3
2
2
33
4
22
9
16
7
16
6
13
6
1
2
7
327,658
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/schedule_reordering.py
wave_lang.kernel.wave.schedule_reordering.SchedReorderStrategy
from enum import Enum class SchedReorderStrategy(Enum): NONE = 0 TWO_PP_CLUSTER = 544 MXFP4_PP_CLUSTER = 257
class SchedReorderStrategy(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
4
0
4
4
3
0
4
4
3
0
4
0
0
327,659
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/four_stage_pipelined_scheduling.py
wave_lang.kernel.wave.scheduling.four_stage_pipelined_scheduling.FourStageScheduler
from .graph_utils import Edge, sort_graph_by_edge_weight from .scheduler_utils import get_scheduling_stage, BaseScheduler, is_single_mma_source, is_mma_node import torch.fx as fx class FourStageScheduler(BaseScheduler): """ Four Stage Pipelined Scheduler Precondition: Only a single MMA instruction group is allowed for this scheduling approach Convert vanilla schedule of: for i = 0 to N: a = READ_GLOBAL i WRITE_SHARED a barrier b = READ_SHARED COMPUTE b let SM be shared memory, then SM[0] SM[1] are the multibuffers into mega pipelined schedule: a_0 = READ_GLOBAL 0 WRITE_SHARED a_0 SM[0] a_1 = READ_GLOBAL 1 b_0 = READ_SHARED SM[0] WRITE_SHARED a_1 SM[1] a_2 = READ_GLOBAL 2 for i = 0 to N -3: COMPUTE b_i b_{i+1} = READ_SHARED SM[i+1 %2] WRITE_SHARED a_{i+2} SM[i%2] a_{i+3} = READ_GLOBAL i+3 barrier COMPUTE b_{n-2} b_{n-1} = READ_SHARED SM[n-1 %2] WRITE_SHARED a_{n} SM[n % 2] COMPUTE b_{n-1} b_{n} = READ_SHARED SM[n %2] COMPUTE b_n """ def four_stage_scheduling(self, graph: fx.Graph, edges: list[Edge]) -> tuple[dict[fx.Node, int], bool]: """ Classify node to different stages. Based on its stage, program schedules the node to a specific cycle for each node. This function also checks that the sorted nodes move contiguously through expected stages. 
""" sorted_nodes = sort_graph_by_edge_weight(graph.nodes, edges) schedule = {} current_stage = get_scheduling_stage(sorted_nodes[0], _operation_stage_table) all_mma_nodes = list() current_stage_idx = 0 for node in sorted_nodes: if is_mma_node(node): all_mma_nodes.append(node) node_stage = get_scheduling_stage(node, _operation_stage_table) if node_stage in [current_stage, FourStageStage.SCHEDULING_NOOP]: schedule[node] = current_stage_idx elif FourStageStage.is_valid_transition(current_stage, node_stage): current_stage_idx += 1 schedule[node] = current_stage_idx current_stage = node_stage else: logger.warning(f'No valid transition from {current_stage} to {node_stage} for node {node}') return ({}, False) if not is_single_mma_source(all_mma_nodes): logger.warning('Structure of kernel is different than expected, only one MMA is present') return ({}, False) return (schedule, True) def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: """ 1. Identify which nodes are part of the global_read/local_write/local_read/compute phase 2. Set nodes to clock (0,1,2,3) based on phase. 3. Set initiation interval to 1. """ self.schedule, success = self.four_stage_scheduling(self.graph, self.edges) self._initiation_interval = 1 return (self.schedule, success)
class FourStageScheduler(BaseScheduler): ''' Four Stage Pipelined Scheduler Precondition: Only a single MMA instruction group is allowed for this scheduling approach Convert vanilla schedule of: for i = 0 to N: a = READ_GLOBAL i WRITE_SHARED a barrier b = READ_SHARED COMPUTE b let SM be shared memory, then SM[0] SM[1] are the multibuffers into mega pipelined schedule: a_0 = READ_GLOBAL 0 WRITE_SHARED a_0 SM[0] a_1 = READ_GLOBAL 1 b_0 = READ_SHARED SM[0] WRITE_SHARED a_1 SM[1] a_2 = READ_GLOBAL 2 for i = 0 to N -3: COMPUTE b_i b_{i+1} = READ_SHARED SM[i+1 %2] WRITE_SHARED a_{i+2} SM[i%2] a_{i+3} = READ_GLOBAL i+3 barrier COMPUTE b_{n-2} b_{n-1} = READ_SHARED SM[n-1 %2] WRITE_SHARED a_{n} SM[n % 2] COMPUTE b_{n-1} b_{n} = READ_SHARED SM[n %2] COMPUTE b_n ''' def four_stage_scheduling(self, graph: fx.Graph, edges: list[Edge]) -> tuple[dict[fx.Node, int], bool]: ''' Classify node to different stages. Based on its stage, program schedules the node to a specific cycle for each node. This function also checks that the sorted nodes move contiguously through expected stages. ''' pass def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: ''' 1. Identify which nodes are part of the global_read/local_write/local_read/compute phase 2. Set nodes to clock (0,1,2,3) based on phase. 3. Set initiation interval to 1. ''' pass
3
3
24
2
17
6
4
1.26
1
7
2
0
2
2
2
5
94
17
34
14
29
43
26
12
23
6
1
2
7
327,660
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/four_stage_pipelined_scheduling.py
wave_lang.kernel.wave.scheduling.four_stage_pipelined_scheduling.FourStageStage
from enum import Enum, auto class FourStageStage(Enum): GLOBAL_LOAD = auto() LOCAL_STORE = auto() LOCAL_LOAD = auto() COMPUTE = auto() SCHEDULING_NOOP = -1 @staticmethod def is_valid_transition(from_stage: 'FourStageStage', to_stage: 'FourStageStage') -> bool: if from_stage == to_stage: return True return (from_stage, to_stage) in _four_stage_stage_transition_table
class FourStageStage(Enum): @staticmethod def is_valid_transition(from_stage: 'FourStageStage', to_stage: 'FourStageStage') -> bool: pass
3
0
6
0
6
0
2
0.08
1
1
0
0
0
0
1
50
15
1
13
10
8
1
10
7
8
2
4
1
2
327,661
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/graph_utils.py
wave_lang.kernel.wave.scheduling.graph_utils.Edge
import torch.fx as fx from dataclasses import dataclass @dataclass class Edge: _from: fx.Node = None _to: fx.Node = None weight: EdgeWeight = None
@dataclass class Edge: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
4
3
0
4
4
3
0
0
0
0
327,662
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/graph_utils.py
wave_lang.kernel.wave.scheduling.graph_utils.EdgeWeight
from dataclasses import dataclass @dataclass class EdgeWeight: iteration_difference: int = 0 delay: int = 0
@dataclass class EdgeWeight: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
3
0
3
3
2
0
3
3
2
0
0
0
0
327,663
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/loop_reconstruction.py
wave_lang.kernel.wave.scheduling.loop_reconstruction.PipelineStage
from enum import Enum class PipelineStage(Enum): PROLOGUE = 0 KERNEL = 1 EPILOGUE = 2
class PipelineStage(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
4
0
4
4
3
0
4
4
3
0
4
0
0
327,664
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/loop_reconstruction_utils.py
wave_lang.kernel.wave.scheduling.loop_reconstruction_utils.ArgumentContext
from ...ops.wave_ops import GatherToLDS, GetResult, IterArg, Iterate, Write, get_custom from typing import Optional, Sequence import torch.fx as fx class ArgumentContext: """ The argument context is used to store the mapping of arguments for each modulo pipelining stage. """ def __init__(self, results: list[fx.Node], iter_args: list[fx.Node], init_args: list[fx.Node], num_stages: int) -> None: self.argument_map: list[list[dict[fx.Node, fx.Node]]] = [[{} for _ in range(num_stages)] for _ in range(num_stages)] self.results = results self.iter_args = iter_args self.init_args = init_args self.num_stages = num_stages self.num_iterations = num_stages self.result_to_iter_arg: dict[fx.Node, fx.Node] = {} self.result_to_init_arg: dict[fx.Node, fx.Node] = {} for result, iter_arg in zip(results, iter_args): self.result_to_iter_arg[result] = iter_arg for result, init_arg in zip(results, init_args): self.result_to_init_arg[result] = init_arg def map_arg_all(self, from_: fx.Node, to_: fx.Node | Sequence[fx.Node]) -> None: """ Maps the given argument from one to another into the argument context for all stages and for all iterations. """ if isinstance(to_, Sequence): count = len(to_) for iteration in range(self.num_iterations): for stage in range(self.num_stages): self.argument_map[iteration][stage][from_] = to_[iteration % count] else: for iteration in range(self.num_iterations): for stage in range(self.num_stages): self.argument_map[iteration][stage][from_] = to_ def map_arg_all_after_iteration(self, from_: fx.Node, to_: fx.Node, iteration: int) -> None: """ Maps the given argument from one to another into the argument context for all stages after the specified iteration. 
""" for iteration in range(iteration + 1, self.num_iterations): for stage in range(self.num_stages): self.argument_map[iteration][stage][from_] = to_ def map_arg_all_iterations(self, stage: int, from_: fx.Node, to_: fx.Node) -> None: """ Maps the given argument from one to another into the argument context for all stages and for all iterations. """ for iteration in range(self.num_iterations): self.argument_map[iteration][stage][from_] = to_ def get_mapped_results(self, get_results: list[GetResult]) -> list[fx.Node]: """ Gets the mapped results from the last iteration. If the result is not in the last iteration, then get it from the get result nodes. """ mapped_results = [] for result, get_result in zip(self.results, get_results): stage = result.scheduling_parameters['stage'] if result not in self.argument_map[self.num_iterations - 1][stage]: mapped_results.append(get_result.fx_node) else: mapped_results.append(self.argument_map[self.num_iterations - 1][stage][result]) return mapped_results def get_kernel_iteration(self, stage: int) -> int: """ Get the iteration from the stage for the kernel. """ return self.num_stages - 1 - stage def get_kernel_results(self) -> list[fx.Node]: """ Gets the mapped results for the kernel. Here there exists a fixed relationship between the iteration and stage. """ mapped_results = [] for result in self.results: stage = result.scheduling_parameters['stage'] iteration = self.get_kernel_iteration(stage) mapped_results.append(self.argument_map[iteration][stage][result]) return mapped_results def __setitem__(self, key: tuple[int, fx.Node], value: fx.Node) -> None: """ Sets the argument mapping for the given stage. 
""" assert isinstance(key, tuple), 'Argument context key must be a tuple' iteration, stage, from_ = key assert iteration < len(self.argument_map), f'Iteration {iteration} not yet initialized' assert stage < len(self.argument_map), f'Stage {stage} not yet initialized' self.argument_map[iteration][stage][from_] = value def __getitem__(self, value: tuple[int, fx.Node]) -> fx.Node: """ Gets the argument mapping for the given stage. """ assert isinstance(value, tuple), 'Argument context key must be a tuple' iteration, stage, key = value assert iteration < len(self.argument_map), f'Iteration {iteration} not yet initialized' assert stage < len(self.argument_map), f'Stage {stage} not yet initialized' return self.argument_map[iteration][stage].get(key, None) def __contains__(self, key: fx.Node | tuple[int, fx.Node]) -> bool: """ Checks if the argument context contains the given node at a specified iteration and stage or at all iterations and stages. """ if isinstance(key, tuple): iteration, stage, key = key return key in self.argument_map[iteration][stage] return any((key in self.argument_map[iteration][stage] for iteration in range(self.num_iterations) for stage in range(self.num_stages))) def lookup(self, key: fx.Node) -> Optional[fx.Node]: """ Looks up the argument mapping for the given node. """ for iteration in range(self.num_iterations - 1, -1, -1): for stage in range(self.num_stages): if key in self.argument_map[iteration][stage]: return self.argument_map[iteration][stage][key] return None def contains_in_iteration(self, iteration: int, key: fx.Node) -> bool: """ Checks if the argument context contains the given node at a specified iteration. """ return any((key in self.argument_map[iteration][stage] for stage in range(self.num_stages))) def get_from_iteration(self, iteration: int, key: fx.Node, stage: int) -> fx.Node: """ Gets the argument mapping for the given iteration with preference to the given stage. 
""" if stage and key in self.argument_map[iteration][stage]: return self.argument_map[iteration][stage][key] for stage in range(self.num_stages): if key in self.argument_map[iteration][stage]: return self.argument_map[iteration][stage][key] return None def dump(self): """ Dump the argument context to the logger. """ for iteration in range(self.num_iterations): for stage in range(self.num_stages): logger.debug(f'Iteration: {iteration}, Stage: {stage}') for key, value in self.argument_map[iteration][stage].items(): logger.debug(f' {key} -> {value}')
class ArgumentContext: ''' The argument context is used to store the mapping of arguments for each modulo pipelining stage. ''' def __init__(self, results: list[fx.Node], iter_args: list[fx.Node], init_args: list[fx.Node], num_stages: int) -> None: pass def map_arg_all(self, from_: fx.Node, to_: fx.Node | Sequence[fx.Node]) -> None: ''' Maps the given argument from one to another into the argument context for all stages and for all iterations. ''' pass def map_arg_all_after_iteration(self, from_: fx.Node, to_: fx.Node, iteration: int) -> None: ''' Maps the given argument from one to another into the argument context for all stages after the specified iteration. ''' pass def map_arg_all_iterations(self, stage: int, from_: fx.Node, to_: fx.Node) -> None: ''' Maps the given argument from one to another into the argument context for all stages and for all iterations. ''' pass def get_mapped_results(self, get_results: list[GetResult]) -> list[fx.Node]: ''' Gets the mapped results from the last iteration. If the result is not in the last iteration, then get it from the get result nodes. ''' pass def get_kernel_iteration(self, stage: int) -> int: ''' Get the iteration from the stage for the kernel. ''' pass def get_kernel_results(self) -> list[fx.Node]: ''' Gets the mapped results for the kernel. Here there exists a fixed relationship between the iteration and stage. ''' pass def __setitem__(self, key: tuple[int, fx.Node], value: fx.Node) -> None: ''' Sets the argument mapping for the given stage. ''' pass def __getitem__(self, value: tuple[int, fx.Node]) -> fx.Node: ''' Gets the argument mapping for the given stage. ''' pass def __contains__(self, key: fx.Node | tuple[int, fx.Node]) -> bool: ''' Checks if the argument context contains the given node at a specified iteration and stage or at all iterations and stages. ''' pass def lookup(self, key: fx.Node) -> Optional[fx.Node]: ''' Looks up the argument mapping for the given node. 
''' pass def contains_in_iteration(self, iteration: int, key: fx.Node) -> bool: ''' Checks if the argument context contains the given node at a specified iteration. ''' pass def get_from_iteration(self, iteration: int, key: fx.Node, stage: int) -> fx.Node: ''' Gets the argument mapping for the given iteration with preference to the given stage. ''' pass def dump(self): ''' Dump the argument context to the logger. ''' pass
15
14
11
0
8
3
3
0.46
0
7
0
0
14
8
14
14
178
17
110
54
87
51
85
45
70
6
0
3
37
327,665
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/modulo_scheduling.py
wave_lang.kernel.wave.scheduling.modulo_scheduling.ModuloScheduler
import numpy as np import torch.fx as fx from .graph_utils import Edge, all_pairs_longest_paths_evaluated, all_pairs_longest_paths_unevaluated, find_cycles_in_scc, find_strongly_connected_components, topological_sort, topological_sort_nodes from .scheduler_utils import BaseScheduler from typing import Callable class ModuloScheduler(BaseScheduler): """ Vanilla Modulo Scheduler. References: [1] Aho, Alfred V., et al. "Compilers: Principles, Techniques, and Tools." """ def __init__(self, graph: fx.Graph, edges: list[Edge], resources: list[int]) -> None: super().__init__(graph, edges, resources) self.cached_edges_to = {} self.cached_edges_from = {} def get_edge(self, from_node: fx.Node, to_node: fx.Node) -> Edge: """ Returns the edge between two nodes. """ for edge in self.edges: if edge._from == from_node and edge._to == to_node: return edge return None def get_edges_from_scheduled_node(self, edges: list[tuple[fx.Node, fx.Node]], to_node: fx.Node) -> list[tuple[fx.Node, fx.Node]]: """ Returns the edges that originate from a scheduled node and end in the specified node from the list of provided edges. """ if to_node not in self.cached_edges_to: self.cached_edges_to[to_node] = [(from_, to_node) for from_, to_ in edges if to_ == to_node] return [(from_, to_node) for from_, _ in self.cached_edges_to[to_node] if from_ in self.schedule] def get_edges_to_scheduled_node(self, edges: list[tuple[fx.Node, fx.Node]], from_node: fx.Node) -> list[tuple[fx.Node, fx.Node]]: """ Returns the edges that end in a scheduled node and originate from the specified node from the list of provided edges. """ if from_node not in self.cached_edges_from: self.cached_edges_from[from_node] = [(from_node, to_) for from_, to_ in edges if from_ == from_node] return [(from_node, to_) for _, to_ in self.cached_edges_from[from_node] if to_ in self.schedule] def all_scc_scheduled(self, sccs: dict[fx.Node, list[fx.Node]]) -> bool: """ Checks if all strongly connected components have been scheduled. 
""" for scc in sccs.values(): for node in scc: if node not in self.schedule: return False return True def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: """ Schedule the graph using the Modulo Scheduler. Returns a schedule which maps each node to a cycle. """ sccs = find_strongly_connected_components(self.graph, self.seed) logger.debug(f'Found {len(sccs)} strongly connected components.') for leader, nodes in sccs.items(): logger.debug(f'Leader: {leader} owns {nodes} with finishing times {[x.f for x in nodes]}.') self.e_prime = self.find_edges(lambda edge: edge.weight.iteration_difference == 0) T0 = int(max(self.compute_resource_ii(), self.compute_recurrence_ii(sccs))) T_max_range = 3 * T0 success = False self.e_star_symbolic = all_pairs_longest_paths_unevaluated(self.graph, self.edges) for T in range(T0, T0 + T_max_range): logger.debug(f'Trying initiation interval: {T}.') self.RT = np.zeros((T, len(self.resources))) self.e_star = all_pairs_longest_paths_evaluated(self.graph, self.e_star_symbolic, T) logger.debug(f'All Pairs Longest Paths: {self.e_star}.') self.schedule: dict[fx.Node, int] = {} for _, scc in topological_sort(sccs).items(): logger.debug(f'Scheduling SCC: {scc}.') s0 = {} for node in scc: candidate_edges = self.get_edges_from_scheduled_node(self.e_star.keys(), node) s0[node] = 0 if candidate_edges: s0[node] = max((self.e_star[from_node, to_node] + self.schedule[from_node] for from_node, to_node in candidate_edges)) first = min(s0, key=s0.get) s0 = s0[first] for s in range(s0, s0 + T): if self.scc_scheduled(self.RT, T, scc, first, s): logger.debug(f'Scheduled SCC: {scc} at time slot: {s}.') logger.debug(f'Current RRT:\n {self.RT}.') break else: logger.debug(f'Failed to schedule SCC: {scc}.') break if self.all_scc_scheduled(sccs): success = True logger.debug(f'Successfully scheduled all SCCs with initiation interval: {T}.') break else: raise Exception('Failed to schedule the graph.') self._initiation_interval = T return (self.schedule, 
success) def scc_scheduled(self, RT: np.array, T: int, scc: list[fx.Node], first: int, s: int) -> bool: """ Tries to schedule the strongly connected component at time slot s. The nodes in the scc are scheduled in topological order based on the edges in E'. """ RT_prime = np.array(RT) if not self.node_scheduled(RT_prime, T, first, s): logger.debug(f'Failed to schedule first node: {first}.') return False for node in topological_sort_nodes(scc, self.e_prime, [first]): logger.debug(f'Trying to schedule node: {node}.') sl = max([self.schedule[from_node] + self.e_star[from_node, to_node] for from_node, to_node in self.get_edges_from_scheduled_node(self.e_star.keys(), node)]) su = min([self.schedule[to_node] - self.e_star[from_node, to_node] for from_node, to_node in self.get_edges_to_scheduled_node(self.e_star.keys(), node)]) logger.debug(f'Lower bound: {sl}, Upper bound: {su}.') for s in range(sl, min(su, sl + T - 1) + 1): if self.node_scheduled(RT_prime, T, node, s): logger.debug(f'Scheduled node: {node} at time slot: {s}.') break else: logger.debug(f'Failed to schedule node: {node}.') return False RT[:] = np.array(RT_prime) return True def node_scheduled(self, RT: np.array, T: int, node: fx.Node, s: int) -> bool: """ Checks for possible resource conflicts in the steady-state. """ RT_prime = np.array(RT) for i in range(node.rrt.shape[0]): RT_prime[(s + i) % T] += node.rrt[i] if np.all(RT_prime <= self.resources): logger.debug(f'Scheduled node: {node} at time slot: {s}.') self.schedule[node] = s RT[:] = np.array(RT_prime) return True return False def compute_resource_ii(self) -> int: """ Compute the resource constrained initiation interval. 
""" usage = np.zeros(len(self.resources)) for node in self.graph.nodes: usage += np.sum(node.rrt, axis=0) usage /= self.resources logger.debug(f'Resource constrained initiation interval: {np.max(usage)}.') return np.max(usage) def compute_recurrence_ii(self, scc: dict[fx.Node, list[fx.Node]]) -> int: """ Compute the recurrence constrained initiation interval. """ cycles = find_cycles_in_scc(scc) rec_ii = -1 for cycle in cycles: delay, iteration_delay = (0, 0) for from_node, to_node in zip(cycle[:-1], cycle[1:]): edge = self.get_edge(from_node, to_node) if edge is None: continue delay += edge.weight.delay iteration_delay += edge.weight.iteration_difference rec_ii = max(rec_ii, delay / iteration_delay) logger.debug(f'Recurrence constrained initiation interval: {rec_ii}.') return rec_ii def find_edges(self, filter: Callable[[Edge], bool]) -> list[Edge]: filtered = [] for edge in self.edges: if filter(edge): filtered.append(edge) return filtered @property def resource_reservations(self) -> np.array: """ Returns the resource reservations of the schedule. """ return self.RT
class ModuloScheduler(BaseScheduler): ''' Vanilla Modulo Scheduler. References: [1] Aho, Alfred V., et al. "Compilers: Principles, Techniques, and Tools." ''' def __init__(self, graph: fx.Graph, edges: list[Edge], resources: list[int]) -> None: pass def get_edge(self, from_node: fx.Node, to_node: fx.Node) -> Edge: ''' Returns the edge between two nodes. ''' pass def get_edges_from_scheduled_node(self, edges: list[tuple[fx.Node, fx.Node]], to_node: fx.Node) -> list[tuple[fx.Node, fx.Node]]: ''' Returns the edges that originate from a scheduled node and end in the specified node from the list of provided edges. ''' pass def get_edges_to_scheduled_node(self, edges: list[tuple[fx.Node, fx.Node]], from_node: fx.Node) -> list[tuple[fx.Node, fx.Node]]: ''' Returns the edges that end in a scheduled node and originate from the specified node from the list of provided edges. ''' pass def all_scc_scheduled(self, sccs: dict[fx.Node, list[fx.Node]]) -> bool: ''' Checks if all strongly connected components have been scheduled. ''' pass def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: ''' Schedule the graph using the Modulo Scheduler. Returns a schedule which maps each node to a cycle. ''' pass def scc_scheduled(self, RT: np.array, T: int, scc: list[fx.Node], first: int, s: int) -> bool: ''' Tries to schedule the strongly connected component at time slot s. The nodes in the scc are scheduled in topological order based on the edges in E'. ''' pass def node_scheduled(self, RT: np.array, T: int, node: fx.Node, s: int) -> bool: ''' Checks for possible resource conflicts in the steady-state. ''' pass def compute_resource_ii(self) -> int: ''' Compute the resource constrained initiation interval. ''' pass def compute_recurrence_ii(self, scc: dict[fx.Node, list[fx.Node]]) -> int: ''' Compute the recurrence constrained initiation interval. 
''' pass def find_edges(self, filter: Callable[[Edge], bool]) -> list[Edge]: pass @property def resource_reservations(self) -> np.array: ''' Returns the resource reservations of the schedule. ''' pass
14
11
19
1
15
3
3
0.24
1
10
1
0
12
8
12
15
246
20
182
73
152
44
124
52
111
9
1
4
39
327,666
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/optimize_schedule.py
wave_lang.kernel.wave.scheduling.optimize_schedule.OptimizationAlgorithm
from enum import Enum, auto class OptimizationAlgorithm(Enum): HILL_CLIMBING = auto()
class OptimizationAlgorithm(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
2
0
2
2
1
0
2
2
1
0
4
0
0
327,667
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/optimize_schedule.py
wave_lang.kernel.wave.scheduling.optimize_schedule.OptimizationResult
from typing import Callable, Dict, List, Optional, Tuple from dataclasses import dataclass @dataclass class OptimizationResult: schedule: Dict latency: float iterations: int algorithm: OptimizationAlgorithm improvement_history: List[float]
@dataclass class OptimizationResult: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0
6
1
5
0
6
1
5
0
0
0
0
327,668
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/optimize_schedule.py
wave_lang.kernel.wave.scheduling.optimize_schedule.ScheduleOptimizer
import random from .verifier import ScheduleValidator as ScheduleModifier from wave_lang.kernel.wave.scheduling.resources import Operation, get_custom_operation_type from wave_lang.kernel.wave.tuner.utils import format_latency_us, latency_to_us import numpy as np from wave_lang.kernel.ops.wave_ops import get_custom from typing import Callable, Dict, List, Optional, Tuple import logging class ScheduleOptimizer: def __init__(self, validator: ScheduleModifier, measure_fn: Callable[[Dict], float], algorithm: OptimizationAlgorithm=OptimizationAlgorithm.HILL_CLIMBING, logger: Optional[logging.Logger]=None, progress_file: Optional[str]=None, tuning_logger=None, random_seed: Optional[int]=None): """Initialize the schedule optimizer. Args: validator: A ScheduleModifier instance that validates and modifies schedules measure_fn: A function that takes a schedule and returns its latency algorithm: The optimization algorithm to use logger: Optional logger for tracking optimization progress progress_file: Optional path to progress file tuning_logger: Optional tuning logger for saving schedules random_seed: Optional seed for reproducible random number generation """ self.validator = validator self.measure_fn = measure_fn self.algorithm = algorithm self.logger = logger self.progress_file = progress_file self.tuning_logger = tuning_logger self.current_best_schedule = None self.current_best_latency = float('inf') self.improvement_history = [] self.current_iteration = 0 self.rng = random.Random(random_seed) if random_seed is not None else random if self.progress_file is not None: with open(self.progress_file, 'w') as f: f.write('iteration,latency_us,is_improvement,is_best\n') def _write_progress(self, iteration: int, latency: float, is_improvement: bool, is_best: bool) -> None: """Write progress to file if progress_file is set. 
Args: iteration: Current iteration number latency: Achieved latency is_improvement: Whether this is an improvement is_best: Whether this is the best latency so far """ if self.progress_file is not None: latency_us = latency_to_us(latency) with open(self.progress_file, 'a') as f: f.write(f'{iteration},{latency_us},{is_improvement},{is_best}\n') def _log_iteration(self, schedule: Dict, latency: float, is_improvement: bool) -> None: """Log an optimization iteration. Args: schedule: Current schedule latency: Achieved latency is_improvement: Whether this is an improvement """ if self.logger is None: return latency_str = format_latency_us(latency) if is_improvement: self.logger.info(f'Iteration {self.current_iteration}: Found improvement! Latency: {latency_str}') else: self.logger.debug(f'Iteration {self.current_iteration}: No improvement. Latency: {latency_str}') def _log_summary(self) -> None: """Log a summary of the optimization process.""" if self.logger is None: return self.logger.info('\nOptimization Summary:') best_latency_str = format_latency_us(self.current_best_latency) improvement_history = [latency_to_us(h) for h in self.improvement_history] self.logger.info(f'Best latency: {best_latency_str}') self.logger.info(f'Total iterations: {self.current_iteration}') self.logger.info(f'Improvement history: {improvement_history}') def _measure_with_logging(self, schedule: Dict) -> float: """Measure schedule latency with logging. Args: schedule: Schedule to measure Returns: Measured latency """ latency = self.measure_fn(schedule) self.current_iteration += 1 return latency def _initialize_optimization(self, verbose: bool) -> Tuple[Dict, float]: """Initialize the optimization process with the current best schedule. 
Args: verbose: Whether to print progress information Returns: Tuple of (current_best_schedule, current_best_latency) """ if verbose and self.logger: self.logger.info('Starting Hill Climbing Optimization...') current_best_schedule, _ = self.validator.get_current_schedule_state() current_best_latency = self._measure_with_logging(current_best_schedule) self.improvement_history = [current_best_latency] self._write_progress(0, current_best_latency, True, True) if verbose and self.logger: self.logger.info(f'Initial Best Latency: {format_latency_us(current_best_latency)} for schedule: {{ { {n.name: s for n, s in current_best_schedule.items()}} }}') return (current_best_schedule, current_best_latency) def _get_schedulable_nodes(self) -> List: """Get list of nodes that can be scheduled (non-NOOP operations). Returns: List of schedulable nodes """ return [n for n in self.validator.nodes if get_custom_operation_type_val(get_custom(n)) != Operation.NOOP] def _select_random_move(self, schedulable_nodes: List, current_best_schedule: Dict) -> Tuple: """Select a random node and generate a new target cycle for it. Args: schedulable_nodes: List of nodes that can be moved current_best_schedule: Current best schedule Returns: Tuple of (node_to_move, new_target_cycle) """ node_to_move = self.rng.choice(schedulable_nodes) original_cycle = current_best_schedule[node_to_move] delta_cycle = self.rng.randrange(-self.validator.T, self.validator.T + 1) if delta_cycle == 0: delta_cycle = self.rng.choice([-1, 1]) new_target_cycle = max(0, original_cycle + delta_cycle) return (node_to_move, new_target_cycle) def _evaluate_move(self, node_to_move, new_target_cycle: int, current_best_latency: float, verbose: bool) -> Tuple[bool, float, Optional[Dict], Optional[np.ndarray]]: """Evaluate a potential move and determine if it's an improvement. 
Args: node_to_move: Node to move new_target_cycle: New target cycle for the node current_best_latency: Current best latency verbose: Whether to print progress information Returns: Tuple of (is_improvement, new_latency, new_schedule, resource_table) or (False, current_latency, None, None) if invalid """ if verbose and self.logger: self.logger.info(f' Attempting to move node {node_to_move.name} to cycle {new_target_cycle}') is_valid_move, candidate_schedule, error_message = self.validator.attempt_move(node_to_move, new_target_cycle) if not is_valid_move or candidate_schedule is None: return (False, current_best_latency, None, None) candidate_latency = self._measure_with_logging(candidate_schedule) if candidate_latency == float('inf'): return (False, current_best_latency, None, None) is_improvement = candidate_latency < current_best_latency if self.tuning_logger is not None: self.tuning_logger.log_iteration(self.current_iteration, candidate_schedule, candidate_latency, is_improvement) else: self._log_iteration(candidate_schedule, candidate_latency, is_improvement) self._write_progress(self.current_iteration, candidate_latency, is_improvement, is_improvement) if is_improvement and verbose and self.logger: self.logger.info(f' *** Improvement found! New latency: {format_latency_us(candidate_latency)} (old: {format_latency_us(current_best_latency)}) ***') _, resource_table = self.validator.get_current_schedule_state() return (is_improvement, candidate_latency, candidate_schedule, resource_table) def _update_best_solution(self, is_improvement: bool, candidate_latency: float, candidate_schedule: Dict, candidate_rt: Optional[np.ndarray], current_best_schedule: Dict, current_best_latency: float) -> Tuple[Dict, float, int]: """Update the best solution if an improvement is found. 
Args: is_improvement: Whether the candidate is an improvement candidate_latency: Latency of the candidate schedule candidate_schedule: Candidate schedule candidate_rt: Candidate resource table current_best_schedule: Current best schedule current_best_latency: Current best latency Returns: Tuple of (new_best_schedule, new_best_latency, no_improvement_streak) """ if is_improvement: if candidate_rt is not None: self.validator.commit_move(candidate_schedule, candidate_rt) self.improvement_history.append(candidate_latency) return (candidate_schedule, candidate_latency, 0) else: return (current_best_schedule, current_best_latency, 1) def _log_final_results(self, current_best_latency: float, current_best_schedule: Dict, verbose: bool) -> None: """Log the final optimization results. Args: current_best_latency: Final best latency current_best_schedule: Final best schedule verbose: Whether to print progress information """ if verbose and self.logger: self.logger.info('\nOptimization Finished.') self.logger.info(f'Final Best Latency: {format_latency_us(current_best_latency)}') self.logger.info(f'Final Best Schedule: {{ { {n.name: s for n, s in current_best_schedule.items()}} }}') def _run_hill_climbing(self, max_iterations: int=100, max_no_improvement: int=20, verbose: bool=True) -> OptimizationResult: """Run hill climbing optimization algorithm. 
Args: max_iterations: Maximum number of iterations to run max_no_improvement: Maximum number of iterations without improvement before stopping verbose: Whether to print progress information Returns: OptimizationResult containing the best schedule and optimization metrics """ current_best_schedule, current_best_latency = self._initialize_optimization(verbose) no_improvement_streak = 0 iteration = 0 while iteration < max_iterations: if verbose and self.logger: self.logger.info(f'\nIteration {iteration + 1}/{max_iterations}') schedulable_nodes = self._get_schedulable_nodes() if not schedulable_nodes: if verbose and self.logger: self.logger.info(' No schedulable (non-NOOP) nodes to move. Stopping.') break node_to_move, new_target_cycle = self._select_random_move(schedulable_nodes, current_best_schedule) is_improvement, candidate_latency, candidate_schedule, candidate_rt = self._evaluate_move(node_to_move, new_target_cycle, current_best_latency, verbose) current_best_schedule, current_best_latency, streak_increment = self._update_best_solution(is_improvement, candidate_latency, candidate_schedule, candidate_rt, current_best_schedule, current_best_latency) no_improvement_streak += streak_increment if no_improvement_streak >= max_no_improvement: if verbose and self.logger: self.logger.info(f'\nStopping early: No improvement in {max_no_improvement} iterations.') break iteration += 1 self._log_final_results(current_best_latency, current_best_schedule, verbose) self.current_best_latency = current_best_latency self.current_iteration = iteration self._log_summary() return OptimizationResult(schedule=current_best_schedule, latency=current_best_latency, iterations=iteration, algorithm=OptimizationAlgorithm.HILL_CLIMBING, improvement_history=self.improvement_history) def optimize(self, max_iterations: int=100, max_no_improvement: int=20, verbose: bool=True) -> OptimizationResult: """Run the selected optimization algorithm. 
Args: max_iterations: Maximum number of iterations to run max_no_improvement: Maximum number of iterations without improvement before stopping verbose: Whether to print progress information Returns: OptimizationResult containing the best schedule and optimization metrics """ if self.algorithm == OptimizationAlgorithm.HILL_CLIMBING: return self._run_hill_climbing(max_iterations=max_iterations, max_no_improvement=max_no_improvement, verbose=verbose) else: raise ValueError(f'Unsupported optimization algorithm: {self.algorithm}')
class ScheduleOptimizer: def __init__(self, validator: ScheduleModifier, measure_fn: Callable[[Dict], float], algorithm: OptimizationAlgorithm=OptimizationAlgorithm.HILL_CLIMBING, logger: Optional[logging.Logger]=None, progress_file: Optional[str]=None, tuning_logger=None, random_seed: Optional[int]=None): '''Initialize the schedule optimizer. Args: validator: A ScheduleModifier instance that validates and modifies schedules measure_fn: A function that takes a schedule and returns its latency algorithm: The optimization algorithm to use logger: Optional logger for tracking optimization progress progress_file: Optional path to progress file tuning_logger: Optional tuning logger for saving schedules random_seed: Optional seed for reproducible random number generation ''' pass def _write_progress(self, iteration: int, latency: float, is_improvement: bool, is_best: bool) -> None: '''Write progress to file if progress_file is set. Args: iteration: Current iteration number latency: Achieved latency is_improvement: Whether this is an improvement is_best: Whether this is the best latency so far ''' pass def _log_iteration(self, schedule: Dict, latency: float, is_improvement: bool) -> None: '''Log an optimization iteration. Args: schedule: Current schedule latency: Achieved latency is_improvement: Whether this is an improvement ''' pass def _log_summary(self) -> None: '''Log a summary of the optimization process.''' pass def _measure_with_logging(self, schedule: Dict) -> float: '''Measure schedule latency with logging. Args: schedule: Schedule to measure Returns: Measured latency ''' pass def _initialize_optimization(self, verbose: bool) -> Tuple[Dict, float]: '''Initialize the optimization process with the current best schedule. Args: verbose: Whether to print progress information Returns: Tuple of (current_best_schedule, current_best_latency) ''' pass def _get_schedulable_nodes(self) -> List: '''Get list of nodes that can be scheduled (non-NOOP operations). 
Returns: List of schedulable nodes ''' pass def _select_random_move(self, schedulable_nodes: List, current_best_schedule: Dict) -> Tuple: '''Select a random node and generate a new target cycle for it. Args: schedulable_nodes: List of nodes that can be moved current_best_schedule: Current best schedule Returns: Tuple of (node_to_move, new_target_cycle) ''' pass def _evaluate_move(self, node_to_move, new_target_cycle: int, current_best_latency: float, verbose: bool) -> Tuple[bool, float, Optional[Dict], Optional[np.ndarray]]: '''Evaluate a potential move and determine if it's an improvement. Args: node_to_move: Node to move new_target_cycle: New target cycle for the node current_best_latency: Current best latency verbose: Whether to print progress information Returns: Tuple of (is_improvement, new_latency, new_schedule, resource_table) or (False, current_latency, None, None) if invalid ''' pass def _update_best_solution(self, is_improvement: bool, candidate_latency: float, candidate_schedule: Dict, candidate_rt: Optional[np.ndarray], current_best_schedule: Dict, current_best_latency: float) -> Tuple[Dict, float, int]: '''Update the best solution if an improvement is found. Args: is_improvement: Whether the candidate is an improvement candidate_latency: Latency of the candidate schedule candidate_schedule: Candidate schedule candidate_rt: Candidate resource table current_best_schedule: Current best schedule current_best_latency: Current best latency Returns: Tuple of (new_best_schedule, new_best_latency, no_improvement_streak) ''' pass def _log_final_results(self, current_best_latency: float, current_best_schedule: Dict, verbose: bool) -> None: '''Log the final optimization results. 
Args: current_best_latency: Final best latency current_best_schedule: Final best schedule verbose: Whether to print progress information ''' pass def _run_hill_climbing(self, max_iterations: int=100, max_no_improvement: int=20, verbose: bool=True) -> OptimizationResult: '''Run hill climbing optimization algorithm. Args: max_iterations: Maximum number of iterations to run max_no_improvement: Maximum number of iterations without improvement before stopping verbose: Whether to print progress information Returns: OptimizationResult containing the best schedule and optimization metrics ''' pass def optimize(self, max_iterations: int=100, max_no_improvement: int=20, verbose: bool=True) -> OptimizationResult: '''Run the selected optimization algorithm. Args: max_iterations: Maximum number of iterations to run max_no_improvement: Maximum number of iterations without improvement before stopping verbose: Whether to print progress information Returns: OptimizationResult containing the best schedule and optimization metrics ''' pass
14
13
29
4
17
8
3
0.45
0
10
3
0
13
11
13
13
392
62
228
92
173
102
121
47
107
7
0
3
37
327,669
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/prefetch_scheduling.py
wave_lang.kernel.wave.scheduling.prefetch_scheduling.MMAGroup
from ...ops.wave_ops import get_custom, Read, Write, MMA, ScaledMMA, IterArg, Reshape, Extract from ..utils.classes import AttentionOperationType import torch.fx as fx class MMAGroup: """Groups MMA operations and their dependencies for prefetch scheduling.""" VALID_SUFFIXES = {'0', '1'} def __init__(self, mma_ops: list[fx.Node]): self.global_reads = set() self.shared_reads = set() self.shared_writes = set() self.mma_ops = set(mma_ops) def add_nodes(self, nodes: list[fx.Node]): for node in nodes: custom = get_custom(node) if isinstance(custom, Read): if custom.memory is None: self.global_reads.add(node) else: self.shared_reads.add(node) elif isinstance(custom, Write): self.shared_writes.add(node) def _get_operation_type(self, suffix: str, operation_category: str) -> AttentionOperationType: """Get the appropriate operation type enum for a given suffix and category.""" operation_mapping = {'0': {'global_reads': AttentionOperationType.GLOBAL_LOAD_0, 'shared_reads': AttentionOperationType.LOCAL_LOAD_0, 'shared_writes': AttentionOperationType.LOCAL_STORE_0, 'mma_ops': AttentionOperationType.MMA_0}, '1': {'global_reads': AttentionOperationType.GLOBAL_LOAD_1, 'shared_reads': AttentionOperationType.LOCAL_LOAD_1, 'shared_writes': AttentionOperationType.LOCAL_STORE_1, 'mma_ops': AttentionOperationType.MMA_1}} if suffix not in self.VALID_SUFFIXES: raise ValueError(f'Invalid suffix: {suffix}. 
Must be one of {self.VALID_SUFFIXES}.') if operation_category not in operation_mapping[suffix]: raise ValueError(f'Invalid operation category: {operation_category}') return operation_mapping[suffix][operation_category] def get_all_operation_types(self, suffix: str) -> dict[str, AttentionOperationType]: """Get all operation types for a given suffix.""" operation_mapping = {'0': {'global_reads': AttentionOperationType.GLOBAL_LOAD_0, 'shared_reads': AttentionOperationType.LOCAL_LOAD_0, 'shared_writes': AttentionOperationType.LOCAL_STORE_0, 'mma_ops': AttentionOperationType.MMA_0}, '1': {'global_reads': AttentionOperationType.GLOBAL_LOAD_1, 'shared_reads': AttentionOperationType.LOCAL_LOAD_1, 'shared_writes': AttentionOperationType.LOCAL_STORE_1, 'mma_ops': AttentionOperationType.MMA_1}} if suffix not in self.VALID_SUFFIXES: raise ValueError(f'Invalid suffix: {suffix}. Must be one of {self.VALID_SUFFIXES}.') return operation_mapping[suffix] def annotate(self, suffix: str): """Annotate nodes with prefetch stage using enum values.""" for node in self.global_reads: node.meta['prefetch_stage'] = self._get_operation_type(suffix, 'global_reads').value for node in self.shared_reads: node.meta['prefetch_stage'] = self._get_operation_type(suffix, 'shared_reads').value for node in self.shared_writes: node.meta['prefetch_stage'] = self._get_operation_type(suffix, 'shared_writes').value for node in self.mma_ops: node.meta['prefetch_stage'] = self._get_operation_type(suffix, 'mma_ops').value def __repr__(self): return f'MMAGroup(\nglobal_reads={self.global_reads},\nshared_reads={self.shared_reads},\nshared_writes={self.shared_writes},\nmma_ops={self.mma_ops})'
class MMAGroup: '''Groups MMA operations and their dependencies for prefetch scheduling.''' def __init__(self, mma_ops: list[fx.Node]): pass def add_nodes(self, nodes: list[fx.Node]): pass def _get_operation_type(self, suffix: str, operation_category: str) -> AttentionOperationType: '''Get the appropriate operation type enum for a given suffix and category.''' pass def get_all_operation_types(self, suffix: str) -> dict[str, AttentionOperationType]: '''Get all operation types for a given suffix.''' pass def annotate(self, suffix: str): '''Annotate nodes with prefetch stage using enum values.''' pass def __repr__(self): pass
7
4
6
0
6
0
2
0
0
2
0
0
3
4
3
3
20
2
18
10
14
0
16
10
12
5
0
3
7
327,670
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/prefetch_scheduling.py
wave_lang.kernel.wave.scheduling.prefetch_scheduling.PrefetchAttentionScheduler
from .scheduler_utils import get_scheduling_stage, BaseScheduler import torch.fx as fx from ..utils.graph_utils import capture_backward_slice from typing import Sequence from ...ops.wave_ops import get_custom, Read, Write, MMA, ScaledMMA, IterArg, Reshape, Extract from ..utils.classes import AttentionOperationType class PrefetchAttentionScheduler(BaseScheduler): def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: """ Implements attention-specific prefetch scheduling with the following cycle assignments: - Cycle 0: Global loads and shared writes for GEMM0 - Cycle 1: Shared reads for GEMM0, shared writes for GEMM1, global loads for GEMM1 - Cycle 2: GEMM0 operations and softmax0 operations - Cycle 3: GEMM1 operations, softmax1 operations, and shared reads for GEMM1 operands """ schedule = {} mmas = [] for node in self.graph.nodes: custom_op = get_custom(node) if isinstance(custom_op, (MMA, ScaledMMA)): mmas.append(node) mma0, mma1 = self._partition_mmas(mmas) softmax_ops = self._identify_softmax_operations(mma0) softmax_ops.update(self._identify_softmax_operations([get_custom(x).acc for x in mma1], backward=True)) softmax_ops = sorted(softmax_ops, key=lambda x: x._sort_key) split_index = softmax_ops.index([x for x in softmax_ops if get_custom(x).tkw_op_name == 'sub'][-1]) softmax0 = softmax_ops[:split_index] softmax1 = softmax_ops[split_index:] for node in softmax0: node.meta['prefetch_stage'] = AttentionOperationType.SOFTMAX_0.value for node in softmax1: node.meta['prefetch_stage'] = AttentionOperationType.SOFTMAX_1.value if not mmas and (not softmax_ops): logger.warning('No MMAs or softmax operations found in graph') self.schedule = {} self._initiation_interval = 1 return ({}, False) mma0_group = self._analyze_mma_dependencies(mma0) mma1_group = self._analyze_mma_dependencies(mma1, exclude_shared_reads=mma0_group.shared_reads) mma0_group.annotate('0') mma1_group.annotate('1') schedule = _set_cycle(mma0_group.global_reads, 0, schedule) schedule = 
_set_cycle(mma0_group.shared_writes, 1, schedule) schedule = _set_cycle(mma0_group.shared_reads, 2, schedule) schedule = _set_cycle(mma1_group.global_reads, 2, schedule) schedule = _set_cycle(mma1_group.shared_writes, 3, schedule) schedule = _set_cycle(mma0_group.mma_ops, 4, schedule) schedule = _set_cycle(softmax0, 5, schedule) schedule = _set_cycle(softmax1, 6, schedule) schedule = _set_cycle(mma1_group.shared_reads, 6, schedule) schedule = _set_cycle(mma1_group.mma_ops, 7, schedule) schedule = self._schedule_remaining_nodes(schedule) self.schedule = schedule self._initiation_interval = 2 logger.info(f'PrefetchAttentionScheduler: Scheduled {len(schedule)} nodes') logger.info(f'GEMM0: {len(mma0)} operations, GEMM1: {len(mma1)} operations') logger.info(f'Softmax0: {len(softmax0)} operations, Softmax1: {len(softmax1)} operations') logger.info(f'Initiation interval: {self._initiation_interval}') return (schedule, True) def _partition_mmas(self, mmas: list[fx.Node]) -> tuple[list[fx.Node], list[fx.Node]]: """ Partitions MMAs into two groups: GEMM0 and GEMM1. GEMM0 is identified by checking if the lhs and rhs of any MMA are in different graphs. The other MMA is GEMM1. """ mma0 = [] mma1 = [] for mma_node in mmas: mma = get_custom(mma_node) lhs = get_custom(mma.lhs) rhs = get_custom(mma.rhs) if lhs.graph != rhs.graph: mma0.append(mma_node) else: mma1.append(mma_node) return (mma0, mma1) def _identify_softmax_operations(self, mma0_nodes: list[fx.Node], backward: bool=False) -> list[fx.Node]: """ Identifies softmax operations that are guaranteed to not be reads, writes, MMAs, or iter args. These operations are guaranteed to be after the GEMM0 operations and before the GEMM1 operations. 
""" from torch.utils import _pytree as pytree softmax_ops = set() end_node_types = (Read, Write, MMA, ScaledMMA, IterArg) start_nodes = set(mma0_nodes) visited = set() while start_nodes: node = start_nodes.pop() if node in visited: continue visited.add(node) neighbors = node.users.keys() if backward: neighbors, _ = pytree.tree_flatten(list(get_custom(node).node_args.values())) neighbors = [x.fx_node for x in neighbors] for user in neighbors: if user not in visited and (not isinstance(get_custom(user), end_node_types)): softmax_ops.add(user) start_nodes.add(user) return softmax_ops def _schedule_remaining_nodes(self, schedule: dict[fx.Node, int]) -> dict[fx.Node, int]: """ Schedules remaining nodes by having them inherit cycles from their arguments. Uses an iterative approach to handle dependencies that may not be resolved in the first pass. """ max_iters = 4 iter_count = 0 while iter_count < max_iters: for node in self.graph.nodes: if node in schedule: continue custom = get_custom(node) for arg in custom.node_args.values(): if isinstance(arg, Sequence): for elem in arg: if elem.fx_node in schedule: schedule[node] = schedule[elem.fx_node] break elif arg.fx_node in schedule: schedule[node] = schedule[arg.fx_node] break iter_count += 1 not_scheduled = [node for node in self.graph.nodes if node not in schedule] assert not not_scheduled, 'Not all nodes were scheduled' return schedule def _analyze_mma_dependencies(self, mma_ops: list[fx.Node], exclude_shared_reads: set[fx.Node]=set()) -> MMAGroup: """ Analyzes dependencies for a single MMA group to determine global reads and shared reads. 
""" mma_group = MMAGroup(mma_ops) for mma_node in mma_ops: mma = get_custom(mma_node) lhs = mma.lhs rhs = mma.rhs if lhs not in exclude_shared_reads and lhs.graph == mma.graph: backward_slice = capture_backward_slice(lhs, lambda x: isinstance(get_custom(x), (Read, Write, Reshape, Extract))) mma_group.add_nodes(backward_slice) if rhs not in exclude_shared_reads and rhs.graph == mma.graph: backward_slice = capture_backward_slice(rhs, lambda x: isinstance(get_custom(x), (Read, Write, Reshape, Extract))) mma_group.add_nodes(backward_slice) return mma_group
class PrefetchAttentionScheduler(BaseScheduler): def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: ''' Implements attention-specific prefetch scheduling with the following cycle assignments: - Cycle 0: Global loads and shared writes for GEMM0 - Cycle 1: Shared reads for GEMM0, shared writes for GEMM1, global loads for GEMM1 - Cycle 2: GEMM0 operations and softmax0 operations - Cycle 3: GEMM1 operations, softmax1 operations, and shared reads for GEMM1 operands ''' pass def _partition_mmas(self, mmas: list[fx.Node]) -> tuple[list[fx.Node], list[fx.Node]]: ''' Partitions MMAs into two groups: GEMM0 and GEMM1. GEMM0 is identified by checking if the lhs and rhs of any MMA are in different graphs. The other MMA is GEMM1. ''' pass def _identify_softmax_operations(self, mma0_nodes: list[fx.Node], backward: bool=False) -> list[fx.Node]: ''' Identifies softmax operations that are guaranteed to not be reads, writes, MMAs, or iter args. These operations are guaranteed to be after the GEMM0 operations and before the GEMM1 operations. ''' pass def _schedule_remaining_nodes(self, schedule: dict[fx.Node, int]) -> dict[fx.Node, int]: ''' Schedules remaining nodes by having them inherit cycles from their arguments. Uses an iterative approach to handle dependencies that may not be resolved in the first pass. ''' pass def _analyze_mma_dependencies(self, mma_ops: list[fx.Node], exclude_shared_reads: set[fx.Node]=set()) -> MMAGroup: ''' Analyzes dependencies for a single MMA group to determine global reads and shared reads. ''' pass
6
5
41
5
28
8
5
0.28
1
7
1
0
5
2
5
8
210
28
142
57
125
40
105
47
98
9
1
6
26
327,671
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/prefetch_scheduling.py
wave_lang.kernel.wave.scheduling.prefetch_scheduling.PrefetchScheduler
from .graph_utils import Edge, sort_graph_by_edge_weight from .scheduler_utils import get_scheduling_stage, BaseScheduler import torch.fx as fx class PrefetchScheduler(BaseScheduler): """ Prefetch Scheduler Convert vanilla schedule of: for i = 0 to N: a = READ_GLOBAL i WRITE_SHARED a barrier b = READ_SHARED COMPUTE b into prefetch schedule: a_0 = READ_GLOBAL 0 WRITE_SHARED a_0 for i = 0 to N - 1: a_{i+1} = READ_GLOBAL i + 1 // a_{i+1} is NOT blocked by this barrier because barriers only block shared memory transfers barrier b_i = READ_SHARED COMPUTE b_i barrier WRITE_SHARED a_{i+1} barrier b_N = READ_SHARED COMPUTE b_N """ def prefetch_scheduling(self, graph: fx.Graph, edges: list[Edge]): """ Classify node to different stages. Based on it's stage, program schedules clock for each node. This function also checks that sorted node "contiguously" move between stages. """ sorted_nodes = sort_graph_by_edge_weight(graph.nodes, edges) schedule = {} current_stage = get_scheduling_stage(sorted_nodes[0], _operation_stage_table) for node in sorted_nodes: node_stage = get_scheduling_stage(node, _operation_stage_table) logger.info(f'Node {node} is in stage {node_stage}') if node_stage == current_stage: schedule[node] = node_stage.value elif PrefetchStage.is_valid_transition(current_stage, node_stage): schedule[node] = node_stage.value current_stage = node_stage else: logger.warning(f'No valid transition from {current_stage} to {node_stage} for node {node}') return ({}, False) return (schedule, True) def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: """ 1. Identify which nodes are part of the global_read/local_write/local_read/compute phase 2. Set nodes to clock (0,1,2,3) based on phase. 2. Set initiation interval to generate valid 2 stage prefetch. 
""" self.schedule, success = self.prefetch_scheduling(self.graph, self.edges) if not success: logger.warning('Prefetch scheduling failed') return ({}, False) logger.info(f'Schedule: {self.schedule}') assert self.schedule, 'Schedule is empty' self._initiation_interval = 2 if self.num_stages != self._initiation_interval: logger.warning(f'Initiation interval {self._initiation_interval} does not match number of stages {self.num_stages}') return ({}, False) return (self.schedule, success)
class PrefetchScheduler(BaseScheduler): ''' Prefetch Scheduler Convert vanilla schedule of: for i = 0 to N: a = READ_GLOBAL i WRITE_SHARED a barrier b = READ_SHARED COMPUTE b into prefetch schedule: a_0 = READ_GLOBAL 0 WRITE_SHARED a_0 for i = 0 to N - 1: a_{i+1} = READ_GLOBAL i + 1 // a_{i+1} is NOT blocked by this barrier because barriers only block shared memory transfers barrier b_i = READ_SHARED COMPUTE b_i barrier WRITE_SHARED a_{i+1} barrier b_N = READ_SHARED COMPUTE b_N ''' def prefetch_scheduling(self, graph: fx.Graph, edges: list[Edge]): ''' Classify node to different stages. Based on it's stage, program schedules clock for each node. This function also checks that sorted node "contiguously" move between stages. ''' pass def schedule_graph(self) -> tuple[dict[fx.Node, int], bool]: ''' 1. Identify which nodes are part of the global_read/local_write/local_read/compute phase 2. Set nodes to clock (0,1,2,3) based on phase. 2. Set initiation interval to generate valid 2 stage prefetch. ''' pass
3
3
22
1
16
6
4
1.06
1
7
2
0
2
2
2
5
73
5
33
10
30
35
27
10
24
4
1
2
7
327,672
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/prefetch_scheduling.py
wave_lang.kernel.wave.scheduling.prefetch_scheduling.PrefetchStage
from enum import Enum class PrefetchStage(Enum): GLOBAL_LOAD = 0 LOCAL_STORE = 1 LOCAL_LOAD = 2 COMPUTE = 3 @staticmethod def is_valid_transition(from_stage: 'PrefetchStage', to_stage: 'PrefetchStage') -> bool: if from_stage == to_stage: return True return (from_stage, to_stage) in _prefetch_stage_transition_table
class PrefetchStage(Enum): @staticmethod def is_valid_transition(from_stage: 'PrefetchStage', to_stage: 'PrefetchStage') -> bool: pass
3
0
7
1
6
0
2
0
1
1
0
0
0
0
1
50
14
2
12
9
7
0
9
6
7
2
4
1
2
327,673
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/resources.py
wave_lang.kernel.wave.scheduling.resources.Operation
from enum import Enum class Operation(Enum): READ_SHARED = 'read_shared' WRITE_SHARED = 'write_shared' READ_GLOBAL = 'read_global' WRITE_GLOBAL = 'write_global' GLOBAL_TO_SHARED = 'global_to_shared' MMA = 'mma' ALU = 'alu' VALU = 'valu' SALU = 'salu' NOOP = 'noop' SHUFFLE = 'shuffle'
class Operation(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
12
0
12
12
11
0
12
12
11
0
4
0
0
327,674
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/schedule_enums.py
wave_lang.kernel.wave.scheduling.schedule_enums.SchedulingType
from enum import Enum class SchedulingType(Enum): NONE = 0 MODULO = 16 PREFETCH = 32 FOUR_STAGE = 33 PREFETCH_ATTENTION = 34
class SchedulingType(Enum): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
6
0
6
6
5
0
6
6
5
0
4
0
0
327,675
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/scheduler_utils.py
wave_lang.kernel.wave.scheduling.scheduler_utils.BaseScheduler
import torch.fx as fx import math from .graph_utils import Edge class BaseScheduler: def __init__(self, graph: fx.Graph, edges: list[Edge], resources: list[int]) -> None: self.graph = graph self.edges = edges self.resources = resources self.seed = 2024 @property def initiation_interval(self) -> int: """ Returns the initiation interval of the schedule. """ return self._initiation_interval @property def num_stages(self) -> int: """ Returns the number of stages in the kernel of the pipelined loop. """ max_cycle = max((t + 1 for t in self.schedule.values())) return math.ceil(max_cycle / self.initiation_interval)
class BaseScheduler: def __init__(self, graph: fx.Graph, edges: list[Edge], resources: list[int]) -> None: pass @property def initiation_interval(self) -> int: ''' Returns the initiation interval of the schedule. ''' pass @property def num_stages(self) -> int: ''' Returns the number of stages in the kernel of the pipelined loop. ''' pass
6
2
7
0
5
2
1
0.33
0
3
1
4
3
4
3
3
26
2
18
16
7
6
11
9
7
1
0
0
3
327,676
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/verifier.py
wave_lang.kernel.wave.scheduling.verifier.ResourceUsageTracker
from typing import Callable, Dict, List, Optional, Tuple, TypeAlias import torch.fx as fx import numpy as np class ResourceUsageTracker: """Tracks and validates resource usage across scheduling cycles.""" def __init__(self, resource_limits: np.ndarray, T: int, num_resource_types: int): self.resource_limits = np.array(resource_limits) self.T = T self.num_resource_types = num_resource_types self.RT_global = np.zeros((self.T, self.num_resource_types), dtype=int) def _get_node_duration(self, node: fx.Node, node_rrt_getter: NodeRRTGetter) -> int: node_rrt_val = node_rrt_getter(node) return node_rrt_val.shape[0] if node_rrt_val is not None and node_rrt_val.size > 0 else 0 def _apply_node_operation(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter, operation: Callable[[np.ndarray, np.ndarray], np.ndarray]) -> None: node_rrt_val = node_rrt_getter(node) if node_rrt_val is None or node_rrt_val.size == 0: return for i in range(node_rrt_val.shape[0]): cycle = (start_cycle + i) % self.T self.RT_global[cycle, :] = operation(self.RT_global[cycle, :], node_rrt_val[i, :]) def add_node(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter) -> None: self._apply_node_operation(node, start_cycle, node_rrt_getter, lambda x, y: x + y) def remove_node(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter) -> None: self._apply_node_operation(node, start_cycle, node_rrt_getter, lambda x, y: x - y) def can_add_node(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter) -> bool: node_rrt_val = node_rrt_getter(node) if node_rrt_val is None or node_rrt_val.size == 0: return True for i in range(node_rrt_val.shape[0]): cycle = (start_cycle + i) % self.T if np.any(self.RT_global[cycle, :] + node_rrt_val[i, :] > self.resource_limits): return False return True
class ResourceUsageTracker: '''Tracks and validates resource usage across scheduling cycles.''' def __init__(self, resource_limits: np.ndarray, T: int, num_resource_types: int): pass def _get_node_duration(self, node: fx.Node, node_rrt_getter: NodeRRTGetter) -> int: pass def _apply_node_operation(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter, operation: Callable[[np.ndarray, np.ndarray], np.ndarray]) -> None: pass def add_node(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter) -> None: pass def remove_node(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter) -> None: pass def can_add_node(self, node: fx.Node, start_cycle: int, node_rrt_getter: NodeRRTGetter) -> bool: pass
7
1
9
0
9
0
2
0.02
0
3
0
0
6
4
6
6
60
6
53
30
34
1
29
18
22
4
0
2
12
327,677
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/verifier.py
wave_lang.kernel.wave.scheduling.verifier.ScheduleConstraintRepairer
from typing import Callable, Dict, List, Optional, Tuple, TypeAlias from .graph_utils import Edge import torch.fx as fx class ScheduleConstraintRepairer: """Repairs schedule violations by moving operations to satisfy resource and dependency constraints.""" def __init__(self, graph: ScheduleDependencyGraph, edges: List[Edge], T: int): self.graph = graph self.edges = edges self.T = T self.MAX_REPAIR_ITERATIONS = len(graph.nodes) def _repair_schedule(self, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter, forward: bool=True) -> Tuple[bool, Schedule]: """Repairs a schedule to satisfy dependency and resource constraints. The repair process iteratively moves nodes either forward or backward in time until: 1. All constraints are satisfied 2. No more valid moves can be made 3. Maximum repair iterations are reached Uses a directional constraint enforcement strategy: - Forward repair (forward=True): * Process nodes in ascending order * Enforce predecessor constraints only * Handle successor violations when processing successors - Backward repair (forward=False): * Process nodes in descending order * Enforce successor constraints only * Handle predecessor violations when processing predecessors This strategy maintains schedule validity while avoiding unnecessary cascading repairs. 
Args: schedule: Current schedule to repair resource_tracker: Resource tracker to validate resource constraints node_rrt_getter: Function to get resource requirements for nodes forward: If True, repair by moving nodes forward; if False, repair by moving nodes backward Returns: Tuple of (success, repaired_schedule) where: - success: True if repair was successful, False if repair failed - repaired_schedule: The repaired schedule if successful, or the last attempted schedule if failed """ repaired_schedule = dict(schedule) def get_constraint_cycles(node: fx.Node) -> Tuple[List[int], List[int]]: """Get cycles of predecessor and successor nodes that have edges to/from the current node.""" pred_cycles = [repaired_schedule[pred] for pred in self.graph.get_predecessors(node) if pred in repaired_schedule and self.graph.has_edge(pred, node)] succ_cycles = [repaired_schedule[succ] for succ in self.graph.get_successors(node) if succ in repaired_schedule and self.graph.has_edge(node, succ)] return (pred_cycles, succ_cycles) def should_move_node(node: fx.Node, pred_cycles: List[int], succ_cycles: List[int]) -> Tuple[bool, int]: """Determine if a node should be moved and calculate its target cycle.""" if forward: if pred_cycles and repaired_schedule[node] <= max(pred_cycles): return (True, max(pred_cycles) + 1) elif succ_cycles and repaired_schedule[node] >= min(succ_cycles): return (True, min(succ_cycles) - 1) return (False, 0) for _ in range(self.MAX_REPAIR_ITERATIONS): schedule_modified = False nodes_to_check = sorted(repaired_schedule.keys(), key=lambda n: repaired_schedule[n] if forward else -repaired_schedule[n]) for current_node in nodes_to_check: pred_cycles, succ_cycles = get_constraint_cycles(current_node) should_move, target_cycle = should_move_node(current_node, pred_cycles, succ_cycles) if should_move: if not self._try_move_node(current_node, target_cycle, repaired_schedule, resource_tracker, node_rrt_getter, forward): return (False, repaired_schedule) 
schedule_modified = True if not schedule_modified: break valid = self.validate_dependencies(repaired_schedule) return (valid, repaired_schedule) def _try_move_node(self, node: fx.Node, target_cycle: int, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter, forward: bool) -> bool: """Attempts to move a node to a target cycle while maintaining schedule validity. This method handles the actual movement of a node to a new cycle, including: 1. Temporarily removing the node from its current cycle 2. Finding a valid cycle to place the node 3. Validating resource and dependency constraints 4. Restoring the original state if the move fails For forward repair (forward=True): - Tries cycles from target_cycle up to target_cycle + T - Returns True on first valid cycle found - Returns False if no valid cycle is found For backward repair (forward=False): - Tries cycles from target_cycle down to target_cycle - T - Returns True on first valid cycle found - Returns False if no valid cycle is found Args: node: The node to move target_cycle: The desired cycle to move the node to schedule: The current schedule being modified resource_tracker: Resource tracker to validate resource constraints node_rrt_getter: Function to get resource requirements for nodes forward: If True, try cycles forward; if False, try cycles backward Returns: bool: True if the move was successful, False otherwise """ original_cycle = schedule[node] resource_tracker.remove_node(node, original_cycle, node_rrt_getter) start_cycle = target_cycle end_cycle = target_cycle + self.T if forward else target_cycle - self.T step = 1 if forward else -1 for try_cycle in range(start_cycle, end_cycle, step): if self._is_valid_move(node, try_cycle, schedule, resource_tracker, node_rrt_getter): schedule[node] = try_cycle resource_tracker.add_node(node, try_cycle, node_rrt_getter) return True schedule[node] = original_cycle resource_tracker.add_node(node, original_cycle, node_rrt_getter) return 
False def _is_valid_move(self, node: fx.Node, cycle: int, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter) -> bool: if not resource_tracker.can_add_node(node, cycle, node_rrt_getter): return False for succ in self.graph.get_successors(node): if succ in schedule and schedule[succ] <= cycle and self.graph.has_edge(node, succ): return False for pred in self.graph.get_predecessors(node): if pred in schedule and schedule[pred] >= cycle and self.graph.has_edge(pred, node): return False return True def repair_forward(self, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter) -> Tuple[bool, Schedule]: return self._repair_schedule(schedule, resource_tracker, node_rrt_getter, forward=True) def repair_backward(self, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter) -> Tuple[bool, Schedule]: return self._repair_schedule(schedule, resource_tracker, node_rrt_getter, forward=False) def validate_dependencies(self, schedule: Schedule) -> bool: return all((schedule[edge._to] > schedule[edge._from] for edge in self.edges if edge._from in schedule and edge._to in schedule))
class ScheduleConstraintRepairer: '''Repairs schedule violations by moving operations to satisfy resource and dependency constraints.''' def __init__(self, graph: ScheduleDependencyGraph, edges: List[Edge], T: int): pass def _repair_schedule(self, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter, forward: bool=True) -> Tuple[bool, Schedule]: '''Repairs a schedule to satisfy dependency and resource constraints. The repair process iteratively moves nodes either forward or backward in time until: 1. All constraints are satisfied 2. No more valid moves can be made 3. Maximum repair iterations are reached Uses a directional constraint enforcement strategy: - Forward repair (forward=True): * Process nodes in ascending order * Enforce predecessor constraints only * Handle successor violations when processing successors - Backward repair (forward=False): * Process nodes in descending order * Enforce successor constraints only * Handle predecessor violations when processing predecessors This strategy maintains schedule validity while avoiding unnecessary cascading repairs. 
Args: schedule: Current schedule to repair resource_tracker: Resource tracker to validate resource constraints node_rrt_getter: Function to get resource requirements for nodes forward: If True, repair by moving nodes forward; if False, repair by moving nodes backward Returns: Tuple of (success, repaired_schedule) where: - success: True if repair was successful, False if repair failed - repaired_schedule: The repaired schedule if successful, or the last attempted schedule if failed ''' pass def get_constraint_cycles(node: fx.Node) -> Tuple[List[int], List[int]]: '''Get cycles of predecessor and successor nodes that have edges to/from the current node.''' pass def should_move_node(node: fx.Node, pred_cycles: List[int], succ_cycles: List[int]) -> Tuple[bool, int]: '''Determine if a node should be moved and calculate its target cycle.''' pass def _try_move_node(self, node: fx.Node, target_cycle: int, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter, forward: bool) -> bool: '''Attempts to move a node to a target cycle while maintaining schedule validity. This method handles the actual movement of a node to a new cycle, including: 1. Temporarily removing the node from its current cycle 2. Finding a valid cycle to place the node 3. Validating resource and dependency constraints 4. 
Restoring the original state if the move fails For forward repair (forward=True): - Tries cycles from target_cycle up to target_cycle + T - Returns True on first valid cycle found - Returns False if no valid cycle is found For backward repair (forward=False): - Tries cycles from target_cycle down to target_cycle - T - Returns True on first valid cycle found - Returns False if no valid cycle is found Args: node: The node to move target_cycle: The desired cycle to move the node to schedule: The current schedule being modified resource_tracker: Resource tracker to validate resource constraints node_rrt_getter: Function to get resource requirements for nodes forward: If True, try cycles forward; if False, try cycles backward Returns: bool: True if the move was successful, False otherwise ''' pass def _is_valid_move(self, node: fx.Node, cycle: int, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter) -> bool: pass def repair_forward(self, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter) -> Tuple[bool, Schedule]: pass def repair_backward(self, schedule: Schedule, resource_tracker: ResourceUsageTracker, node_rrt_getter: NodeRRTGetter) -> Tuple[bool, Schedule]: pass def validate_dependencies(self, schedule: Schedule) -> bool: pass
10
5
27
2
18
7
3
0.42
0
7
3
0
7
4
7
7
223
29
137
64
94
57
63
31
53
6
0
4
26
327,678
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/scheduling/verifier.py
wave_lang.kernel.wave.scheduling.verifier.ScheduleDependencyGraph
import torch.fx as fx from typing import Callable, Dict, List, Optional, Tuple, TypeAlias class ScheduleDependencyGraph: """Represents and manages the dependency relationships between scheduled operations.""" def __init__(self, nodes: List[fx.Node], edges: RawEdgesList=None): self.nodes = list(nodes) self.edges = edges self._adj = self._build_adjacency_list(edges, is_successors=True) self._pred_adj = self._build_adjacency_list(edges, is_successors=False) self._edge_set = set() if edges: self._edge_set = {(edge._from, edge._to) for edge in edges} def _build_adjacency_list(self, edges_input: RawEdgesList, is_successors: bool) -> Dict[fx.Node, List[fx.Node]]: adj_map = {node: [] for node in self.nodes} for edge in edges_input: if is_successors: if edge._from in adj_map and edge._to not in adj_map[edge._from]: adj_map[edge._from].append(edge._to) elif edge._to in adj_map and edge._from not in adj_map[edge._to]: adj_map[edge._to].append(edge._from) return adj_map def get_successors(self, node: fx.Node) -> List[fx.Node]: return self._adj.get(node, []) def get_predecessors(self, node: fx.Node) -> List[fx.Node]: return self._pred_adj.get(node, []) def has_edge(self, pred: fx.Node, succ: fx.Node) -> bool: return (pred, succ) in self._edge_set
class ScheduleDependencyGraph: '''Represents and manages the dependency relationships between scheduled operations.''' def __init__(self, nodes: List[fx.Node], edges: RawEdgesList=None): pass def _build_adjacency_list(self, edges_input: RawEdgesList, is_successors: bool) -> Dict[fx.Node, List[fx.Node]]: pass def get_successors(self, node: fx.Node) -> List[fx.Node]: pass def get_predecessors(self, node: fx.Node) -> List[fx.Node]: pass def has_edge(self, pred: fx.Node, succ: fx.Node) -> bool: pass
6
1
5
0
5
0
2
0.07
0
3
0
0
5
5
5
5
34
5
27
15
19
2
24
13
18
5
0
3
10
327,679
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/symbolic_constraints.py
wave_lang.kernel.wave.symbolic_constraints.SymbolicAlias
from typing import Callable from dataclasses import dataclass from wave_lang.kernel._support.indexing import IndexExpr, IndexSymbol from .constraints import Constraint, TilingConstraint, WaveConstraint, WorkgroupConstraint from .utils.symbol_utils import subs_idxc @dataclass class SymbolicAlias: """ A constraint of the form `tkw.SymbolicConstraint(K, SYMBOLIC_K)` specifies that the relationship between the source and target symbols is given by source = source_to_target(target). SymbolicAliases are modeled in the compiler as additional workgroup, wave, and tiling constraints that are derived from the source. They are ignored during expansion and utilize the same workgroup and wave ids as the target symbol. """ source: IndexSymbol | IndexExpr target: IndexSymbol | IndexExpr source_to_target: Callable[[IndexSymbol | IndexExpr], IndexSymbol | IndexExpr] def apply(self, target: IndexSymbol | IndexExpr) -> IndexSymbol | IndexExpr: return subs_idxc(self.source_to_target(target)) def create_new_constraints(self, constraints: list[Constraint]) -> list[Constraint]: """ Creates new constraints for the given constraints with the appropriate substitution of the indexing context. """ new_constraints = [] if not constraints: return new_constraints match constraints[0]: case WorkgroupConstraint(): def build_constraint(x, y, z): return WorkgroupConstraint(x, y, z) def id_fn(x): return x.workgroup_dim case WaveConstraint(): def build_constraint(x, y, z): return WaveConstraint(x, y, z) def id_fn(x): return x.wave_id case TilingConstraint(): def build_constraint(x, y, z): return TilingConstraint(x, y, z) def id_fn(x): return x.induction_var for constraint in constraints: if self.target == constraint.dim: tile_size = self.apply(constraint.tile_size) if tile_size.is_number and tile_size == 0: continue new_constraints.append(build_constraint(self.source, self.apply(constraint.tile_size), id_fn(constraint))) return new_constraints
@dataclass class SymbolicAlias: ''' A constraint of the form `tkw.SymbolicConstraint(K, SYMBOLIC_K)` specifies that the relationship between the source and target symbols is given by source = source_to_target(target). SymbolicAliases are modeled in the compiler as additional workgroup, wave, and tiling constraints that are derived from the source. They are ignored during expansion and utilize the same workgroup and wave ids as the target symbol. ''' def apply(self, target: IndexSymbol | IndexExpr) -> IndexSymbol | IndexExpr: pass def create_new_constraints(self, constraints: list[Constraint]) -> list[Constraint]: ''' Creates new constraints for the given constraints with the appropriate substitution of the indexing context. ''' pass def build_constraint(x, y, z): pass def id_fn(x): pass def build_constraint(x, y, z): pass def id_fn(x): pass def build_constraint(x, y, z): pass def id_fn(x): pass
10
2
17
1
15
2
5
0.39
0
5
4
0
2
0
2
2
51
5
33
8
28
13
24
9
20
8
0
3
9
327,680
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/templates/attention_common.py
wave_lang.kernel.wave.templates.attention_common.AttentionShape
from typing import Optional from dataclasses import dataclass, fields @dataclass(frozen=True) class AttentionShape: num_query_heads: int num_kv_heads: int head_size: int head_size_kv: int batch_size: Optional[int] = None num_seqs: Optional[int] = None max_seq_len: Optional[int] = None total_seq_len: Optional[int] = None context_len: Optional[int] = None fixed_seq_len_prefix: Optional[int] = None fixed_seq_len_extend: Optional[int] = None query_seq_len: Optional[int] = None kv_seq_len: Optional[int] = None block_size: Optional[int] = None def __iter__(self): for field in fields(AttentionShape): field_value = getattr(self, field.name) if field_value: yield field_value
@dataclass(frozen=True) class AttentionShape: def __iter__(self): pass
3
0
5
0
5
0
3
0.35
0
0
0
0
1
0
1
1
28
1
20
14
18
7
20
14
18
3
0
2
3
327,681
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/wave.py
wave_lang.kernel.wave.wave.LaunchableWave
from ..lang import Grid, Memory, SymbolBind from .decompose_dot_mma import decompose_dot_mma from .utils.general_utils import delinearize_index, get_hardware_constraint, partial, remove_files_with_extension from ..compiler import builder, dispatch_codegen, kernel_codegen from .utils.compile_utils import canonicalize_module, apply_transform from .debug_log_hoist import debug_log_hoist, debug_log_write_replace, DebugArgInfo from .type_inference import infer_types from .construct_index_mapping import construct_index_mapping from .symbolic_constraints import SymbolicAlias from .._support.tracing import CapturedTrace, CompiledContext, KernelRegionGraph, Launchable from .shared_memory_indexing import apply_shared_memory_indexing_corrections from .._support.location_config import LocationCaptureConfig from .scheduling.schedule import schedule_graph from .schedule_reordering import schedule_reordering from .._support.indexing import IndexExpr, IndexingContext, index_symbol from .constraints import Constraint, HardwareConstraint, ReorderingConstraint, TilingConstraint, WaveConstraint, WorkgroupConstraint, DeviceConstraint, get_grid_shape, get_device_layout from .promotion import compute_shared_memory_usage, promote_placeholders from .compile_options import WaveCompileOptions from wave_lang.support.ir_imports import Context, Module, Operation from .minimize_global_loads import minimize_global_loads from .codegen import WaveEmitter from sympy.utilities.lambdify import lambdastr from .memory_analysis.minimize_shared_allocs import minimize_shared_allocs from .cache import get_temp_binary_dir import torch.fx as fx from .in_thread_transpose import in_thread_transpose from .barriers import add_shared_memory_barriers import sympy from .hoisting import hoist_loop_invariant_ops from .global_to_shared_gathers import global_to_shared_gathers from .analysis.partition_strided_operators import partition_ops_with_gpr_offsets, partition_strided_operators from .workgroup_reordering import 
reorder_workgroups from .generate_bounds_exprs import generate_bounds_exprs from .analysis.index_sequence_analysis import set_node_indices, set_post_expansion_indices from typing import Any, Callable, Optional, Sequence, get_type_hints from .gather_to_shared import gather_to_shared, gather_to_shared_swizzling from .utils.symbol_utils import safe_subs, subs_idxc, get_induction_symbol from itertools import chain from ..ops.wave_ops import CustomOp, Iterate, get_custom from .utils.print_utils import print_trace, try_apply_pass from .expansion.expansion import add_get_results, expand_graph from ..ops import wave_ops from .decompose_vmma_ops import decompose_vmma_ops from .utils.graph_utils import initialize_iter_args, remove_chained_extractslice, remove_chained_getresult from .decompose_scan_ops import decompose_scan_ops import inspect from .decompose_reduce_ops import decompose_reduce_ops class LaunchableWave(Launchable): def __init__(self, constraints: Optional[list[Constraint]], name: str, eager_function: Callable[[Any], Any]): super().__init__(eager_function) self.constraints = constraints if constraints else [] self.induction_vars: dict[CustomOp, IndexExpr] = {} self._name = name self._f = eager_function self._sig = inspect.signature(eager_function) self.grid_type = Grid[tuple(get_grid_shape(self.workgroup_constraints, self.device_constraints))] self.device_layout = Grid[tuple(get_device_layout(self.device_constraints))] hints = get_type_hints(eager_function) self.bound_scalar_symbols = {index_symbol(name): i for i, (name, arg) in enumerate(hints.items()) if _is_symbol_bind(arg)} symbols_args_map = {} for arg_idx, arg in enumerate(hints.values()): if not _is_memory_arg(arg): continue for dim, symbol in enumerate(arg.symbolic_shape): if symbol in symbols_args_map: continue symbols_args_map[symbol] = (arg_idx, dim) self.symbols_args_map = symbols_args_map @property def device_constraints(self) -> list[DeviceConstraint]: return [constraint for constraint in 
self.constraints if isinstance(constraint, DeviceConstraint)] @property def workgroup_constraints(self) -> list[WorkgroupConstraint]: return [constraint for constraint in self.constraints if isinstance(constraint, WorkgroupConstraint)] @property def tiling_constraints(self) -> list[TilingConstraint]: return [constraint for constraint in self.constraints if isinstance(constraint, TilingConstraint)] @property def wave_constraints(self) -> list[WaveConstraint]: return [constraint for constraint in self.constraints if isinstance(constraint, WaveConstraint)] @property def hardware_constraints(self) -> list[HardwareConstraint]: return [constraint for constraint in self.constraints if isinstance(constraint, HardwareConstraint)] @property def reordering_constraints(self) -> list[ReorderingConstraint]: return [constraint for constraint in self.constraints if isinstance(constraint, ReorderingConstraint)] @property def symbolic_constraints(self) -> list[HardwareConstraint]: return [constraint for constraint in self.constraints if isinstance(constraint, SymbolicAlias)] def _validate_constraints(self): wave_map = {constraint.dim: subs_idxc(constraint.tile_size) for constraint in self.wave_constraints} workgroup_map = {constraint.dim: subs_idxc(constraint.tile_size) for constraint in self.workgroup_constraints} for dim in set(wave_map.keys()) | set(workgroup_map.keys()): wave_size = wave_map[dim] if dim in wave_map else None workgroup_size = workgroup_map[dim] if dim in workgroup_map else None assert workgroup_size is not None, f'expected non-empty tile size in `WorkgroupConstraint` for dimension {dim}' if wave_size is None: continue assert wave_size > 0, f'expected non-zero tile in `WaveConstraint` for dimension {dim}' assert workgroup_size > 0, f'expected non-zero tile in `WorkgroupConstraint` for dimension {dim}' assert workgroup_size >= wave_size, f'expected workgroup tile size to be the same or larger than wavefront tile size for dimension {dim}' assert workgroup_size % 
wave_size == 0, f'expected workgroup tile size to be an integral multiple of wavefront tile size for dimension {dim}' workgroup_dims = set([cons.workgroup_dim for cons in self.workgroup_constraints]) min_dim = min(workgroup_dims) max_dim = max(workgroup_dims) assert max_dim - min_dim + 1 == len(workgroup_dims), 'expected contiguous indices for `workgroup_dim` field in workgroup constraints' return def _trace(self, *, location_capture_config: Optional[LocationCaptureConfig]=None) -> CapturedTrace: region_graph = KernelRegionGraph(location_capture_config=location_capture_config, func=self._f) with CompiledContext(region_graph, grid_type=self.grid_type) as context: custom_ops: dict[str, wave_ops.CustomOp] = {cls.tkw_op_name: cls for _, cls in inspect.getmembers(wave_ops, inspect.isclass) if issubclass(cls, wave_ops.CustomOp) and hasattr(cls, 'tkw_op_name')} for name, op in custom_ops.items(): context.register_custom_op(name, op) with region_graph.subtracer() as subtracer: root_name, _ = subtracer.trace(self._f) trace = CapturedTrace(region_graph, root_name) return trace def create_induction_vars(self, trace: CapturedTrace) -> None: """ Creates induction variables for all the reductions in the graph and associates tiling constraints all the reduction dimensions with the appropriate induction variables. """ def is_reduction(node: fx.Node): custom = get_custom(node) return isinstance(custom, Iterate) reduction_nodes = trace.walk(is_reduction) for node in reduction_nodes: custom = get_custom(node) self.induction_vars[custom] = get_induction_symbol(custom.axis) for tiling_constraint in self.tiling_constraints: if tiling_constraint.dim == custom.axis: tiling_constraint.induction_var = self.induction_vars[custom] def initialize_wave_constraints(self) -> None: """ For each wave constraint, determines the appropriate wave id by looking for workgroup constraints along the same dimension and using information from the hardware constraints. 
""" self._validate_constraints() hardware_constraint = self.hardware_constraints[0] for wave_constraint in self.wave_constraints: for workgroup_constraint in self.workgroup_constraints: if wave_constraint.dim == workgroup_constraint.dim: wave_constraint.set_wave_id_from_hardware_and_workgroup_constraint(hardware_constraint, workgroup_constraint) if hardware_constraint.waves_per_block is None: waves_per_block = [1, 1, 1] for wave_constraint in self.wave_constraints: count = subs_idxc(wave_constraint.waves_per_block) waves_per_block[wave_constraint.workgroup_dim] = count hardware_constraint.waves_per_block = tuple(waves_per_block) def initialize_reductions(self, trace: CapturedTrace) -> None: """ For each reduction, initializes the reduction count by looking at the tiling constraints associated with the reduction. """ def is_reduction(node): return isinstance(get_custom(node), Iterate) for reduction in trace.walk(is_reduction): for tiling_constraint in self.tiling_constraints: if tiling_constraint.dim == get_custom(reduction).axis: reduction.count = subs_idxc(tiling_constraint.count) def get_workgroup_dims(self) -> list[int]: """ Returns the workgroup dimensions that are not aliased. """ aliased_dims = [x.source for x in self.constraints if isinstance(x, SymbolicAlias)] workgroup_dims = [x for x in self.workgroup_constraints if x.dim not in aliased_dims] return workgroup_dims def update_aliased_workgroup_constraints(self, workgroup_dims: dict[int, int]) -> None: """ This function updates the wg_dim for aliased workgroup constraints. 
""" aliased_dims = [x.source for x in self.constraints if isinstance(x, SymbolicAlias)] for constraint in self.workgroup_constraints: if constraint.dim in aliased_dims: constraint.wg_dim = workgroup_dims[constraint.workgroup_dim].wg_dim def initialize_workgroup_constraints(self) -> None: """ For kernels that distribute more than three dimensions among workgroups, we need to update the workgroup constraints for dimensions >= 2 with the appropriate workgroup index. """ workgroup_dims = self.get_workgroup_dims() dims_to_delinearize = [x for x in workgroup_dims if x.workgroup_dim >= 2] if all((x.workgroup_dim <= 2 for x in dims_to_delinearize)): return shape = [subs_idxc(x.count) for x in dims_to_delinearize if x.primary] new_workgroup_dims = delinearize_index(WORKGROUP_2, shape) for delinearize_dim in dims_to_delinearize: delinearize_dim.wg_dim = new_workgroup_dims[delinearize_dim.workgroup_dim - 2] self.update_aliased_workgroup_constraints(workgroup_dims) def initialize_symbolic_constraints(self) -> None: """ For each symbolic constraint, create new constraints for the related symbolic values with appropriate substitutions. 
""" new_wg_constraints, new_wave_constraints, new_tiling_constraints = ([], [], []) for symbolic_constraint in self.symbolic_constraints: new_wg_constraints += symbolic_constraint.create_new_constraints(self.workgroup_constraints) new_wave_constraints += symbolic_constraint.create_new_constraints(self.wave_constraints) new_tiling_constraints += symbolic_constraint.create_new_constraints(self.tiling_constraints) for wave_constraint in new_wave_constraints: for workgroup_constraint in new_wg_constraints: if wave_constraint.dim == workgroup_constraint.dim and wave_constraint.tile_size == workgroup_constraint.tile_size: new_wave_constraints.remove(wave_constraint) self.constraints += new_wg_constraints + new_wave_constraints + new_tiling_constraints idxc = IndexingContext.current() for constraint in self.symbolic_constraints: if subs_idxc(constraint.target).is_number: idxc._bind_symbol(constraint.source, subs_idxc(constraint.source_to_target(constraint.target))) def infer_grid_shape(self, idxc: IndexingContext): self.grid_type.dims = [1, 1, 1] max_workgroup_dim = 2 aliases = [x.source for x in self.constraints if isinstance(x, SymbolicAlias)] for constraint in self.workgroup_constraints: if constraint.dim in aliases: continue if not constraint.primary: continue dim = constraint.workgroup_dim if constraint.workgroup_dim < max_workgroup_dim else max_workgroup_dim self.grid_type.dims[dim] *= safe_subs(constraint.count, idxc.subs) def infer_device_layout(self, idxc: IndexingContext): self.device_layout.dims = [1, 1, 1] max_device_dim = 2 aliases = [x.source for x in self.constraints if isinstance(x, SymbolicAlias)] for constraint in self.device_constraints: if constraint.dim in aliases: continue dim = constraint.device_dim if constraint.device_dim < max_device_dim else max_device_dim self.device_layout.dims[dim] *= safe_subs(constraint.count, idxc.subs) def compile_to_mlir(self, trace: CapturedTrace, context: Context, module_op: Optional[Module]=None, options: 
WaveCompileOptions=None): entrypoint_name = self._name root_graph = trace.get_root_graph() kernel_sig = kernel_codegen.KernelSignature(self.device_constraints) kernel_sig.add_from_graph_placeholders(root_graph) kernel_sig.add_from_dynamic_symbols(options.dynamic_symbols) kernel_sig.add_grid(self.grid_type) kernel_sig.determine_input_output_buffers(root_graph) if options.print_signature: print(kernel_sig) mb = builder.ModuleBuilder(context=context, module_op=module_op) exe = dispatch_codegen.StreamExecutable(mb, name=entrypoint_name) workgroup_size = self.hardware_constraints[0].threads_per_block subgroup_size = self.hardware_constraints[0].threads_per_wave llvm_func_config = {} if options.denorm_fp_math_f32: llvm_func_config['denormal-fp-math-f32'] = options.denorm_fp_math_f32 if options.waves_per_eu: llvm_func_config['amdgpu-waves-per-eu'] = options.waves_per_eu dispatch_entrypoint = exe.define_entrypoint(entrypoint_name, kernel_sig, self.grid_type, workgroup_size, subgroup_size, options.dynamic_symbols, llvm_func_config) emitter = WaveEmitter(dispatch_entrypoint, trace, self.constraints, options, self.grid_type) try: emitter.emit(trace.get_root_graph()) except: logger.info('Error in emitter') asm = mb.module_op.get_asm() logger.info(asm) raise emitter.finish() if options.postprocess: apply_transform(mb.module_op, options.postprocess, options.subs) if options.canonicalize: canonicalize_module(mb.module_op) return (mb, trace, exe, kernel_sig, entrypoint_name) def build_initial_pass_pipeline(self, trace: CapturedTrace, options: WaveCompileOptions, debug_arg_info: list[DebugArgInfo], debug_handlers: list[Any], print_ir_before: Sequence[str]=[], print_ir_after: Sequence[str]=[]): idxc = IndexingContext.current() def finalize_indices(): idxc.finalize() def substitute_vector_shapes(): self.hardware_constraints[0].subs_vector_shapes(idxc.subs) return [partial(debug_log_hoist, trace, debug_handlers), partial(initialize_iter_args, trace), 
partial(self.create_induction_vars, trace), partial(self.initialize_reductions, trace), finalize_indices, substitute_vector_shapes, partial(add_get_results, trace), partial(infer_types, trace, self.constraints), partial(construct_index_mapping, trace, self.constraints), partial(debug_log_write_replace, trace, self.constraints, options, debug_arg_info), partial(promote_placeholders, trace, self.constraints, options.reorder_allocs), partial(set_node_indices, trace, self.constraints, print_ir_before, print_ir_after), partial(reorder_workgroups, trace, self.reordering_constraints), partial(expand_graph, trace, self.constraints), partial(set_post_expansion_indices, trace, self.constraints), partial(remove_chained_getresult, trace)] def _trace_and_get_kernel_signature(self, options: WaveCompileOptions, context: Optional[Context]=None, module_op: Optional[Operation]=None) -> tuple[builder.ModuleBuilder, CapturedTrace, dispatch_codegen.StreamExecutable, kernel_codegen.KernelSignature, str, WaveCompileOptions, Sequence[DebugArgInfo], Grid]: _warn_iree_is_too_old() if options.wave_runtime: remove_files_with_extension(get_temp_binary_dir(), '.hsaco') print_ir_after = options.print_ir_after print_ir_before = options.print_ir_before profile_pass = options.profile_pass if options.print_trace_begin: print(f'\n***Tracing kernel {self._name}***') debug_arg_info = [] debug_handlers = [] trace = self._trace(location_capture_config=options.location_capture_config) if 'all' in print_ir_after or 'all' in print_ir_before or 'trace' in print_ir_after or ('first' in print_ir_before): print(f'***After trace/Before first pass***\n') print_trace(trace) graph_passes = self.build_initial_pass_pipeline(trace, options, debug_arg_info, debug_handlers, print_ir_before, print_ir_after) graph_passes += [partial(decompose_vmma_ops, trace, self.constraints), partial(decompose_dot_mma, trace, self.constraints)] if options.optimization_level: graph_passes += [partial(hoist_loop_invariant_ops, trace, 
self.constraints), partial(gather_to_shared, trace, self.constraints, options), partial(gather_to_shared_swizzling, trace, self.constraints, options), partial(in_thread_transpose, trace, self.constraints), partial(global_to_shared_gathers, trace, self.constraints), partial(minimize_global_loads, trace, self.constraints)] graph_passes += [partial(apply_shared_memory_indexing_corrections, trace, self.constraints)] graph_passes += [partial(partition_ops_with_gpr_offsets, trace, self.constraints), partial(partition_strided_operators, trace, self.constraints), partial(remove_chained_extractslice, trace)] graph_passes += [partial(decompose_reduce_ops, trace, self.constraints), partial(decompose_scan_ops, trace, self.constraints)] scheduling_type = options.schedule use_scheduling_barriers = options.use_scheduling_barriers multi_buffer_count = options.multi_buffer_count if multi_buffer_count is not None: multi_buffer_count = max(1, options.multi_buffer_count) graph_passes.append(partial(schedule_graph, trace, self.constraints, use_scheduling_barriers, scheduling_type, options.override_schedule, options.dump_schedule, multi_buffer_count)) if options.optimization_level: graph_passes += [partial(schedule_reordering, trace, self.constraints, scheduling_type), partial(minimize_shared_allocs, trace, options.minimize_shared_allocs)] graph_passes += [partial(add_shared_memory_barriers, trace), partial(compute_shared_memory_usage, trace, options.kernel_launch_info), partial(generate_bounds_exprs, trace, self.constraints)] pass_times = {} for p in graph_passes: try_apply_pass(p, trace, print_ir_before, print_ir_after, profile_pass, pass_times) if options.print_pass_times: pass_times_list = sorted(pass_times.items(), key=lambda x: x[1], reverse=True) print(f'Pass times:') for k, v in pass_times_list: print(f' {k}: {v:.4f}s') if 'all' in print_ir_after or 'last' in print_ir_after: print(f'***After final pass {p.__name__}***\n') print_trace(trace) 
self.infer_grid_shape(IndexingContext.current()) self.infer_device_layout(IndexingContext.current()) if options.print_grid: print(f'Grid: {self.grid_type}') print(f'Device layout: {self.device_layout}') hw_constraint = get_hardware_constraint(self.constraints) grid_symbols = list(self.bound_scalar_symbols.keys()) + list(options.dynamic_symbols) options.kernel_launch_info.grid = sympy.lambdify([grid_symbols], self.grid_type.dims) options.kernel_launch_info.grid_str = lambdastr([grid_symbols], self.grid_type.dims) options.kernel_launch_info.blocks = [int(x) for x in hw_constraint.threads_per_block] options.kernel_launch_info.func_name = self._name idxc = IndexingContext.current() for sym, val in zip([THREAD_0, THREAD_1, THREAD_2, WORKGROUP_0, WORKGROUP_1, WORKGROUP_2], chain(hw_constraint.threads_per_block, self.grid_type.dims)): if safe_subs(val, idxc.subs) == 1: idxc.bind_constant(sym, 0) return (*self.compile_to_mlir(trace, context, module_op, options=options), options, debug_arg_info, debug_handlers, self.device_layout) def aot_execute(self, args, kwargs): raise NotImplementedError('AOT execution for wave not implemented yet.') def eager_execute(self, args, kwargs): raise NotImplementedError('Eager execution for wave not implemented yet.') def __repr__(self): return f'tk.wave @{self._name}[{self.grid_type}]'
null
37
7
23
2
19
2
3
0.12
1
28
16
0
23
8
23
23
624
81
484
160
415
59
238
114
211
14
1
3
80
327,682
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/wave_sim.py
wave_lang.kernel.wave.wave_sim._RegisterProxy
class _RegisterProxy: def __getitem__(self, indices: tuple[...]): shape = indices[:-1] dtype = indices[-1] return _ShapedRegister(shape, dtype)
class _RegisterProxy: def __getitem__(self, indices: tuple[...]): pass
2
0
4
0
4
0
1
0
0
2
1
0
1
0
1
1
5
0
5
4
3
0
5
4
3
1
0
0
1
327,683
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/wave_sim.py
wave_lang.kernel.wave.wave_sim._ShapedRegister
from typing import Any, Callable, Optional, TypeAlias import torch class _ShapedRegister: def __init__(self, shape: tuple[IndexExpr, ...], dtype: Any) -> None: self.shape = shape self.dtype = dtype def __call__(self, init: Any) -> 'Register': return torch.full(self.shape, init, dtype=self.dtype)
class _ShapedRegister: def __init__(self, shape: tuple[IndexExpr, ...], dtype: Any) -> None: pass def __call__(self, init: Any) -> 'Register': pass
3
0
3
0
3
0
1
0
0
2
0
0
2
2
2
2
7
1
6
5
3
0
6
5
3
1
0
0
2
327,684
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/wave_sim.py
wave_lang.kernel.wave.wave_sim._TklProxy
import torch class _TklProxy: f16 = torch.float16 f32 = torch.float32 Register = _RegisterProxy()
class _TklProxy: pass
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
4
3
0
4
4
3
0
0
0
0
327,685
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/kernel/wave/wave_sim.py
wave_lang.kernel.wave.wave_sim._TkwProxy
class _TkwProxy: iterate = _iterate_proxy read = _read_proxy write = _write_proxy mma = _mma_proxy
class _TkwProxy: pass
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0
5
5
4
0
5
5
4
0
0
0
0
327,686
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device.Device
import torch import atexit from typing import Any, Callable, Dict, Optional, Union from iree.runtime import BufferUsage, ExternalTimepointFlags, HalBufferView, HalDevice, HalDriver, HalExternalTimepoint, MemoryType, SemaphoreCompatibility, VmInstance, VmModule, create_hal_module, get_driver class Device: """Represents a low-level device (HalDriver/HalDevice) and scheduling data. This is the type that user's interact with as a 'Device'. Devices can be handled loose-leaf or bound to a thread with a context manager. """ __slots__ = ['_s', '_main_timeline', '_main_timepoint', '_tx_timeline', '_tx_timepoint', '_fence_capacity', '_external_timepoints', '_device_interop', 'compile_target_flags', 'driver_id', 'export_torch_tensor', 'import_torch_tensor', 'instance_cache_key', 'type_cache_key'] _s: DeviceState import_torch_tensor: Callable[[torch.Tensor], HalBufferView] export_torch_tensor: Callable[[HalBufferView, torch.Tensor], torch.Tensor] driver_id: str instance_cache_key: str type_cache_key: str compile_target_flags: tuple[str, ...] 
def _try_clean_external_timepoints(self): while len(self._external_timepoints) > 0: if self._main_timeline.query() >= self._external_timepoints[0][1]: self._device_interop.destroy_timepoint_event(self._external_timepoints[0][0]) self._external_timepoints = self._external_timepoints[1:] else: break def setup_iree_action(self): self._try_clean_external_timepoints() timepoint_import = self._device_interop.get_timepoint_import() if timepoint_import is not None: self._main_timepoint += 1 self._main_timeline.import_timepoint(self._main_timepoint, timepoint_import) timepoint_export = HalExternalTimepoint() self._main_timepoint += 1 self._main_timeline.export_timepoint(self._main_timepoint, 3, 0, timepoint_export) return timepoint_export elif self.sync: self._main_timepoint += 1 return None def finalize_iree_action(self, external_timepoint: HalExternalTimepoint): if external_timepoint is not None: self._try_clean_external_timepoints() self._device_interop.wait_exported_timepoint(external_timepoint) self._external_timepoints.append((external_timepoint, self._main_timepoint)) elif self.sync: self._main_timeline.wait(self._main_timepoint) def __new__(cls, uri: Optional[str]=None, *, device_state: Optional[DeviceState]=None): if uri is not None: assert not device_state, 'device_state= cannot be given with explicit URI' try: existing = _CURRENT_THREAD.device_by_uri[uri] except (AttributeError, KeyError): ... 
else: return existing device_state = DeviceState.from_uri(uri) new_inst = super().__new__(cls) new_inst._s = device_state try: _CURRENT_THREAD.device_by_uri[uri] = new_inst except AttributeError: _CURRENT_THREAD.device_by_uri = {uri: new_inst} new_inst._initialize() return new_inst else: assert device_state, 'device_state= must be given if URI ommitted' new_inst = super().__new__(cls) new_inst._s = device_state new_inst._initialize() return new_inst def _initialize(self): d = self._s.device self._main_timeline = d.create_semaphore(0) self._main_timepoint = 0 self._tx_timeline = d.create_semaphore(0) self._tx_timepoint = 0 self._external_timepoints = [] self._fence_capacity = 2 driver_id = repr(d) colon_pos = driver_id.find(':') if colon_pos >= 0: driver_id = driver_id[0:colon_pos] self.driver_id = driver_id try: import_fn = TORCH_TENSOR_IMPORTERS[driver_id] export_fn = TORCH_TENSOR_EXPORTERS[driver_id] self._device_interop = IREE_SEMAPHPORE_INTEROP[driver_id](self._s.torch_stream is None) self.import_torch_tensor = lambda t: import_fn(self, t) self.export_torch_tensor = lambda bv, t: export_fn(self, bv, t) self.compile_target_flags = DEVICE_TARGET_COMPILE_FLAGS[driver_id] except KeyError as e: raise AssertionError(f"Unsupported TORCH_TENSOR_IMPORTERS for iree driver '{driver_id}'") from e self.instance_cache_key = repr(d) self._recompute_target_keys() atexit.register(self._try_clean_external_timepoints) def _recompute_target_keys(self): self.type_cache_key = f"{self.driver_id}:{';'.join(self.compile_target_flags)}" self.instance_cache_key = f"{self.driver_id}:{self._s.enumerated_info.get('device_id', None)}:{self._s.torch_stream}" @property def hal_device(self) -> HalDevice: return self._s.device @property def vm_instance(self) -> VmInstance: return self._s.instance @property def sync(self) -> bool: return self._s.torch_stream is None def create_hal_module(self) -> VmModule: s = self._s return create_hal_module(s.instance, s.device) def get_type_key_hash(self, *, 
hasher: Callable[[str], str]=DEFAULT_KEY_HASHER): return hasher(self.type_cache_key) @staticmethod def current() -> 'Device': try: return _CURRENT_THREAD.stack[-1] except (AttributeError, IndexError): raise NoCurrentDeviceError() def set(self) -> 'Device': """Sets this device as the current device without a context manager.""" try: _CURRENT_THREAD.stack.append(self) except AttributeError: _CURRENT_THREAD.stack = [self] return self def clear(self): """Clears the current device without a context manager.""" try: c = _CURRENT_THREAD.stack[-1] if _CURRENT_THREAD.stack[-1] is self: _CURRENT_THREAD.stack.pop() return except (AttributeError, IndexError): ... raise MismatchedDeviceSetClearError() def dump_device_info(self) -> str: return self._s.driver.dump_device_info(self._s.enumerated_device_id) def __repr__(self): return f'<Turbine Device: {self._s.device}>' def __enter__(self): try: _CURRENT_THREAD.stack.append(self) except AttributeError: _CURRENT_THREAD.stack = [self] def __exit__(self, type, value, traceback): _CURRENT_THREAD.stack.pop()
class Device: '''Represents a low-level device (HalDriver/HalDevice) and scheduling data. This is the type that user's interact with as a 'Device'. Devices can be handled loose-leaf or bound to a thread with a context manager. ''' def _try_clean_external_timepoints(self): pass def setup_iree_action(self): pass def finalize_iree_action(self, external_timepoint: HalExternalTimepoint): pass def __new__(cls, uri: Optional[str]=None, *, device_state: Optional[DeviceState]=None): pass def _initialize(self): pass def _recompute_target_keys(self): pass @property def hal_device(self) -> HalDevice: pass @property def vm_instance(self) -> VmInstance: pass @property def sync(self) -> bool: pass def create_hal_module(self) -> VmModule: pass def get_type_key_hash(self, *, hasher: Callable[[str], str]=DEFAULT_KEY_HASHER): pass @staticmethod def current() -> 'Device': pass def setup_iree_action(self): '''Sets this device as the current device without a context manager.''' pass def clear(self): '''Clears the current device without a context manager.''' pass def dump_device_info(self) -> str: pass def __repr__(self): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass
23
3
9
0
8
1
2
0.24
0
10
3
0
17
7
18
18
234
31
165
48
137
40
126
38
107
4
0
2
34
327,687
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device.DeviceState
import torch from typing import Any, Callable, Dict, Optional, Union from iree.runtime import BufferUsage, ExternalTimepointFlags, HalBufferView, HalDevice, HalDriver, HalExternalTimepoint, MemoryType, SemaphoreCompatibility, VmInstance, VmModule, create_hal_module, get_driver from functools import lru_cache class DeviceState: """State for an instantiated HAL device. Note that the IREE runtime internally manages a global cache of drivers for standard named-access (not custom-constructed) drivers. """ __slots__ = ['device', 'driver', 'instance', 'enumerated_info', 'torch_device', 'torch_stream', 'dlpack_device_type_code'] def __init__(self, *, driver: Union[str, HalDriver], device: Optional[HalDevice]=None, vm_instance: Optional[VmInstance]=None, enumerated_info: Optional[dict]=None, torch_device: Optional[torch.device]=None, torch_stream: Optional[int]=None, dlpack_device_type_code: int=0): self.instance = vm_instance or get_vm_instance() self.driver = driver if isinstance(driver, HalDriver) else get_driver(driver) self.device = device if device else self.driver.create_default_device() self.enumerated_info = enumerated_info or {} self.torch_device = torch_device self.torch_stream = torch_stream self.dlpack_device_type_code = dlpack_device_type_code @property def enumerated_device_id(self) -> int: try: return self.enumerated_info['device_id'] except KeyError as e: raise RuntimeError('No enumerated device_id for device') from e @property def enumerated_path(self) -> str: try: return self.enumerated_info['path'] except KeyError as e: raise RuntimeError('No enumerated path for device') from e @property def enumerated_name(self) -> str: try: return self.enumerated_info['name'] except KeyError as e: raise RuntimeError('No enumerated name for device') from e @staticmethod @lru_cache(maxsize=None) def from_uri(uri: str) -> 'DeviceState': driver = get_driver(uri) return DeviceState(driver=driver, device=driver.create_device_by_uri(uri))
class DeviceState: '''State for an instantiated HAL device. Note that the IREE runtime internally manages a global cache of drivers for standard named-access (not custom-constructed) drivers. ''' def __init__(self, *, driver: Union[str, HalDriver], device: Optional[HalDevice]=None, vm_instance: Optional[VmInstance]=None, enumerated_info: Optional[dict]=None, torch_device: Optional[torch.device]=None, torch_stream: Optional[int]=None, dlpack_device_type_code: int=0): pass @property def enumerated_device_id(self) -> int: pass @property def enumerated_path(self) -> str: pass @property def enumerated_name(self) -> str: pass @staticmethod @lru_cache(maxsize=None) def from_uri(uri: str) -> 'DeviceState': pass
11
1
7
0
7
0
2
0.08
0
5
0
0
4
7
5
5
62
7
51
32
30
4
28
15
22
3
0
1
10
327,688
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device.MismatchedDeviceSetClearError
class MismatchedDeviceSetClearError(AssertionError): def __init__(self): super().__init__('Calls to Device.set()/clear() are mismatched or unbalanced.')
class MismatchedDeviceSetClearError(AssertionError): def __init__(self): pass
2
0
2
0
2
0
1
0
1
1
0
0
1
0
1
12
3
0
3
2
1
0
3
2
1
1
4
0
1
327,689
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device.NoCurrentDeviceError
class NoCurrentDeviceError(Exception): def __init__(self): super().__init__("You accessed a method which requires a current device but none was set on this thread. Either pass an explicit 'device=' or set a current device via `with device:`")
class NoCurrentDeviceError(Exception): def __init__(self): pass
2
0
6
0
6
0
1
0
1
1
0
0
1
0
1
11
7
0
7
2
5
0
3
2
1
1
3
0
1
327,690
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device.UnsupportedTorchDeviceError
class UnsupportedTorchDeviceError(Exception): def __init__(self, torch_device): super().__init__(f'Attempt to use turbine with a torch.device that is not supported by this build: {torch_device}')
class UnsupportedTorchDeviceError(Exception): def __init__(self, torch_device): pass
2
0
4
0
4
0
1
0
1
1
0
0
1
0
1
11
5
0
5
2
3
0
3
2
1
1
3
0
1
327,691
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device._CudaSemaphoreInterop
from iree.runtime import BufferUsage, ExternalTimepointFlags, HalBufferView, HalDevice, HalDriver, HalExternalTimepoint, MemoryType, SemaphoreCompatibility, VmInstance, VmModule, create_hal_module, get_driver import torch class _CudaSemaphoreInterop: def __init__(self, sync): self.sync = sync pass def get_timepoint_import(self): torch.cuda.current_stream().synchronize() return None def wait_exported_timepoint(self, timepoint: HalExternalTimepoint): pass def destroy_timepoint_event(self, timepoint: HalExternalTimepoint): return True
class _CudaSemaphoreInterop: def __init__(self, sync): pass def get_timepoint_import(self): pass def wait_exported_timepoint(self, timepoint: HalExternalTimepoint): pass def destroy_timepoint_event(self, timepoint: HalExternalTimepoint): pass
5
0
3
0
3
1
1
0.18
0
0
0
0
4
1
4
4
16
3
11
6
6
2
11
6
6
1
0
0
4
327,692
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device._HipSemaphoreInterop
import torch import platform import ctypes from iree.runtime import BufferUsage, ExternalTimepointFlags, HalBufferView, HalDevice, HalDriver, HalExternalTimepoint, MemoryType, SemaphoreCompatibility, VmInstance, VmModule, create_hal_module, get_driver class _HipSemaphoreInterop: def __init__(self, sync): if platform.system() == 'Windows': self.library = ctypes.CDLL('amdhip64.dll') else: self.library = ctypes.CDLL('libamdhip64.so') self.library.hipEventCreate.argtypes = [ctypes.POINTER(ctypes.c_void_p), ctypes.c_int32] self.library.hipEventCreate.restype = ctypes.c_int32 self.library.hipEventRecord.argtypes = [ctypes.c_void_p, ctypes.c_void_p] self.library.hipEventRecord.restype = ctypes.c_int32 self.library.hipEventDestroy.argtypes = [ctypes.c_void_p] self.library.hipEventDestroy.restype = ctypes.c_int32 self.library.hipStreamWaitEvent.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint] self.library.hipStreamWaitEvent.restype = ctypes.c_int32 self.library.hipEventQuery.argtypes = [ctypes.c_void_p] self.library.hipEventQuery.restype = ctypes.c_int32 self.sync = sync def get_timepoint_import(self): if not self.sync: return None evt = ctypes.c_void_p(0) ret = self.library.hipEventCreate(evt, 2) if ret != 0: raise RuntimeError('Could not create hip event') ret = self.library.hipEventRecord(evt, ctypes.c_void_p(torch.cuda.current_stream().cuda_stream)) if ret != 0: raise RuntimeError('Could not record hip event') timepoint = HalExternalTimepoint() timepoint.compatibility = SemaphoreCompatibility.DEVICE_WAIT timepoint.flags = ExternalTimepointFlags.NONE timepoint.hip_event = evt.value return timepoint def wait_exported_timepoint(self, timepoint: HalExternalTimepoint): if not self.sync: return ret = self.library.hipStreamWaitEvent(ctypes.c_void_p(torch.cuda.current_stream().cuda_stream), ctypes.c_void_p(timepoint.hip_event), 0) if ret != 0: raise RuntimeError('Could not wait on event') def destroy_timepoint_event(self, timepoint: HalExternalTimepoint): if not 
self.sync: return True ret = self.library.hipEventDestroy(ctypes.c_void_p(timepoint.hip_event)) if ret != 0: raise RuntimeError(f'Could not destroy event got {ret}') return True
class _HipSemaphoreInterop: def __init__(self, sync): pass def get_timepoint_import(self): pass def wait_exported_timepoint(self, timepoint: HalExternalTimepoint): pass def destroy_timepoint_event(self, timepoint: HalExternalTimepoint): pass
5
0
15
1
14
0
3
0
0
3
0
0
4
2
4
4
65
7
58
12
53
0
44
12
39
4
0
1
12
327,693
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/device.py
wave_lang.runtime.device._NullSemaphoreInterop
from iree.runtime import BufferUsage, ExternalTimepointFlags, HalBufferView, HalDevice, HalDriver, HalExternalTimepoint, MemoryType, SemaphoreCompatibility, VmInstance, VmModule, create_hal_module, get_driver class _NullSemaphoreInterop: def __init__(self, sync): self.sync = sync def get_timepoint_import(self): return None def wait_exported_timepoint(self, timepoint: HalExternalTimepoint): pass def destroy_timepoint_event(self, timepoint: HalExternalTimepoint): return True
class _NullSemaphoreInterop: def __init__(self, sync): pass def get_timepoint_import(self): pass def wait_exported_timepoint(self, timepoint: HalExternalTimepoint): pass def destroy_timepoint_event(self, timepoint: HalExternalTimepoint): pass
5
0
2
0
2
0
1
0
0
0
0
0
4
1
4
4
12
3
9
6
4
0
9
6
4
1
0
0
4
327,694
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/launch.py
wave_lang.runtime.launch.Launchable
from typing import Any, Callable, Optional, Sequence, Tuple, Union from torch import Tensor from iree.runtime import HalBufferView, HalElementType, ParameterProvider, VmContext, VmFunction, VmModule, VmRef, VmVariantList, create_io_parameters_module from .device import Device, get_device_from_torch from wave_lang.support.logging import runtime_logger as logger from .invoke import invoke_vm_function import torch from pathlib import Path class Launchable: """Facilities for launching a compiled program (VMFB) on an attached device. Like the eager custom-op executor, this follows the usual PyTorch rules whereby the device that input tensors reside on dictates where the launch happens. Unlike that flow, this does not include any notion of jitting or caching. It also has APIs for using parameters, etc. You must manage all compilation/target settings yourself and you merely assert that a given binary is appropriate for launch on a device type. This has various limitations. """ def __init__(self, loader: Optional[_Loader], parameter_providers: Sequence[ParameterProvider]=(), is_async: bool=True): self._loader = loader self._providers = parameter_providers self._is_async = is_async self._target_binaries: dict[str, _TargetBinary] = {} self._target_vm_modules: dict[str, _NamedVmModule] = {} @staticmethod def jit_compile(source: Any, *, parameter_providers: Sequence[ParameterProvider]=(), entry_point: str='main$async', file_cache_dir: Union[str, Path, None]=None) -> 'Launchable': """ Generates a launchable from a program source (e.g., mlir string). Set a file_cache_dir to enable storing/retrieving artifacts between sessions. 
""" callback = _jit_callback(source) if file_cache_dir is None else _caching_jit_callback(source, file_cache_dir) return Launchable.from_vm_module(callback, parameter_providers=parameter_providers, entry_point=entry_point) @staticmethod def from_file_cache_only(file_cache_dir: Union[str, Path], *, parameter_providers: Sequence[ParameterProvider]=(), entry_point: str='main$async') -> 'Launchable': """Only loads vmfbs from the provided file_cache_dir. Will raise an error if not found.""" cache_dir = Path(file_cache_dir) if not cache_dir.is_dir(): raise ValueError(f'Specified cache_dir, {cache_dir}, does not exist.') def callback(device: Device): key_hash = device.get_type_key_hash() vmfb_path = Path(file_cache_dir) / f'{key_hash}.vmfb' if not vmfb_path.is_file(): raise RuntimeError(f'No vmfb found at {vmfb_path}. Please try running with jit compilation enabled, or verify {Path(file_cache_dir).parent} is the correct cache directory to use.') vm_instance = device.vm_instance logger.debug('Loading vmfb from cache: %s', str(vmfb_path)) vmfb = vmfb_path.read_bytes() return (entry_point, VmModule.copy_buffer(vm_instance, vmfb)) return Launchable(callback, parameter_providers=parameter_providers, is_async=entry_point.endswith('$async')) def preload(self, device: torch.device): """Pre-loads (or JIT compiles) for the given torch.device.""" turbine_device = get_device_from_torch(device) self._resolve_target_binary(turbine_device) def _assemble_target_binary_from_vm_module(self, turbine_device: Device, entry_point: str, main_module: VmModule) -> _TargetBinary: device_key = turbine_device.instance_cache_key vm_instance = turbine_device.vm_instance modules = [turbine_device.create_hal_module()] if self._providers: modules.append(create_io_parameters_module(vm_instance, *self._providers)) modules.append(main_module) vm_context = VmContext(vm_instance, modules) main_function = main_module.lookup_function(entry_point) logger.debug('Cached new binary for %s', device_key) 
self._target_binaries[device_key] = (vm_context, main_function) return (vm_context, main_function) @staticmethod def from_vm_module(vm_module_callback: Callable[[Device], VmModule], *, parameter_providers: Sequence[ParameterProvider]=(), entry_point: str='main$async'): def loader(device: Device) -> _NamedVmModule: return (entry_point, vm_module_callback(device)) return Launchable(loader, parameter_providers, is_async=entry_point.endswith('$async')) def _resolve_target_binary(self, turbine_device: Device) -> _TargetBinary: device_key = turbine_device.instance_cache_key existing = self._target_binaries.get(device_key) if existing is not None: logger.debug('Launching cached binary for %s', device_key) return existing device_type_key = turbine_device.type_cache_key _named_module = self._target_vm_modules.get(device_type_key) if _named_module is not None: entry_point, main_module = _named_module logger.debug('Assembling binary for %s from cached module for %s', device_key, device_type_key) return self._assemble_target_binary_from_vm_module(turbine_device, entry_point, main_module) loader = self._loader if loader is not None: _named_module = loader(turbine_device) if _named_module is not None: logger.debug('Cached new module for %s', device_type_key) self._target_vm_modules[device_type_key] = _named_module entry_point, main_module = _named_module return self._assemble_target_binary_from_vm_module(turbine_device, entry_point, main_module) raise NotImplementedError(f'Could not load a target binary for device {turbine_device}') def __call__(self, *args, device: Optional[torch.device]=None, outputs: Sequence[Any]=()): turbine_device: Optional[Device] = None if device is None else get_device_from_torch(device) arg_list = VmVariantList(len(args)) for arg in args: if isinstance(arg, Tensor): if not arg.is_contiguous(): arg = arg.contiguous() tensor_device = arg.device if device is None: device = tensor_device elif tensor_device != device: raise RuntimeError(f'Cannot launch with 
tensors from multiple devices: {tensor_device} vs {device}') if turbine_device is None: turbine_device = get_device_from_torch(tensor_device) arg_list.push_ref(turbine_device.import_torch_tensor(arg)) elif isinstance(arg, int): arg_list.push_int(arg) elif isinstance(arg, float): arg_list.push_float(arg) if device is None or turbine_device is None: raise RuntimeError(f'Cannot invoke Launchable {self} without any Tensor args or an explicit device=') vm_context, vm_function = self._resolve_target_binary(turbine_device) ret_list = VmVariantList(len(outputs)) for output in outputs: if isinstance(output, Tensor): assert output.is_contiguous(), 'Outputs must be contiguous' ret_list.push_ref(turbine_device.import_torch_tensor(arg)) else: raise ValueError(f'Unsupported output type: {type(output)}') invoke_vm_function(turbine_device, self._is_async, vm_context, vm_function, arg_list, ret_list) torch_results = [] for i in range(len(ret_list)): result = ret_list.get_variant(i) if isinstance(result, VmRef): buffer_view = result.deref(HalBufferView, True) if buffer_view is not None: torch_results.append(_export_torch_tensor(buffer_view, turbine_device)) arity = len(torch_results) if arity == 1: return torch_results[0] elif arity == 0: return None else: return torch_results
class Launchable: '''Facilities for launching a compiled program (VMFB) on an attached device. Like the eager custom-op executor, this follows the usual PyTorch rules whereby the device that input tensors reside on dictates where the launch happens. Unlike that flow, this does not include any notion of jitting or caching. It also has APIs for using parameters, etc. You must manage all compilation/target settings yourself and you merely assert that a given binary is appropriate for launch on a device type. This has various limitations. ''' def __init__(self, loader: Optional[_Loader], parameter_providers: Sequence[ParameterProvider]=(), is_async: pass @staticmethod def jit_compile(source: Any, *, parameter_providers: Sequence[ParameterProvider]=(), entry_point: ''' Generates a launchable from a program source (e.g., mlir string). Set a file_cache_dir to enable storing/retrieving artifacts between sessions. ''' pass @staticmethod def from_file_cache_only(file_cache_dir: Union[str, Path], *, parameter_providers: Sequence[ParameterProvider]=(), entry_point: '''Only loads vmfbs from the provided file_cache_dir. Will raise an error if not found.''' pass def callback(device: Device): pass def preload(self, device: torch.device): '''Pre-loads (or JIT compiles) for the given torch.device.''' pass def _assemble_target_binary_from_vm_module(self, turbine_device: Device, entry_point: str, main_module: VmModule) -> _TargetBinary: pass @staticmethod def from_vm_module(vm_module_callback: Callable[[Device], VmModule], *, parameter_providers: Sequence[ParameterProvider]=(), entry_point: pass def loader(device: Device) -> _NamedVmModule: pass def _resolve_target_binary(self, turbine_device: Device) -> _TargetBinary: pass def __call__(self, *args, device: Optional[torch.device]=None, outputs: Sequence[Any]=()): pass
14
4
21
1
19
2
4
0.15
0
13
1
0
5
5
8
8
222
21
175
74
136
26
103
46
92
18
0
4
35
327,695
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/op_reg/base.py
wave_lang.runtime.op_reg.base.AttrArg
from torch import Tensor from typing import Any, Callable, Optional, Sequence, Type, Union, cast class AttrArg: ir_arity: int = 0 maybe_tensor_value: Optional[Tensor] = None is_list: bool = False __slots__ = ['v', 'spec_value'] def __init__(self, v: object): self.v = v self.spec_value: Optional[Any] = v def __repr__(self): return f'AttrArg(<{self.spec_value}>)' def generate_meta(self) -> object: return self.v @property def spec_key(self) -> str: """Generates a key that will be the same for all specializations.""" return f'attr<{self.spec_value}>' @property def mlir_type_asm(self) -> str: raise AssertionError('Cannot resolve `mlir_type_asm` for an AttrArg')
class AttrArg: def __init__(self, v: object): pass def __repr__(self): pass def generate_meta(self) -> object: pass @property def spec_key(self) -> str: '''Generates a key that will be the same for all specializations.''' pass @property def mlir_type_asm(self) -> str: pass
8
1
3
0
2
0
1
0.1
0
4
0
0
5
2
5
5
29
6
21
14
13
2
16
12
10
1
0
0
5
327,696
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/op_reg/base.py
wave_lang.runtime.op_reg.base.CustomOp
from abc import ABC, abstractmethod import functools from typing import Any, Callable, Optional, Sequence, Type, Union, cast import torch class CustomOp(ABC): """Users subclass this in order to register a wave custom op.""" @staticmethod def register(op_class: Optional[Type['CustomOp']]=None, *, library: torch.library.Library=WAVE_LIBRARY, dispatch_key: Union[str, Sequence[str], None]=None, register_meta: bool=True, register_impl: bool=True) -> Callable: """Class decorator for `CustomOp` implementations. The decorator will instantiate the class and then replace it with the callable operation that can be used to invoke the kernel. Typical usage: ``` @CustomOp.register class identity(CustomOp): ... result = identity(torch.tensor(1, 2, 3)) ``` """ if not op_class: return functools.partial(CustomOp.register, library=library, dispatch_key=dispatch_key, register_meta=register_meta, register_impl=register_impl) instance = op_class(library=library, dispatch_key=dispatch_key, register_meta=register_meta, register_impl=register_impl) return instance.op def __init__(self, *, library: torch.library.Library, dispatch_key: Union[str, Sequence[str], None], register_meta: bool, register_impl: bool): self.name = name = _define_signature_in_library(library, self.signature) self.library = library self.cache_key_base = f'{library.ns}.{library.kind}::{name}' self.op = _get_library_op(library, name) if register_meta: library.impl(name, _get_meta_impl(self), 'Meta') fq_name = f'{library.ns}.{name}' ALL_CUSTOM_OP_REGS[fq_name] = self @property @abstractmethod def signature(self) -> str: """PyTorch function signature. This is in the normal PyTorch kernel registration form. For example: ``` my_op(Tensor t) -> Tensor ``` The signature can have some special tokens in the name part: * "@UNIQUE@": Generates a name-specific numeric value and replaces it. """ ... 
@property def single_dispatch(self) -> bool: """Indicates whether the CustomOp should be forced into a single dispatch using a util.func pipeline attribute. It is recommended to only use this for more complicated ops which would not automatically get compiled into a single dispatch. E.g. A fused conv + bias-add + relu custom op. For eager contexts, this will apply the pipeline attribute to the main$async function. For aot contexts, this currently does nothing, but could eventually attempt to apply an `util.inline.never` attribute, in addition to the pipeline attribute, to the function being called by the InlineKernelBuilder. """ return False @abstractmethod def select(self, sel: 'KernelSelection'): """Performs kernel selection. This method has three purposes: 1. Selects which kernel specialization is needed based on arguments. 2. Returns the meta tensor results of the operation, effectively completing the transfer function from argument types to result types. 3. Sets additional metadata that the generate method can use. The `device="meta"` kernel implementation is composed completely by invoking `select`. For implementation devices, `select` is called for each invocation. The `generate` will be called subsequently if the kernel needs to be generated. """ ... def eager_execute(self, *args): """When executing eagerly, allows the CustomOp to provide a direct Python implementation. For AOT/Graph modes, this will not be called. If the method returns NotImplemented, then a standalone kernel will be compiled and executed. This is commonly used for ops that have no significance to a single op execution in the PyTorch runtime (e.g. metadata ops), but could theoretically be used to perform any Python analog desired. """ return NotImplemented @abstractmethod def generate(self, ksel: 'KernelSelection', kb: 'KernelBuilder'): """Generates a kernel based on the `KernelSelection`. This method should generate IR into the given `KernelBuilder`. 
It can do so by consulting any state set on the `KernelSelection`. Each `KernelSelection.args` corresponds to `KernelBuilder.args`. Unless if the argument was set as `ir_arity=0`, the argument will be a `Value`. Otherwise, it will be `None`. It is recommended to use `KernelBuilder.arg(n)` to access. Generation should conclude with a call to `KernelBuilder.yield_results`. """ ...
class CustomOp(ABC): '''Users subclass this in order to register a wave custom op.''' @staticmethod def register(op_class: Optional[Type['CustomOp']]=None, *, library: torch.library.Library=WAVE_LIBRARY, dispatch_key: Union[str, Sequence[str], None]=None, register_meta: bool=True, register_impl: bool=True) -> Callable: '''Class decorator for `CustomOp` implementations. The decorator will instantiate the class and then replace it with the callable operation that can be used to invoke the kernel. Typical usage: ``` @CustomOp.register class identity(CustomOp): ... result = identity(torch.tensor(1, 2, 3)) ``` ''' pass def __init__(self, *, library: torch.library.Library, dispatch_key: Union[str, Sequence[str], None], register_meta: bool, register_impl: bool): pass @property @abstractmethod def signature(self) -> str: '''PyTorch function signature. This is in the normal PyTorch kernel registration form. For example: ``` my_op(Tensor t) -> Tensor ``` The signature can have some special tokens in the name part: * "@UNIQUE@": Generates a name-specific numeric value and replaces it. ''' pass @property def single_dispatch(self) -> bool: '''Indicates whether the CustomOp should be forced into a single dispatch using a util.func pipeline attribute. It is recommended to only use this for more complicated ops which would not automatically get compiled into a single dispatch. E.g. A fused conv + bias-add + relu custom op. For eager contexts, this will apply the pipeline attribute to the main$async function. For aot contexts, this currently does nothing, but could eventually attempt to apply an `util.inline.never` attribute, in addition to the pipeline attribute, to the function being called by the InlineKernelBuilder. ''' pass @abstractmethod def select(self, sel: 'KernelSelection'): '''Performs kernel selection. This method has three purposes: 1. Selects which kernel specialization is needed based on arguments. 2. 
Returns the meta tensor results of the operation, effectively completing the transfer function from argument types to result types. 3. Sets additional metadata that the generate method can use. The `device="meta"` kernel implementation is composed completely by invoking `select`. For implementation devices, `select` is called for each invocation. The `generate` will be called subsequently if the kernel needs to be generated. ''' pass def eager_execute(self, *args): '''When executing eagerly, allows the CustomOp to provide a direct Python implementation. For AOT/Graph modes, this will not be called. If the method returns NotImplemented, then a standalone kernel will be compiled and executed. This is commonly used for ops that have no significance to a single op execution in the PyTorch runtime (e.g. metadata ops), but could theoretically be used to perform any Python analog desired. ''' pass @abstractmethod def generate(self, ksel: 'KernelSelection', kb: 'KernelBuilder'): '''Generates a kernel based on the `KernelSelection`. This method should generate IR into the given `KernelBuilder`. It can do so by consulting any state set on the `KernelSelection`. Each `KernelSelection.args` corresponds to `KernelBuilder.args`. Unless if the argument was set as `ir_arity=0`, the argument will be a `Value`. Otherwise, it will be `None`. It is recommended to use `KernelBuilder.arg(n)` to access. Generation should conclude with a call to `KernelBuilder.yield_results`. ''' pass
14
7
19
3
7
9
1
1.09
1
3
0
1
6
4
7
27
145
28
56
33
28
61
25
14
17
2
4
1
9
327,697
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/op_reg/base.py
wave_lang.runtime.op_reg.base.EagerKernelSelection
from torch import Tensor from typing import Any, Callable, Optional, Sequence, Type, Union, cast class EagerKernelSelection(KernelSelection): """Kernel selection specialized for eager arguments.""" __slots__ = ['args'] def __init__(self, op: CustomOp, args: list[Any]): super().__init__(op, len(args)) self.args = args def arg_tensor(self, arg: int, *, inplace_tied: bool=False) -> 'TensorArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, Tensor), f'Argument type mismatch from Torch for {arg}: Expected tensor, got {type(arg_value)}' arg_descs[arg] = desc = TensorArg(arg_value) if inplace_tied: self.inplace_tied_arg_descs.append(desc) return desc def arg_optional_tensor(self, arg: int) -> Optional['TensorArg']: if arg >= len(self.args) or self.args[arg] is None: if arg >= len(self.args): self.arg_descs.extend(cast(list[Optional[ArgDescriptor]], (arg + 1 - len(self.args)) * [None])) self.arg_descs[arg] = EmptyOptionalTensorArg() return None return self.arg_tensor(arg) def arg_tensor_list(self, arg: int) -> 'TensorListArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, list), f'Argument type mismatch from Torch for {arg}: Expected list, got {type(arg_value)}' arg_descs[arg] = desc = TensorListArg(arg_value) return desc def arg_int(self, arg: int) -> 'IntArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, int), f'Argument type mismatch from Torch for {arg}: Expected int, got {type(arg_value)}' arg_descs[arg] = desc = IntArg(arg_value) return desc def attr_str(self, arg: int) -> 'AttrArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, str), f'Argument type 
mismatch from Torch for {arg}: Expected str, got {type(arg_value)}' arg_descs[arg] = desc = AttrArg(arg_value) return desc def attr_int(self, arg: int) -> 'AttrArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, int), f'Argument type mismatch from Torch for {arg}: Expected int, got {type(arg_value)}' arg_descs[arg] = desc = AttrArg(arg_value) return desc def attr_list_int(self, arg: int) -> 'AttrArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, list), f'Argument type mismatch from Torch for {arg}: Expected list, got {type(arg_value)}' if len(arg_value) > 0: assert isinstance(arg_value[0], int), f'Argument type mismatch from Torch for {arg}: Expected list of int, got element type of {type(arg_value[0])}' arg_descs[arg] = desc = AttrArg(arg_value) return desc def attr_float(self, arg: int) -> 'AttrArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, float), f'Argument type mismatch from Torch for {arg}: Expected float, got {type(arg_value)}' arg_descs[arg] = desc = AttrArg(arg_value) return desc def attr_list_float(self, arg: int) -> 'AttrArg': arg_descs = self.arg_descs arg_value = self.args[arg] assert arg_descs[arg] is None, f'Already constrained argument {arg}' assert isinstance(arg_value, list), f'Argument type mismatch from Torch for {arg}: Expected list, got {type(arg_value)}' for arg_value_i in arg_value: assert isinstance(arg_value_i, float), f'Argument type mismatch from Torch for {arg}: Expected list of float, got element type of {type(arg_value_i)}' arg_descs[arg] = desc = AttrArg(arg_value) return desc def return_tensor(self, t: Tensor) -> 'TensorArg': desc = TensorArg(t) self.result_descs.append(desc) return desc
class EagerKernelSelection(KernelSelection): '''Kernel selection specialized for eager arguments.''' def __init__(self, op: CustomOp, args: list[Any]): pass def arg_tensor(self, arg: int, *, inplace_tied: bool=False) -> 'TensorArg': pass def arg_optional_tensor(self, arg: int) -> Optional['TensorArg']: pass def arg_tensor_list(self, arg: int) -> 'TensorListArg': pass def arg_int(self, arg: int) -> 'IntArg': pass def attr_str(self, arg: int) -> 'AttrArg': pass def attr_int(self, arg: int) -> 'AttrArg': pass def attr_list_int(self, arg: int) -> 'AttrArg': pass def attr_float(self, arg: int) -> 'AttrArg': pass def attr_list_float(self, arg: int) -> 'AttrArg': pass def return_tensor(self, t: Tensor) -> 'TensorArg': pass
12
1
10
0
9
0
1
0.05
1
13
6
0
11
1
11
47
122
12
105
40
93
5
78
32
66
3
5
2
16
327,698
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/op_reg/base.py
wave_lang.runtime.op_reg.base.EmptyOptionalTensorArg
from torch import Tensor from typing import Any, Callable, Optional, Sequence, Type, Union, cast class EmptyOptionalTensorArg: """Sentinel type marking an optional tensor argument that was not provided at the call site. To `KernelSelection` a `None` `ArgDescriptor` indicates an argument has been declared as part of the signature, but the `ArgDescriptor` hasn't been initialized with values an actual call site. An `EmptyOptionalTensorArg` signals that an `ArgDescriptor` has been initialized, but an argument was not provided at the call site. """ ir_arity: int = 0 maybe_tensor_value: Optional[Tensor] = None is_list: bool = False def __repr__(self): return 'TensorArg(None)' @property def spec_key(self) -> str: return 'TensorArg(None)' @property def mlir_type_asm(self) -> str: raise AssertionError('EmptyOptionalTensorArg has no mlir_type_asm') def generate_meta(self) -> None: return None
class EmptyOptionalTensorArg: '''Sentinel type marking an optional tensor argument that was not provided at the call site. To `KernelSelection` a `None` `ArgDescriptor` indicates an argument has been declared as part of the signature, but the `ArgDescriptor` hasn't been initialized with values an actual call site. An `EmptyOptionalTensorArg` signals that an `ArgDescriptor` has been initialized, but an argument was not provided at the call site. ''' def __repr__(self): pass @property def spec_key(self) -> str: pass @property def mlir_type_asm(self) -> str: pass def generate_meta(self) -> None: pass
7
1
2
0
2
0
1
0.57
0
2
0
0
4
0
4
4
28
6
14
10
7
8
12
8
7
1
0
0
4
327,699
iree-org/wave
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/iree-org_wave/wave_lang/runtime/op_reg/base.py
wave_lang.runtime.op_reg.base.FreeFuncKernelBuilder
from typing import Any, Callable, Optional, Sequence, Type, Union, cast
from wave_lang.support.ir_imports import Block, Context, FunctionType, IndexType, InsertionPoint, IntegerAttr, IrType, Location, StringAttr, SymbolTable, Value, arith_d, builtin_d, func_d


class FreeFuncKernelBuilder(KernelBuilder):
    """Kernel builder that emits the body of the kernel into a free function.

    This is intended to be used when compiling a standalone module that will
    be directly invoked by the runtime. Further variants exist that generate
    into a func but also emit a call into another local context.
    """

    def __init__(
        self,
        ksel: KernelSelection,
        *,
        module_body: Block,
        symbol_table: SymbolTable,
        func_name: Optional[str] = None,
        is_public: bool = True,
    ):
        self.module_op = module_body.owner
        context = self.module_op.context
        if func_name is None:
            func_name = ksel.op.name
        with context, Location.unknown(), InsertionPoint(module_body):
            # Assemble the function's MLIR argument types from the provided
            # argument descriptors. List-typed descriptors expand to one IR
            # argument per element; scalar descriptors with arity != 1
            # contribute nothing.
            arg_types = []
            for desc in ksel.get_provided_arg_descs():
                assert desc is not None, "Uninitialized argument descriptor"
                arity = desc.ir_arity
                if desc.is_list:
                    arg_types.extend(
                        IrType.parse(desc.mlir_type_asm[i]) for i in range(arity)
                    )
                elif arity == 1:
                    arg_types.append(IrType.parse(desc.mlir_type_asm))

            # Results: declared results plus in-place tied arguments.
            # Multi-arity results are not yet implemented.
            result_types = []
            for desc in (*ksel.result_descs, *ksel.inplace_tied_arg_descs):
                if desc.is_list:
                    raise AssertionError("NYI: arity > 1 results")
                if desc.ir_arity == 1:
                    result_types.append(IrType.parse(desc.mlir_type_asm))

            # Materialize the func op, mark visibility, and register it in
            # the module's symbol table.
            ftype = FunctionType.get(arg_types, result_types)
            func_op = func_d.FuncOp(func_name, ftype)
            if not is_public:
                func_op.attributes["sym_visibility"] = StringAttr.get("private")
            entry_block: Block = func_op.add_entry_block()
            symbol_table.insert(func_op)

        # Bind entry-block arguments back to the argument descriptors,
        # mirroring the expansion used when the arg types were assembled:
        # lists bind a slice, arity-1 scalars bind one value, everything
        # else binds None.
        block_arguments = list(entry_block.arguments)
        cursor = 0
        arg_bindings: list[Optional[Value]] = []
        for desc in ksel.get_provided_arg_descs():
            assert desc is not None, "Uninitialized argument descriptor"
            arity = desc.ir_arity
            if desc.is_list:
                arg_bindings.append(block_arguments[cursor : cursor + arity])
                cursor += arity
            elif arity == 1:
                arg_bindings.append(block_arguments[cursor])
                cursor += 1
            else:
                arg_bindings.append(None)

        super().__init__(
            ksel,
            arg_bindings,
            ip=InsertionPoint(entry_block),
            module_body=module_body,
            symbol_table=symbol_table,
        )

    @staticmethod
    def create_module(
        ksel: KernelSelection,
        *,
        context: Optional[Context] = None,
        func_name: Optional[str] = None,
        is_public: bool = True,
    ) -> "FreeFuncKernelBuilder":
        """Short-cut to create a new module with a single function in one shot."""
        if context is None:
            context = Context()
        with context, Location.unknown():
            module_op = builtin_d.ModuleOp()
            return FreeFuncKernelBuilder(
                ksel,
                module_body=module_op.body,
                symbol_table=SymbolTable(module_op),
                func_name=func_name,
                is_public=is_public,
            )

    def yield_results(self, *results: Value):
        """Yields results of the kernel computation."""
        assert not self.yielded, "yield_results has already been called"
        ksel = self.ksel
        # Callers must yield exactly the declared results plus in-place
        # tied arguments.
        expected_count = len(ksel.result_descs) + len(ksel.inplace_tied_arg_descs)
        assert (
            len(results) == expected_count
        ), f"Mismatched yielded results and declared+inplace: Expected={expected_count}, Got={len(results)}"
        with self.ip, Location.unknown():
            func_d.ReturnOp(results)
        self.yielded = True
class FreeFuncKernelBuilder(KernelBuilder): '''Kernel builder that emits the body of the kernel into a free function. This is intended to be used when compiling a standalone module that will be directly invoked by the runtime. Further variants exist that generate into a func but also emit a call into another local context. ''' def __init__(self, ksel: KernelSelection, *, module_body: Block, symbol_table: SymbolTable, func_name: Optional[str]=None, is_public: bool=True): pass @staticmethod def create_module(ksel: KernelSelection, *, context: Optional[Context]=None, func_name: Optional[str]=None, is_public: bool=True) -> 'FreeFuncKernelBuilder': '''Short-cut to create a new module with a single function in one shot.''' pass def yield_results(self, *results: Value): '''Yields results of the kernel computation.''' pass
5
3
34
1
31
2
5
0.12
1
7
1
0
2
2
3
27
114
8
95
37
76
11
58
22
54
13
5
4
16